trove-5.0.0/0000775000567000056710000000000012701410521014005 5ustar jenkinsjenkins00000000000000trove-5.0.0/devstack/0000775000567000056710000000000012701410521015611 5ustar jenkinsjenkins00000000000000trove-5.0.0/devstack/settings0000664000567000056710000000331012701410316017373 0ustar jenkinsjenkins00000000000000# Settings needed for Trove plugin # -------------------------------- # Set up default directories TROVE_DIR=${TROVE_DIR:-${DEST}/trove} TROVECLIENT_DIR=${TROVECLIENT_DIR:-${DEST}/python-troveclient} TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master} # Set up configuration directory and files TROVE_CONF_DIR=${TROVE_CONF_DIR:-/etc/trove} TROVE_CONF=${TROVE_CONF:-${TROVE_CONF_DIR}/trove.conf} TROVE_TASKMANAGER_CONF=${TROVE_TASKMANAGER_CONF:-${TROVE_CONF_DIR}/trove-taskmanager.conf} TROVE_CONDUCTOR_CONF=${TROVE_CONDUCTOR_CONF:-${TROVE_CONF_DIR}/trove-conductor.conf} TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-${TROVE_CONF_DIR}/trove-guestagent.conf} TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-${TROVE_CONF_DIR}/api-paste.ini} TROVE_LOCAL_CONF_DIR=${TROVE_LOCAL_CONF_DIR:-${TROVE_DIR}/etc/trove} TROVE_LOCAL_API_PASTE_INI=${TROVE_LOCAL_API_PASTE_INI:-${TROVE_LOCAL_CONF_DIR}/api-paste.ini} TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"} TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"} TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"} # Setup the host gateway if is_service_enabled neutron; then TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} else TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} fi # Support entry points installation of console scripts if [[ -d $TROVE_DIR/bin ]]; then TROVE_BIN_DIR=$TROVE_DIR/bin else TROVE_BIN_DIR=$(get_python_exec_prefix) fi TROVE_MANAGE=$TROVE_BIN_DIR/trove-manage # Tell Tempest this project is present TEMPEST_SERVICES+=,trove enable_service trove tr-api tr-tmgr tr-cond trove-5.0.0/devstack/plugin.sh0000664000567000056710000002664112701410316017456 0ustar jenkinsjenkins00000000000000#!/bin/bash # # lib/trove # Functions to control the configuration and operation of the **Trove** service # Dependencies: # ``functions`` file # ``DEST``, ``STACK_USER`` must be defined # ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined # ``stack.sh`` calls the entry points in this order: # # install_trove # install_python_troveclient # configure_trove # init_trove # start_trove # stop_trove # cleanup_trove # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Functions # --------- # Test if any Trove services are enabled # is_trove_enabled function is_trove_enabled { [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 return 1 } # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging { local CONF=$1 iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CONF DEFAULT use_syslog $SYSLOG if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output setup_colorized_logging $CONF DEFAULT tenant user fi } # create_trove_accounts() - Set up common required trove accounts # Tenant User Roles # ------------------------------------------------------------------ # service trove admin # if enabled function create_trove_accounts { if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then create_service_user "trove" if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then local trove_service=$(get_or_create_service "trove" \ 
"database" "Trove Service") get_or_create_endpoint $trove_service \ "$REGION_NAME" \ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" fi fi } # stack.sh entry points # --------------------- # cleanup_trove() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_trove { #Clean up dirs rm -fr $TROVE_AUTH_CACHE_DIR/* rm -fr $TROVE_CONF_DIR/* } # configure_trove() - Set config files, create data dirs, etc function configure_trove { setup_develop $TROVE_DIR # Create the trove conf dir and cache dirs if they don't exist sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} ${TROVE_AUTH_CACHE_DIR} # Copy api-paste file over to the trove conf dir cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI # (Re)create trove conf files rm -f $TROVE_CONF rm -f $TROVE_TASKMANAGER_CONF rm -f $TROVE_CONDUCTOR_CONF iniset $TROVE_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_CONF database connection `database_connection_url trove` iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE setup_trove_logging $TROVE_CONF iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS" # Increase default quota. iniset $TROVE_CONF DEFAULT max_accepted_volume_size 10 iniset $TROVE_CONF DEFAULT max_instances_per_user 10 iniset $TROVE_CONF DEFAULT max_volumes_per_user 10 configure_auth_token_middleware $TROVE_CONF trove $TROVE_AUTH_CACHE_DIR # (Re)create trove taskmanager conf file if needed if is_service_enabled tr-tmgr; then TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_TASKMANAGER_CONF database connection `database_connection_url trove` iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user radmin iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name trove iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT # TODO(cp16net) use the compute v2 api as default v2.1 is not working. # https://bugs.launchpad.net/python-novaclient/+bug/1493446 iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_compute_service_type compute_legacy setup_trove_logging $TROVE_TASKMANAGER_CONF # Increase default timeouts (required by the tests). 
iniset $TROVE_TASKMANAGER_CONF DEFAULT agent_call_high_timeout 300 iniset $TROVE_TASKMANAGER_CONF DEFAULT usage_timeout 1200 fi # (Re)create trove conductor conf file if needed if is_service_enabled tr-cond; then iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_CONDUCTOR_CONF database connection `database_connection_url trove` iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_user radmin iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_tenant_name trove iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_CONDUCTOR_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT iniset $TROVE_CONDUCTOR_CONF DEFAULT control_exchange trove setup_trove_logging $TROVE_CONDUCTOR_CONF fi # Set up Guest Agent conf iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_host $TROVE_HOST_GATEWAY iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_user radmin iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_tenant_name trove iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/ iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log setup_trove_logging $TROVE_GUESTAGENT_CONF } # install_trove() - Collect source and prepare function install_trove { setup_develop $TROVE_DIR } # install_python_troveclient() - Collect source and prepare function install_python_troveclient { if use_library_from_git "python-troveclient"; then git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH setup_develop $TROVECLIENT_DIR fi } # init_trove() - Initializes Trove Database as a Service function init_trove { # (Re)Create trove db recreate_database trove # Initialize the trove database $TROVE_MANAGE db_sync # Add an admin user to the 'tempest' alt_demo tenant. # This is needed to test the guest_log functionality. # The first part mimics the tempest setup, so make sure we have that. ALT_USERNAME=${ALT_USERNAME:-alt_demo} ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} get_or_create_project ${ALT_TENANT_NAME} default get_or_create_user ${ALT_USERNAME} "$ADMIN_PASSWORD" "default" "alt_demo@example.com" get_or_add_user_project_role Member ${ALT_USERNAME} ${ALT_TENANT_NAME} # The second part adds an admin user to the tenant. 
ADMIN_ALT_USERNAME=${ADMIN_ALT_USERNAME:-admin_${ALT_USERNAME}} get_or_create_user ${ADMIN_ALT_USERNAME} "$ADMIN_PASSWORD" "default" "admin_alt_demo@example.com" get_or_add_user_project_role admin ${ADMIN_ALT_USERNAME} ${ALT_TENANT_NAME} # If no guest image is specified, skip remaining setup [ -z "$TROVE_GUEST_IMAGE_URL" ] && return 0 # Find the glance id for the trove guest image # The image is uploaded by stack.sh -- see $IMAGE_URLS handling GUEST_IMAGE_NAME=$(basename "$TROVE_GUEST_IMAGE_URL") GUEST_IMAGE_NAME=${GUEST_IMAGE_NAME%.*} TROVE_GUEST_IMAGE_ID=$(openstack --os-token $TOKEN --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image list | grep "${GUEST_IMAGE_NAME}" | get_field 1) if [ -z "$TROVE_GUEST_IMAGE_ID" ]; then # If no glance id is found, skip remaining setup echo "Datastore ${TROVE_DATASTORE_TYPE} will not be created: guest image ${GUEST_IMAGE_NAME} not found." return 1 fi # Now that we have the guest image id, initialize appropriate datastores / datastore versions $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" "" $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" "$TROVE_DATASTORE_TYPE" \ "$TROVE_GUEST_IMAGE_ID" "$TROVE_DATASTORE_PACKAGE" 1 $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "inactive_manager" "$TROVE_GUEST_IMAGE_ID" "" 0 $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" $TROVE_MANAGE datastore_update "Inactive_Datastore" "" } # finalize_trove_network() - do the last thing(s) before starting Trove function finalize_trove_network { management_network_id=$(neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $PRIVATE_NETWORK_NAME | awk '{print $2}') echo "finalize_trove_network: found network id $management_network_id" iniset $TROVE_CONF DEFAULT network_label_regex .* iniset $TROVE_CONF DEFAULT ip_regex .* iniset $TROVE_CONF DEFAULT blacklist_regex ^10.0.1.* iniset $TROVE_CONF DEFAULT default_neutron_networks $management_network_id iniset $TROVE_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver iniset $TROVE_TASKMANAGER_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver iniset $TROVE_TASKMANAGER_CONF mysql tcp_ports 22,3306 } # start_trove() - Start running processes, including screen function start_trove { run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF --debug" run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_TASKMANAGER_CONF --debug" run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONDUCTOR_CONF --debug" } # stop_trove() - Stop running processes function stop_trove { # Kill the trove screen windows local serv for serv in tr-api tr-tmgr tr-cond; do stop_process $serv done } # Dispatcher for trove plugin if is_service_enabled trove; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Trove" install_trove install_python_troveclient cleanup_trove elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Trove" configure_trove if is_service_enabled key; then create_trove_accounts fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize trove init_trove # finish the last step in trove network configuration echo_summary "Finalizing Trove Network Configuration" if is_service_enabled neutron; then echo "finalize_trove_network: Neutron is enabled." finalize_trove_network else echo "finalize_trove_network: Neutron is not enabled. Nothing to do." 
        fi

        # Start the trove API and trove taskmgr components
        echo_summary "Starting Trove"
        start_trove
    fi

    if [[ "$1" == "unstack" ]]; then
        stop_trove
    fi
fi

# Restore xtrace
$XTRACE

# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:
trove-5.0.0/devstack/files/0000775000567000056710000000000012701410521016713 5ustar jenkinsjenkins00000000000000
trove-5.0.0/devstack/files/debs/0000775000567000056710000000000012701410521017630 5ustar jenkinsjenkins00000000000000
trove-5.0.0/devstack/files/debs/trove0000664000567000056710000000003212701410316020707 0ustar jenkinsjenkins00000000000000
libxslt1-dev # testonly
trove-5.0.0/devstack/files/rpms-suse/0000775000567000056710000000000012701410521020651 5ustar jenkinsjenkins00000000000000
trove-5.0.0/devstack/files/rpms-suse/trove0000664000567000056710000000003212701410316021730 0ustar jenkinsjenkins00000000000000
libxslt1-dev # testonly
trove-5.0.0/devstack/files/rpms/0000775000567000056710000000000012701410521017674 5ustar jenkinsjenkins00000000000000
trove-5.0.0/devstack/files/rpms/trove0000664000567000056710000000003312701410316020754 0ustar jenkinsjenkins00000000000000
libxslt-devel # testonly
trove-5.0.0/devstack/README.rst0000664000567000056710000000212212701410316017277 0ustar jenkinsjenkins00000000000000
===========================
Enabling Trove in DevStack
===========================

To enable Trove in DevStack, perform the following steps:

::

    Note: The python-troveclient is automatically installed. If you need to
    control how the client gets installed, set the TROVECLIENT_REPO,
    TROVECLIENT_DIR and TROVECLIENT_BRANCH environment variables appropriately.

Download DevStack
=================

.. sourcecode:: bash

    export DEVSTACK_DIR=~/devstack
    git clone git://git.openstack.org/openstack-dev/devstack.git $DEVSTACK_DIR

Enable the Trove plugin
=======================

Enable the plugin by adding the following section to ``$DEVSTACK_DIR/local.conf``:

.. sourcecode:: bash

    [[local|localrc]]
    enable_plugin trove git://git.openstack.org/openstack/trove

Optionally, a git refspec (branch or tag or commit) may be provided as follows:

.. sourcecode:: bash

    [[local|localrc]]
    enable_plugin trove git://git.openstack.org/openstack/trove <refspec>

Run the DevStack utility
========================

.. sourcecode:: bash

    cd $DEVSTACK_DIR
    ./stack.sh
trove-5.0.0/etc/0000775000567000056710000000000012701410521014560 5ustar jenkinsjenkins00000000000000
trove-5.0.0/etc/trove/0000775000567000056710000000000012701410521015717 5ustar jenkinsjenkins00000000000000
trove-5.0.0/etc/trove/trove-taskmanager.conf.sample0000664000567000056710000002107012701410316023502 0ustar jenkinsjenkins00000000000000
[DEFAULT]

# Show more verbose log output (sets INFO log level output)
verbose = True

# Show debugging output in logs (sets DEBUG log level output)
debug = True

# Update the service and instance statuses if the instance fails to become
# active within the configured usage_timeout.
# usage_timeout = 600
# restore_usage_timeout = 36000
update_status_on_fail = True

#================= RPC Configuration ================================

# URL representing the messaging driver to use and its full configuration.
# If not set, we fall back to the 'rpc_backend' option and driver specific
# configuration.
#transport_url=

# The messaging driver to use. Options include rabbit, qpid and zmq.
# Default is rabbit. (string value)
#rpc_backend=rabbit

# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the 'transport_url' option.
control_exchange = trove

# DB Api Implementation
db_api_implementation = trove.db.sqlalchemy.api

# Configuration options for talking to nova via the novaclient.
trove_auth_url = http://0.0.0.0:5000/v2.0
#nova_compute_url = http://localhost:8774/v2
#cinder_url = http://localhost:8776/v1
#swift_url = http://localhost:8080/v1/AUTH_
#neutron_url = http://localhost:9696/

# nova_compute_url, cinder_url, swift_url, and heat_url can all be fetched
# from Keystone. To fetch from Keystone, comment out nova_compute_url,
# cinder_url, swift_url, and heat_url and optionally uncomment the lines below.

# Region name of this node. Used when searching catalog. Default value is None.
#os_region_name = RegionOne
# Service type to use when searching catalog.
#nova_compute_service_type = compute
# Service type to use when searching catalog.
#cinder_service_type = volumev2
# Service type to use when searching catalog.
#swift_service_type = object-store
# Service type to use when searching catalog.
#heat_service_type = orchestration
# Service type to use when searching catalog.
#neutron_service_type = network

# Config options for enabling volume service
trove_volume_support = True
block_device_mapping = vdb
device_path = /dev/vdb
mount_point = /var/lib/mysql
volume_time_out=30
server_delete_time_out=480

# Nova server boot options
# sets the --config-drive argument when doing a nova boot
# (controls how file injection is handled by nova)
use_nova_server_config_drive = False

# Configuration options for talking to nova via the novaclient.
# These options are for an admin user in your keystone config.
# Trove proxies the token received from the user to nova using this admin
# user's credentials, essentially acting as the client via that proxy token.
nova_proxy_admin_user = admin
nova_proxy_admin_pass = 3de4922d8b6ac5a1aad9
nova_proxy_admin_tenant_id =

# Manager impl for the taskmanager
taskmanager_manager=trove.taskmanager.manager.Manager

# Manager sends Exists Notifications
exists_notification_transformer = trove.extensions.mgmt.instances.models.NovaNotificationTransformer
exists_notification_ticks = 30
notification_service_id = mysql:2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b

# Trove DNS
trove_dns_support = False
dns_account_id = 123456
dns_auth_url = http://127.0.0.1:5000/v2.0
dns_username = user
dns_passkey = password
dns_ttl = 3600
dns_domain_name = 'trove.com.'
dns_domain_id = 11111111-1111-1111-1111-111111111111
dns_driver = trove.dns.designate.driver.DesignateDriver
dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory
dns_endpoint_url = http://127.0.0.1/v1/
dns_service_type = dns

# Neutron
network_driver = trove.network.nova.NovaNetwork
default_neutron_networks =

# Trove Security Groups for Instances
trove_security_groups_support = True
trove_security_group_rule_cidr = 0.0.0.0/0

# Guest related conf
agent_heartbeat_time = 10
agent_call_low_timeout = 5
agent_call_high_timeout = 150
agent_replication_snapshot_timeout = 36000

# Whether to use nova's contrib api for create server with volume
use_nova_server_volume = False

# Config option for filtering the IP address that DNS uses
# For nova-network, set this to the appropriate network label defined in nova
# For neutron, set this to .* since users can specify custom network labels
# You can also optionally specify regexes to match the actual IP addresses
# ip_regex (white-list) is applied before black_list_regex in the filter chain
network_label_regex = ^private$
#ip_regex = ^(15.|123.)
#black_list_regex = ^(10.0.0.)
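# Worked example (illustrative values only, not defaults): with
#   network_label_regex = ^private$
#   ip_regex = ^10.
#   black_list_regex = ^10.0.0.
# addresses are first collected from networks labelled "private", ip_regex
# then keeps only 10.x addresses, and black_list_regex finally drops any
# 10.0.0.x addresses.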
# Datastore templates template_path = /etc/trove/templates/ # ============ Notification System configuration =========================== # Sets the notification driver used by oslo.messaging. Options include # messaging, messagingv2, log and routing. Default is 'noop' # notification_driver=noop # Topics used for OpenStack notifications, list value. Default is 'notifications'. # notification_topics=notifications # ============ Logging information ============================= #log_dir = /integration/report #log_file = trove-taskmanager.log # ============ PyDev remote dubugging ============================= # Enable or disable pydev remote debugging. # There are three values allowed: 'disabled', 'enabled' and 'auto' # If value is 'auto' tries to connect to remote debugger server, # but in case of error continue running with disabled debugging pydev_debug = disabled # remote debug server host and port options #pydev_debug_host = localhost #pydev_debug_port = 5678 # path to pydevd library. It will be used if pydevd is absent in sys.path #pydev_path = # ================= Guestagent related ======================== #guest_config = /etc/trove/trove-guestagent.conf # Use 'guest_info = /etc/guest_info' for pre-Kilo compatibility #guest_info = guest_info.conf # Use 'injected_config_location = /etc/trove' for pre-Kilo compatibility #injected_config_location = /etc/trove/conf.d #cloudinit_location = /etc/trove/cloudinit [database] # SQLAlchemy connection string for the reference implementation # registry server. Any valid SQLAlchemy connection string is fine. # See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine connection = mysql://root:e1a2c042c828d3566d0a@localhost/trove # connection = mysql://root:root@localhost/trove # Period in seconds after which SQLAlchemy should reestablish its connection # to the database. # # MySQL uses a default `wait_timeout` of 8 hours, after which it will drop # idle connections. This can result in 'MySQL Gone Away' exceptions. If you # notice this, you can lower this value to ensure that SQLAlchemy reconnects # before MySQL can drop the connection. idle_timeout = 3600 # ================= Security groups related ======================== # Each future datastore implementation should implement # its own oslo group with defined in it: # - tcp_ports; upd_ports; [profiler] # If False fully disable profiling feature. #enabled = False # If False doesn't trace SQL requests. #trace_sqlalchemy = True [oslo_messaging_rabbit] # The RabbitMQ broker address where a single node is used. (string value) # Deprecated group/name - [DEFAULT]/rabbit_host #rabbit_host=localhost # The RabbitMQ broker port where a single node is used. (integer value) # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port=5672 # RabbitMQ HA cluster host:port pairs. (list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts=$rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl=false # The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid #rabbit_userid=guest # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password rabbit_password=f7999d1955c5014aa32c # The RabbitMQ virtual host. 
(string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host=/ [mysql] # Format (single port or port range): A, B-C # where C greater than B tcp_ports = 3306 volume_support = True device_path = /dev/vdb [redis] # Format (single port or port range): A, B-C # where C greater than B tcp_ports = 6379 # redis uses local storage volume_support = False # default device_path = None [cassandra] tcp_ports = 7000, 7001, 9042, 9160 volume_support = True device_path = /dev/vdb [couchbase] tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199 volume_support = True device_path = /dev/vdb [mongodb] volume_support = True device_path = /dev/vdb [vertica] tcp_ports = 5433, 5434, 22, 5444, 5450, 4803 udp_ports = 5433, 4803, 4804, 6453 volume_support = True device_path = /dev/vdb mount_point = /var/lib/vertica taskmanager_strategy = trove.common.strategies.cluster.experimental.vertica.taskmanager.VerticaTaskManagerStrategy trove-5.0.0/etc/trove/trove-guestagent.conf.sample0000664000567000056710000001364712701410316023366 0ustar jenkinsjenkins00000000000000[DEFAULT] #=========== RPC Configuration ====================== # URL representing the messaging driver to use and its full configuration. # If not set, we fall back to the 'rpc_backend' option and driver specific # configuration. #transport_url= # The messaging driver to use. Options include rabbit, qpid and zmq. # Default is rabbit. (string value) #rpc_backend=rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the 'transport_url option. control_exchange = trove # ========== Configuration options for Swift ========== # The swift_url can be specified directly or fetched from Keystone catalog. # To fetch from Keystone, comment out swift_url, and uncomment the others. # swift_url = http://10.0.0.1:8080/v1/AUTH_ # Region name of this node. Default value is None. # os_region_name = RegionOne # Service type to use when searching catalog. # swift_service_type = object-store # ========== Datastore Manager Configurations ========== # Datastore manager implementations. 
# Format: list of 'datastore-type:datastore.manager.implementation.module' # datastore_registry_ext = mysql:trove.guestagent.datastore.mysql.manager.Manager, percona:trove.guestagent.datastore.mysql.manager.Manager # ========== Default Users / DBs Configuration ========== # Permissions to grant "root" user by default root_grant = ALL root_grant_option = True # root_grant = ALTER ROUTINE, CREATE, ALTER, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE VIEW, CREATE USER, DELETE, DROP, EVENT, EXECUTE, INDEX, INSERT, LOCK TABLES, PROCESS, REFERENCES, SELECT, SHOW DATABASES, SHOW VIEW, TRIGGER, UPDATE, USAGE # root_grant_option = False # Default password Length for root password # default_password_length = 36 # ========== Default Storage Options for backup ========== # Default configuration for storage strategy and storage options # for backups # For storage to Swift, use the following as defaults: # storage_strategy = SwiftStorage # storage_namespace = trove.common.strategies.storage.swift # Default config options for storing backups to swift # backup_swift_container = database_backups # backup_use_gzip_compression = True # backup_use_openssl_encryption = True # backup_aes_cbc_key = "default_aes_cbc_key" # backup_use_snet = False # backup_chunk_size = 65536 # backup_segment_max_size = 2147483648 # ========== Sample Logging Configuration ========== # Show more verbose log output (sets INFO log level output) # verbose = True # Show debugging output in logs (sets DEBUG log level output) # debug = True # Directory and path for log files log_dir = /var/log/trove/ log_file = logfile.txt [profiler] # If False fully disable profiling feature. #enabled = False # If False doesn't trace SQL requests. #trace_sqlalchemy = True [oslo_messaging_rabbit] # The RabbitMQ broker address where a single node is used. (string value) # Deprecated group/name - [DEFAULT]/rabbit_host rabbit_host=10.0.0.1 # The RabbitMQ broker port where a single node is used. (integer value) # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port=5672 # RabbitMQ HA cluster host:port pairs. (list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts=$rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl=false # The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid #rabbit_userid=guest # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password rabbit_password=f7999d1955c5014aa32c # The RabbitMQ virtual host. 
(string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host=/ # ========== Datastore Specific Configuration Options ========== [mysql] # For mysql, the following are the defaults for backup, and restore: # backup_strategy = InnoBackupEx # backup_namespace = trove.guestagent.strategies.backup.mysql_impl # restore_namespace = trove.guestagent.strategies.restore.mysql_impl # Default configuration for mysql replication # replication_strategy = MysqlBinlogReplication # replication_namespace = trove.guestagent.strategies.replication.mysql_binlog # replication_user = slave_user # replication_password = slave_password # Users to ignore for user create/list/delete operations # ignore_users = os_admin # Databases to ignore for db create/list/delete operations # ignore_dbs = mysql, information_schema, performance_schema [vertica] # For vertica, following are the defaults needed: # mount_point = /var/lib/vertica # readahead_size = 2048 # guestagent_strategy = trove.common.strategies.cluster.experimental.vertica.guestagent.VerticaGuestAgentStrategy [redis] # For redis, the following are the defaults for backup, and restore: # backup_strategy = RedisBackup # backup_namespace = trove.guestagent.strategies.backup.experimental.redis_impl # restore_namespace = trove.guestagent.strategies.restore.experimental.redis_impl [percona] backup_namespace = trove.guestagent.strategies.backup.mysql_impl restore_namespace = trove.guestagent.strategies.restore.mysql_impl [couchbase] backup_namespace = trove.guestagent.strategies.backup.experimental.couchbase_impl restore_namespace = trove.guestagent.strategies.restore.experimental.couchbase_impl [cassandra] backup_namespace = trove.guestagent.strategies.backup.experimental.cassandra_impl restore_namespace = trove.guestagent.strategies.restore.experimental.cassandra_impl [db2] # For db2, the following are the defaults for backup, and restore: # backup_strategy = DB2Backup # backup_namespace = trove.guestagent.strategies.backup.experimental.db2_impl # restore_namespace = trove.guestagent.strategies.restore.experimental.db2_impl [couchdb] #For CouchDB, the following are the defaults for backup and restore: # backup_strategy = CouchDBBackup # backup_namespace = trove.guestagent.strategies.backup.experimental.couchdb_impl # restore_namespace = trove.guestagent.strategies.restore.experimental.couchdb_impl trove-5.0.0/etc/trove/conf.d/0000775000567000056710000000000012701410521017066 5ustar jenkinsjenkins00000000000000trove-5.0.0/etc/trove/conf.d/README0000664000567000056710000000035412701410316017752 0ustar jenkinsjenkins00000000000000These conf files are read and used by the guest to provide extra information to the guest. The first example of this is the guest_info.conf which will have the uuid of the instance so that the guest can report back things to the infra. 
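As an illustration only (the real values are written by the taskmanager at
instance-create time, and the exact field set may vary by release), an
injected guest_info.conf typically looks like:

    [DEFAULT]
    guest_id = <instance uuid>
    datastore_manager = mysql
    tenant_id = <tenant uuid>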
trove-5.0.0/etc/trove/conf.d/guest_info.conf0000664000567000056710000000007112701410316022077 0ustar jenkinsjenkins00000000000000
# Guest-specific information injected by the taskmanager
trove-5.0.0/etc/trove/cloudinit/0000775000567000056710000000000012701410521017711 5ustar jenkinsjenkins00000000000000
trove-5.0.0/etc/trove/cloudinit/README0000664000567000056710000000022612701410316020573 0ustar jenkinsjenkins00000000000000
These cloudinit scripts will be used as userdata on instance create.
File names should match the pattern: service_type.cloudinit
For example: mysql.cloudinit
trove-5.0.0/etc/trove/trove-conductor.conf.sample0000664000567000056710000000354412701410316023213 0ustar jenkinsjenkins00000000000000
[DEFAULT]
verbose = True
debug = True

trove_auth_url = http://0.0.0.0:5000/v2.0

# The manager class to use for conductor. (string value)
conductor_manager = trove.conductor.manager.Manager

#===================== RPC Configuration =================================

# URL representing the messaging driver to use and its full configuration.
# If not set, we fall back to the 'rpc_backend' option and driver specific
# configuration.
#transport_url=

# The messaging driver to use. Options include rabbit, qpid and zmq.
# Default is rabbit. (string value)
#rpc_backend=rabbit

# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the 'transport_url' option.
control_exchange = trove

[profiler]
# If False fully disable profiling feature.
#enabled = False
# If False doesn't trace SQL requests.
#trace_sqlalchemy = True

[database]
connection = mysql://root:e1a2c042c828d3566d0a@localhost/trove

[oslo_messaging_rabbit]
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
#rabbit_host=localhost

# The RabbitMQ broker port where a single node is used. (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_port
#rabbit_port=5672

# RabbitMQ HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/rabbit_hosts
#rabbit_hosts=$rabbit_host:$rabbit_port

# Connect over SSL for RabbitMQ. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
#rabbit_use_ssl=false

# The RabbitMQ userid. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_userid
#rabbit_userid=guest

# The RabbitMQ password. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_password
rabbit_password=f7999d1955c5014aa32c

# The RabbitMQ virtual host.
(string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host=/ trove-5.0.0/etc/trove/api-paste.ini0000664000567000056710000000231212701410316020303 0ustar jenkinsjenkins00000000000000[composite:trove] use = call:trove.common.wsgi:versioned_urlmap /: versions /v1.0: troveapi [app:versions] paste.app_factory = trove.versions:app_factory [pipeline:troveapi] pipeline = cors faultwrapper osprofiler authtoken authorization contextwrapper ratelimit extensions troveapp #pipeline = debug extensions troveapp [filter:extensions] paste.filter_factory = trove.common.extensions:factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:authorization] paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = trove [filter:contextwrapper] paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory [filter:faultwrapper] paste.filter_factory = trove.common.wsgi:FaultWrapper.factory [filter:ratelimit] paste.filter_factory = trove.common.limits:RateLimitingMiddleware.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [app:troveapp] paste.app_factory = trove.common.api:app_factory #Add this filter to log request and response for debugging [filter:debug] paste.filter_factory = trove.common.wsgi:Debug trove-5.0.0/etc/trove/api-paste.ini.test0000664000567000056710000000231512701410316021264 0ustar jenkinsjenkins00000000000000[composite:trove] use = call:trove.common.wsgi:versioned_urlmap /: versions /v1.0: troveapi [app:versions] paste.app_factory = trove.versions:app_factory [pipeline:troveapi] pipeline = faultwrapper authtoken authorization contextwrapper extensions ratelimit troveapp #pipeline = debug extensions troveapp [filter:extensions] paste.filter_factory = trove.common.extensions:factory [filter:authtoken] paste.filter_factory = trove.tests.fakes.keystone:filter_factory service_protocol = http service_host = 127.0.0.1 service_port = 5000 auth_host = 127.0.0.1 auth_port = 35357 auth_protocol = http auth_uri = http://127.0.0.1:5000/ signing_dir = /tmp/keystone-signing-trove [filter:authorization] paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory [filter:contextwrapper] paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory [filter:faultwrapper] paste.filter_factory = trove.common.wsgi:FaultWrapper.factory [filter:ratelimit] paste.filter_factory = trove.common.limits:RateLimitingMiddleware.factory [app:troveapp] paste.app_factory = trove.common.api:app_factory #Add this filter to log request and response for debugging [filter:debug] paste.filter_factory = trove.common.wsgi:Debug trove-5.0.0/etc/trove/trove.conf.test0000664000567000056710000001305412701410316020710 0ustar jenkinsjenkins00000000000000[DEFAULT] # Fake out the remote implementations remote_nova_client = trove.tests.fakes.nova.fake_create_nova_client remote_guest_client = trove.tests.fakes.guestagent.fake_create_guest_client remote_swift_client = trove.tests.fakes.swift.fake_create_swift_client remote_cinder_client = trove.tests.fakes.nova.fake_create_cinder_client # Fake out the RPC implementation rpc_backend = fake # Skip running periodic tasks report_interval = 0 # Fake out DNS. 
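# (trove.tests.fakes.dns.FakeDnsDriver keeps DNS entries in memory so tests
# can assert on them without a real DNS backend.)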
trove_dns_support = True dns_driver = trove.tests.fakes.dns.FakeDnsDriver dns_instance_entry_factory = trove.tests.fakes.dns.FakeDnsInstanceEntryFactory # This will remove some of the verbose logging when trying to diagnose tox issues default_log_levels=routes.middleware=ERROR,trove.common.auth=WARN log_file = trovetest.log use_stderr = False # Show debugging output in logs (sets DEBUG log level output) debug = True # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the API server to bind_port = 8779 # Number of workers for the API service. The default will # be the number of CPUs available. (integer value) #trove_api_workers=None #DB Api Implementation db_api_implementation = trove.db.sqlalchemy.api # Configuration options for talking to nova via the novaclient. # These options are for an admin user in your keystone config. # It proxy's the token received from the user to send to nova via this admin users creds, # basically acting like the client via that proxy token. nova_proxy_admin_user = admin nova_proxy_admin_pass = 3de4922d8b6ac5a1aad9 nova_proxy_admin_tenant_id = trove_auth_url = http://0.0.0.0:5000/v2.0 nova_region_name = RegionOne nova_compute_service_type = compute nova_service_name = Compute Service # Config option for showing the IP address that nova doles out network_label_regex = ^private$ ip_regex = ^(15.|123.) black_list_regex = ^(10.0.0.) # Config options for enabling volume service trove_volume_support = True nova_volume_service_type = volume nova_volume_service_name = Volume Service device_path = /dev/vdb max_accepted_volume_size = 25 max_instances_per_tenant = 55 max_volumes_per_tenant = 100 max_backups_per_tenant = 5 volume_time_out=30 # Config options for rate limits http_get_rate = 500 http_post_rate = 500 http_put_rate = 500 http_delete_rate = 500 # default datastore default_datastore = a00000a0-00a0-0a00-00a0-000a000000aa # Auth admin_roles = admin # Users to ignore for user create/list/delete operations ignore_users = os_admin, root ignore_dbs = lost+found, mysql, information_schema # Guest related conf agent_heartbeat_time = 10 agent_call_low_timeout = 5 agent_call_high_timeout = 150 server_delete_time_out=10 use_nova_server_volume = False dns_time_out = 120 resize_time_out = 120 revert_time_out = 120 # usage notifications notification_driver = trove.tests.util.usage notification_service_id = mysql:123,percona:abc # ============ notifer queue kombu connection options ======================== notifier_queue_hostname = localhost notifier_queue_userid = guest notifier_queue_password = guest notifier_queue_ssl = False notifier_queue_port = 5672 notifier_queue_virtual_host = / notifier_queue_transport = memory control_exchange = trove paste_config_file=api-paste.ini.test [oslo_messaging_rabbit] # AMQP Connection info # Deprecated group/name - [DEFAULT]/rabbit_password rabbit_password=f7999d1955c5014aa32c [mysql] volume_support = True device_path = /dev/vdb [redis] # redis uses local storage volume_support = False # default device_path = None [cassandra] volume_support = True device_path = /dev/vdb [couchbase] volume_support = True device_path = /dev/vdb [mongodb] volume_support = True device_path = /dev/vdb [database] # SQLAlchemy connection string for the reference implementation # registry server. Any valid SQLAlchemy connection string is fine. 
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine connection = sqlite:///trove_test.sqlite #connection = mysql://root:e1a2c042c828d3566d0a@localhost/trove #connection = postgresql://trove:trove@localhost/trove # Period in seconds after which SQLAlchemy should reestablish its connection # to the database. # # MySQL uses a default `wait_timeout` of 8 hours, after which it will drop # idle connections. This can result in 'MySQL Gone Away' exceptions. If you # notice this, you can lower this value to ensure that SQLAlchemy reconnects # before MySQL can drop the connection. idle_timeout = 3600 [composite:trove] use = call:trove.common.wsgi:versioned_urlmap /: versions /v1.0: troveapi [app:versions] paste.app_factory = trove.versions:app_factory [pipeline:troveapi] pipeline = faultwrapper authtoken authorization contextwrapper ratelimit extensions troveapp # pipeline = debug troveapp [filter:extensions] paste.filter_factory = trove.common.extensions:factory [filter:authtoken] paste.filter_factory = trove.tests.fakes.keystone:filter_factory service_protocol = http service_host = 127.0.0.1 service_port = 5000 auth_host = 127.0.0.1 auth_port = 35357 auth_protocol = http auth_uri = http://127.0.0.1:5000/ [filter:authorization] paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory [filter:contextwrapper] paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory [filter:faultwrapper] paste.filter_factory = trove.common.wsgi:FaultWrapper.factory [filter:ratelimit] paste.filter_factory = trove.tests.fakes.limits:FakeRateLimitingMiddleware.factory [app:troveapp] paste.app_factory = trove.common.api:app_factory #Add this filter to log request and response for debugging [filter:debug] paste.filter_factory = trove.common.wsgi:Debug trove-5.0.0/etc/trove/trove.conf.sample0000664000567000056710000002313312701410320021204 0ustar jenkinsjenkins00000000000000[DEFAULT] # Show more verbose log output (sets INFO log level output) verbose = True # Show debugging output in logs (sets DEBUG log level output) debug = True # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the API server to bind_port = 8779 # Number of workers for the API service. The default will # be the number of CPUs available. (integer value) #trove_api_workers=None #===================== RPC Configuration ================================= # URL representing the messaging driver to use and its full configuration. # If not set, we fall back to the 'rpc_backend' option and driver specific # configuration. #transport_url= # The messaging driver to use. Options include rabbit, qpid and zmq. # Default is rabbit. (string value) #rpc_backend=rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the 'transport_url option. control_exchange = trove # Maximum line size of message headers to be accepted. # max_header_line may need to be increased when using large tokens # (typically those generated by the Keystone v3 API with big service # catalogs) # max_header_line = 16384 #DB Api Implementation db_api_implementation = "trove.db.sqlalchemy.api" # Configuration options for talking to nova via the novaclient. trove_auth_url = http://0.0.0.0:5000/v2.0 #nova_compute_url = http://localhost:8774/v2 #cinder_url = http://localhost:8776/v1 #swift_url = http://localhost:8080/v1/AUTH_ #neutron_url = http://localhost:9696/ # nova_compute_url, cinder_url, swift_url, and heat_url can all be fetched # from Keystone. 
To fetch from Keystone, comment out nova_compute_url, # cinder_url, swift_url, and heat_url and optionally uncomment the lines below. # Region name of this node. Used when searching catalog. Default value is None. #os_region_name = RegionOne # Service type to use when searching catalog. #nova_compute_service_type = compute # Service type to use when searching catalog. #cinder_service_type = volumev2 # Service type to use when searching catalog. #swift_service_type = object-store # Service type to use when searching catalog. #heat_service_type = orchestration # Service type to use when searching catalog. #neutron_service_type = network # Config option for showing the IP address that nova doles out # For nova-network, set this to the appropriate network label defined in nova # For neutron, set this to .* since users can specify custom network labels # You can also optionally specify regex'es to match the actual IP addresses # ip_regex (white-list) is applied before black_list_regex in the filter chain network_label_regex = ^private$ #network_label_regex = .* //with neutron enabled #ip_regex = ^(15.|123.) #black_list_regex = ^10.0.0. # Config options for enabling volume service trove_volume_support = True block_device_mapping = vdb device_path = /dev/vdb # Maximum volume size for an instance max_accepted_volume_size = 10 max_instances_per_tenant = 5 # Maximum volume capacity (in GB) spanning across all trove volumes per tenant max_volumes_per_tenant = 100 max_backups_per_tenant = 5 volume_time_out=30 # Config options for rate limits http_get_rate = 200 http_post_rate = 200 http_put_rate = 200 http_delete_rate = 200 http_mgmt_post_rate = 200 # Trove DNS trove_dns_support = False dns_account_id = 123456 dns_auth_url = http://127.0.0.1:5000/v2.0 dns_username = user dns_passkey = password dns_ttl = 3600 dns_domain_name = 'trove.com.' dns_domain_id = 11111111-1111-1111-1111-111111111111 dns_driver = trove.dns.designate.driver.DesignateDriver dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory dns_endpoint_url = http://127.0.0.1/v1/ dns_service_type = dns # Neutron network_driver = trove.network.nova.NovaNetwork default_neutron_networks = # Taskmanager queue name taskmanager_queue = taskmanager # Auth admin_roles = admin # Guest related conf agent_heartbeat_time = 10 agent_call_low_timeout = 5 agent_call_high_timeout = 150 # Reboot time out for instances reboot_time_out = 60 # Trove api-paste file name api_paste_config = api-paste.ini # ============ Notification System configuration =========================== # Sets the notification driver used by oslo.messaging. Options include # messaging, messagingv2, log and routing. Default is 'noop' # notification_driver=noop # Topics used for OpenStack notifications, list value. Default is 'notifications'. # notification_topics=notifications # ============ Logging information ============================= #log_dir = /integration/report #log_file = trove-api.log [database] # SQLAlchemy connection string for the reference implementation # registry server. Any valid SQLAlchemy connection string is fine. # See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine # connection = sqlite:///trove_test.sqlite connection = mysql://root:e1a2c042c828d3566d0a@localhost/trove #connection = postgresql://trove:trove@localhost/trove # Period in seconds after which SQLAlchemy should reestablish its connection # to the database. 
# # MySQL uses a default `wait_timeout` of 8 hours, after which it will drop # idle connections. This can result in 'MySQL Gone Away' exceptions. If you # notice this, you can lower this value to ensure that SQLAlchemy reconnects # before MySQL can drop the connection. idle_timeout = 3600 # ============ SSL configuration (and enablement) ============================= # In order to enable SSL for the trove api server, uncomment # the cert_file and key_file - and of course have those files # accessible. The existence of those setting and files will # enable SSL. [profiler] # If False fully disable profiling feature. #enabled = False # If False doesn't trace SQL requests. #trace_sqlalchemy = True [ssl] #cert_file = /path/to/server.crt #key_file = /path/to/server.key #optional: #ca_file = /path/to/ca_file [oslo_messaging_rabbit] # The RabbitMQ broker address where a single node is used. (string value) # Deprecated group/name - [DEFAULT]/rabbit_host #rabbit_host=localhost # The RabbitMQ broker port where a single node is used. (integer value) # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port=5672 # RabbitMQ HA cluster host:port pairs. (list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts=$rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl=false # The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid #rabbit_userid=guest # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password rabbit_password=f7999d1955c5014aa32c # The RabbitMQ virtual host. (string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host=/ [mysql] root_on_create = False # Format (single port or port range): A, B-C # where C greater than B tcp_ports = 3306 volume_support = True device_path = /dev/vdb # Users to ignore for user create/list/delete operations ignore_users = os_admin, root ignore_dbs = mysql, information_schema, performance_schema [redis] tcp_ports = 6379 #redis uses local storage volume_support = False # default device_path = None [cassandra] tcp_ports = 7000, 7001, 9042, 9160 volume_support = True device_path = /dev/vdb [couchbase] tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199 volume_support = True device_path = /dev/vdb [mongodb] tcp_ports = 2500, 27017 volume_support = True device_path = /dev/vdb num_config_servers_per_cluster = 1 num_query_routers_per_cluster = 1 [vertica] tcp_ports = 5433, 5434, 22, 5444, 5450, 4803 udp_ports = 5433, 4803, 4804, 6453 volume_support = True device_path = /dev/vdb cluster_support = True cluster_member_count = 3 api_strategy = trove.common.strategies.cluster.experimental.vertica.api.VerticaAPIStrategy # ============ CORS configuration ============================= [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. 
(list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID [cors.subdomain] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-IDtrove-5.0.0/etc/tests/0000775000567000056710000000000012701410521015722 5ustar jenkinsjenkins00000000000000trove-5.0.0/etc/tests/localhost.test.conf0000664000567000056710000000607412701410316021550 0ustar jenkinsjenkins00000000000000{ "include-files":["core.test.conf"], "fake_mode": true, "dbaas_url":"http://localhost:8779/v1.0", "version_url":"http://localhost:8779", "nova_auth_url":"http://localhost:8779/v1.0/auth", "trove_auth_url":"http://localhost:8779/v1.0/auth", "trove_client_insecure":false, "auth_strategy":"fake", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_dns_support":true, "trove_dns_checker":"trove.tests.fakes.dns.FakeDnsChecker", "trove_ip_support":false, "nova_client": null, "users": [ { "auth_user":"admin", "auth_key":"password", "tenant":"admin-1000", "tenant_id":"admin-1000", "requirements": { "is_admin":true, "services": ["trove"] } }, { "auth_user":"jsmith", "auth_key":"password", "tenant":"2500", "tenant_id":"2500", "requirements": { "is_admin":false, "services": ["trove"] } }, { "auth_user":"hub_cap", "auth_key":"password", "tenant":"3000", "tenant_id":"3000", "requirements": { "is_admin":false, "services": ["trove"] } } ], "flavors": [ { "id": 1, "name": "m1.tiny", "ram": 512 }, { "id": 2, "name": "m1.small", "ram": 2048 }, { "id": 3, "name": "m1.medium", "ram": 4096 }, { "id": 4, "name": "m1.large", "ram": 8192 }, { "id": 5, "name": "m1.xlarge", "ram": 16384 }, { "id": 6, "name": "m1.nano", "ram": 64 }, { "id": 7, "name": "m1.micro", "ram": 128 }, { "id": 8, "name": "m1.rd-smaller", "ram": 768 }, { "id": 9, "name": "tinier", "ram": 506 }, { "id": 10, "name": "m1.rd-tiny", "ram": 512 }, { "id": 11, "name": "eph.rd-tiny", "ram": 512, "local_storage": 1 }, { "id": 12, "name": "eph.rd-smaller", "ram": 768, "local_storage": 2 }, { "id": "custom", "name": "custom.small", "ram": 512, "local_storage": 1 } ], "examples": { "directory":"apidocs/src/samples", "normal_user_name":"hub_cap", "normal_user_tenant":"3000", "admin_user_name":"admin", "admin_user_tenant":"admin-1000", "replace_host":"https://troveapi.org", "replace_dns_hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com" }, "sentinel": null } trove-5.0.0/etc/tests/core.test.conf0000664000567000056710000000236312701410316020505 0ustar jenkinsjenkins00000000000000{ "report_directory":"rdli-test-report", "start_services": 
false, "test_mgmt":false, "use_local_ovz":false, "use_venv":false, "glance_code_root":"/opt/stack/glance", "glance_api_conf":"/vagrant/conf/glance-api.conf", "glance_reg_conf":"/vagrant/conf/glance-reg.conf", "glance_images_directory": "/glance_images", "glance_image": "fakey_fakerson.tar.gz", "instance_flavor_name":"m1.tiny", "instance_bigger_flavor_name":"m1.rd-smaller", "nova_code_root":"/opt/stack/nova", "nova_conf":"/home/vagrant/nova.conf", "keystone_code_root":"/opt/stack/keystone", "keystone_conf":"/etc/keystone/keystone.conf", "keystone_use_combined":true, "trove_code_root":"/opt/stack/trove", "trove_conf":"/tmp/trove.conf", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_must_have_volume":false, "trove_can_have_volume":true, "trove_main_instance_has_volume": true, "trove_max_accepted_volume_size": 25, "trove_max_instances_per_tenant": 55, "trove_max_volumes_per_tenant": 100, "use_reaper":false, "root_removed_from_instance_api": true, "root_timestamp_disabled": false, "openvz_disabled": false, "management_api_disabled": true } trove-5.0.0/trove/0000775000567000056710000000000012701410521015144 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/README0000664000567000056710000000003612701410316016025 0ustar jenkinsjenkins00000000000000This is the trove code itself trove-5.0.0/trove/backup/0000775000567000056710000000000012701410521016411 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/backup/__init__.py0000664000567000056710000000000012701410316020512 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/backup/state.py0000664000567000056710000000162112701410316020105 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # class BackupState(object): NEW = "NEW" BUILDING = "BUILDING" SAVING = "SAVING" COMPLETED = "COMPLETED" FAILED = "FAILED" DELETE_FAILED = "DELETE_FAILED" RUNNING_STATES = [NEW, BUILDING, SAVING] END_STATES = [COMPLETED, FAILED, DELETE_FAILED] trove-5.0.0/trove/backup/service.py0000664000567000056710000000660712701410316020436 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
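# Illustrative sketch (hypothetical helper, not part of this module): the
# BackupState sets defined in state.py above are typically consulted to gate
# actions on in-flight backups, e.g.:
#
#     from trove.backup.state import BackupState
#
#     def is_in_flight(backup):
#         return backup.state in BackupState.RUNNING_STATES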
from oslo_log import log as logging from trove.backup.models import Backup from trove.backup import views from trove.common import apischema from trove.common import cfg from trove.common.i18n import _ from trove.common import notification from trove.common.notification import StartNotification from trove.common import pagination from trove.common import wsgi CONF = cfg.CONF LOG = logging.getLogger(__name__) class BackupController(wsgi.Controller): """ Controller for accessing backups in the OpenStack API. """ schemas = apischema.backup def index(self, req, tenant_id): """ Return all backups information for a tenant ID. """ LOG.debug("Listing backups for tenant %s" % tenant_id) datastore = req.GET.get('datastore') context = req.environ[wsgi.CONTEXT_KEY] backups, marker = Backup.list(context, datastore) view = views.BackupViews(backups) paged = pagination.SimplePaginatedDataView(req.url, 'backups', view, marker) return wsgi.Result(paged.data(), 200) def show(self, req, tenant_id, id): """Return a single backup.""" LOG.debug("Showing a backup for tenant %s ID: '%s'" % (tenant_id, id)) context = req.environ[wsgi.CONTEXT_KEY] backup = Backup.get_by_id(context, id) return wsgi.Result(views.BackupView(backup).data(), 200) def create(self, req, body, tenant_id): LOG.info(_("Creating a backup for tenant %s"), tenant_id) context = req.environ[wsgi.CONTEXT_KEY] data = body['backup'] instance = data['instance'] name = data['name'] desc = data.get('description') parent = data.get('parent_id') context.notification = notification.DBaaSBackupCreate(context, request=req) with StartNotification(context, name=name, instance_id=instance, description=desc, parent_id=parent): backup = Backup.create(context, instance, name, desc, parent_id=parent) return wsgi.Result(views.BackupView(backup).data(), 202) def delete(self, req, tenant_id, id): LOG.info(_('Deleting backup for tenant %(tenant_id)s ' 'ID: %(backup_id)s') % {'tenant_id': tenant_id, 'backup_id': id}) context = req.environ[wsgi.CONTEXT_KEY] context.notification = notification.DBaaSBackupDelete(context, request=req) with StartNotification(context, backup_id=id): Backup.delete(context, id) return wsgi.Result(None, 202) trove-5.0.0/trove/backup/views.py0000664000567000056710000000350512701410316020125 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
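# [Editor's sketch, not part of the original source] BackupController.create()
# above consumes a request body shaped like the example below. The instance
# UUID is made up, and "description"/"parent_id" are optional; a non-null
# parent_id requests an incremental backup.
EXAMPLE_BACKUP_CREATE_BODY = {
    "backup": {
        "instance": "44b277eb-39be-4921-be31-3d61b43651d7",
        "name": "snapshot-1",
        "description": "nightly backup",
        "parent_id": None,
    }
}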
class BackupView(object): def __init__(self, backup): self.backup = backup def data(self): result = { "backup": { "id": self.backup.id, "name": self.backup.name, "description": self.backup.description, "locationRef": self.backup.location, "instance_id": self.backup.instance_id, "created": self.backup.created, "updated": self.backup.updated, "size": self.backup.size, "status": self.backup.state, "parent_id": self.backup.parent_id, } } if self.backup.datastore_version_id: result['backup']['datastore'] = { "type": self.backup.datastore.name, "version": self.backup.datastore_version.name, "version_id": self.backup.datastore_version.id } return result class BackupViews(object): def __init__(self, backups): self.backups = backups def data(self): backups = [] for b in self.backups: backups.append(BackupView(b).data()["backup"]) return {"backups": backups} trove-5.0.0/trove/backup/models.py0000664000567000056710000003202012701410316020245 0ustar jenkinsjenkins00000000000000# Copyright [2013] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Model classes that form the core of snapshots functionality.""" from oslo_log import log as logging from sqlalchemy import desc from swiftclient.client import ClientException from trove.backup.state import BackupState from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.remote import create_swift_client from trove.common import utils from trove.datastore import models as datastore_models from trove.db.models import DatabaseModelBase from trove.quota.quota import run_with_quotas from trove.taskmanager import api CONF = cfg.CONF LOG = logging.getLogger(__name__) class Backup(object): @classmethod def validate_can_perform_action(cls, instance, operation): """ Raises exception if backup strategy is not supported """ datastore_cfg = CONF.get(instance.datastore_version.manager) if not datastore_cfg or not ( datastore_cfg.get('backup_strategy', None)): raise exception.DatastoreOperationNotSupported( operation=operation, datastore=instance.datastore.name) @classmethod def create(cls, context, instance, name, description=None, parent_id=None): """ create db record for Backup :param cls: :param context: tenant_id included :param instance: :param name: :param description: :return: """ def _create_resources(): # parse the ID from the Ref instance_id = utils.get_id_from_href(instance) # verify that the instance exists and can perform actions from trove.instance.models import Instance instance_model = Instance.load(context, instance_id) instance_model.validate_can_perform_action() cls.validate_can_perform_action( instance_model, 'backup_create') cls.verify_swift_auth_token(context) if instance_model.cluster_id is not None: raise exception.ClusterInstanceOperationNotSupported() ds = instance_model.datastore ds_version = instance_model.datastore_version parent = None if parent_id: # Look up the parent info or fail early if not found or if # the user does not have access to the parent. 
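                # (Editor's note) The parent's Swift location and checksum
                # recorded here are handed to the guest agent via the
                # backup_info dict below, so it can build the incremental
                # backup on top of its parent.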
_parent = cls.get_by_id(context, parent_id) parent = { 'location': _parent.location, 'checksum': _parent.checksum, } try: db_info = DBBackup.create(name=name, description=description, tenant_id=context.tenant, state=BackupState.NEW, instance_id=instance_id, parent_id=parent_id, datastore_version_id=ds_version.id, deleted=False) except exception.InvalidModelError as ex: LOG.exception(_("Unable to create backup record for " "instance: %s"), instance_id) raise exception.BackupCreationError(str(ex)) backup_info = {'id': db_info.id, 'name': name, 'description': description, 'instance_id': instance_id, 'backup_type': db_info.backup_type, 'checksum': db_info.checksum, 'parent': parent, 'datastore': ds.name, 'datastore_version': ds_version.name, } api.API(context).create_backup(backup_info, instance_id) return db_info return run_with_quotas(context.tenant, {'backups': 1}, _create_resources) @classmethod def running(cls, instance_id, exclude=None): """ Returns the first running backup for instance_id :param instance_id: Id of the instance :param exclude: Backup ID to exclude from the query (any other running) """ query = DBBackup.query() query = query.filter(DBBackup.instance_id == instance_id, DBBackup.state.in_(BackupState.RUNNING_STATES)) # filter out deleted backups, PEP8 does not like field == False! query = query.filter_by(deleted=False) if exclude: query = query.filter(DBBackup.id != exclude) return query.first() @classmethod def get_by_id(cls, context, backup_id, deleted=False): """ get the backup for that id :param cls: :param backup_id: Id of the backup to return :param deleted: Return deleted backups :return: """ try: db_info = DBBackup.find_by(context=context, id=backup_id, deleted=deleted) return db_info except exception.NotFound: raise exception.NotFound(uuid=backup_id) @classmethod def _paginate(cls, context, query): """Paginate the results of the base query. We use limit/offset as the results need to be ordered by date and not the primary key. """ marker = int(context.marker or 0) limit = int(context.limit or CONF.backups_page_size) # order by 'updated DESC' to show the most recent backups first query = query.order_by(desc(DBBackup.updated)) # Apply limit/offset query = query.limit(limit) query = query.offset(marker) # check if we need to send a marker for the next page if query.count() < limit: marker = None else: marker += limit return query.all(), marker @classmethod def list(cls, context, datastore=None): """ list all live Backups belong to given tenant :param cls: :param context: tenant_id included :param datastore: datastore to filter by :return: """ query = DBBackup.query() filters = [DBBackup.tenant_id == context.tenant, DBBackup.deleted == 0] if datastore: ds = datastore_models.Datastore.load(datastore) filters.append(datastore_models.DBDatastoreVersion. 
datastore_id == ds.id) query = query.join(datastore_models.DBDatastoreVersion) query = query.filter(*filters) return cls._paginate(context, query) @classmethod def list_for_instance(cls, context, instance_id): """ list all live Backups associated with given instance :param cls: :param instance_id: :return: """ query = DBBackup.query() if context.is_admin: query = query.filter_by(instance_id=instance_id, deleted=False) else: query = query.filter_by(instance_id=instance_id, tenant_id=context.tenant, deleted=False) return cls._paginate(context, query) @classmethod def get_last_completed(cls, context, instance_id, include_incremental=True): """ returns last completed backup :param cls: :param instance_id: :param include_incremental: :return: """ last_backup = None backups, marker = cls.list_for_instance(context, instance_id) # we don't care about the marker since we only want the first backup # and they are ordered descending based on date (what we want) for backup in backups: if backup.state == BackupState.COMPLETED and ( include_incremental or not backup.parent_id): if not last_backup or backup.updated > last_backup.updated: last_backup = backup return last_backup @classmethod def fail_for_instance(cls, instance_id): query = DBBackup.query() query = query.filter(DBBackup.instance_id == instance_id, DBBackup.state.in_(BackupState.RUNNING_STATES)) query = query.filter_by(deleted=False) for backup in query.all(): backup.state = BackupState.FAILED backup.save() @classmethod def delete(cls, context, backup_id): """ update Backup table on deleted flag for given Backup :param cls: :param context: context containing the tenant id and token :param backup_id: Backup uuid :return: """ # Recursively delete all children and grandchildren of this backup. query = DBBackup.query() query = query.filter_by(parent_id=backup_id, deleted=False) for child in query.all(): cls.delete(context, child.id) def _delete_resources(): backup = cls.get_by_id(context, backup_id) if backup.is_running: msg = _("Backup %s cannot be deleted because it is running.") raise exception.UnprocessableEntity(msg % backup_id) cls.verify_swift_auth_token(context) api.API(context).delete_backup(backup_id) return run_with_quotas(context.tenant, {'backups': -1}, _delete_resources) @classmethod def verify_swift_auth_token(cls, context): try: client = create_swift_client(context) client.get_account() except ClientException: raise exception.SwiftAuthError(tenant_id=context.tenant) except exception.NoServiceEndpoint: raise exception.SwiftNotFound(tenant_id=context.tenant) def persisted_models(): return {'backups': DBBackup} class DBBackup(DatabaseModelBase): """A table for Backup records.""" _data_fields = ['id', 'name', 'description', 'location', 'backup_type', 'size', 'tenant_id', 'state', 'instance_id', 'checksum', 'backup_timestamp', 'deleted', 'created', 'updated', 'deleted_at', 'parent_id', 'datastore_version_id'] preserve_on_delete = True @property def is_running(self): return self.state in BackupState.RUNNING_STATES @property def is_done(self): return self.state in BackupState.END_STATES @property def is_done_successfuly(self): return self.state == BackupState.COMPLETED @property def filename(self): if self.location: last_slash = self.location.rfind("/") if last_slash < 0: raise ValueError(_("Bad location for backup object: %s") % self.location) return self.location[last_slash + 1:] else: return None @property def datastore(self): if self.datastore_version_id: return datastore_models.Datastore.load( self.datastore_version.datastore_id) 
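    # (Editor's note) Like 'datastore' above, the 'datastore_version'
    # property below resolves its model lazily from datastore_version_id on
    # every access; nothing is cached on the DBBackup record.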
@property def datastore_version(self): if self.datastore_version_id: return datastore_models.DatastoreVersion.load_by_uuid( self.datastore_version_id) def check_swift_object_exist(self, context, verify_checksum=False): try: parts = self.location.split('/') obj = parts[-1] container = parts[-2] client = create_swift_client(context) LOG.debug("Checking if backup exists in %s" % self.location) resp = client.head_object(container, obj) if verify_checksum: LOG.debug("Checking if backup checksum matches swift " "for backup %s" % self.id) # swift returns etag in double quotes # e.g. '"dc3b0827f276d8d78312992cc60c2c3f"' swift_checksum = resp['etag'].strip('"') if self.checksum != swift_checksum: raise exception.RestoreBackupIntegrityError( backup_id=self.id) return True except ClientException as e: if e.http_status == 404: return False else: raise exception.SwiftAuthError(tenant_id=context.tenant) trove-5.0.0/trove/quota/0000775000567000056710000000000012701410521016275 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/quota/quota.py0000664000567000056710000003134712701410316020012 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for DB instances and resources.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from trove.common import exception from trove.common.i18n import _ from trove.quota.models import Quota from trove.quota.models import QuotaUsage from trove.quota.models import Reservation from trove.quota.models import Resource LOG = logging.getLogger(__name__) CONF = cfg.CONF class DbQuotaDriver(object): """ Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ def __init__(self, resources): self.resources = resources def get_quota_by_tenant(self, tenant_id, resource): """Get a specific quota by tenant.""" quotas = Quota.find_all(tenant_id=tenant_id, resource=resource).all() if len(quotas) == 0: return Quota(tenant_id, resource, self.resources[resource].default) return quotas[0] def get_all_quotas_by_tenant(self, tenant_id, resources): """ Retrieve the quotas for the given tenant. :param resources: A list of the registered resource to get. :param tenant_id: The ID of the tenant to return quotas for. 
""" all_quotas = Quota.find_all(tenant_id=tenant_id).all() result_quotas = {quota.resource: quota for quota in all_quotas if quota.resource in resources} if len(result_quotas) != len(resources): for resource in resources: # Not in the DB, return default value if resource not in result_quotas: quota = Quota(tenant_id, resource, self.resources[resource].default) result_quotas[resource] = quota return result_quotas def get_quota_usage_by_tenant(self, tenant_id, resource): """Get a specific quota usage by tenant.""" quotas = QuotaUsage.find_all(tenant_id=tenant_id, resource=resource).all() if len(quotas) == 0: return QuotaUsage.create(tenant_id=tenant_id, in_use=0, reserved=0, resource=resource) return quotas[0] def get_all_quota_usages_by_tenant(self, tenant_id, resources): """ Retrieve the quota usagess for the given tenant. :param tenant_id: The ID of the tenant to return quotas for. :param resources: A list of the registered resources to get. """ all_usages = QuotaUsage.find_all(tenant_id=tenant_id).all() result_usages = {usage.resource: usage for usage in all_usages if usage.resource in resources} if len(result_usages) != len(resources): for resource in resources: # Not in the DB, return default value if resource not in result_usages: usage = QuotaUsage.create(tenant_id=tenant_id, in_use=0, reserved=0, resource=resource) result_usages[resource] = usage return result_usages def get_defaults(self, resources): """Given a list of resources, retrieve the default quotas. :param resources: A list of the registered resources. """ quotas = {} for resource in resources.values(): quotas[resource.name] = resource.default return quotas def check_quotas(self, tenant_id, resources, deltas): """Check quotas for a tenant. This method checks quotas against current usage, reserved resources and the desired deltas. If any of the proposed values is over the defined quota, an QuotaExceeded exception will be raised with the sorted list of the resources which are too high. :param tenant_id: The ID of the tenant reserving the resources. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. """ unregistered_resources = [delta for delta in deltas if delta not in resources] if unregistered_resources: raise exception.QuotaResourceUnknown( unknown=unregistered_resources) quotas = self.get_all_quotas_by_tenant(tenant_id, deltas.keys()) quota_usages = self.get_all_quota_usages_by_tenant(tenant_id, deltas.keys()) overs = [resource for resource in deltas if (int(deltas[resource]) > 0 and (quota_usages[resource].in_use + quota_usages[resource].reserved + int(deltas[resource])) > quotas[resource].hard_limit)] if overs: raise exception.QuotaExceeded(overs=sorted(overs)) def reserve(self, tenant_id, resources, deltas): """Check quotas and reserve resources for a tenant. This method checks quotas against current usage, reserved resources and the desired deltas. If any of the proposed values is over the defined quota, an QuotaExceeded exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation objects which were created. :param tenant_id: The ID of the tenant reserving the resources. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. 
""" self.check_quotas(tenant_id, resources, deltas) quota_usages = self.get_all_quota_usages_by_tenant(tenant_id, deltas.keys()) reservations = [] for resource in sorted(deltas): reserved = deltas[resource] usage = quota_usages[resource] usage.reserved += reserved usage.save() resv = Reservation.create(usage_id=usage.id, delta=reserved, status=Reservation.Statuses.RESERVED) reservations.append(resv) return reservations def commit(self, reservations): """Commit reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ for reservation in reservations: usage = QuotaUsage.find_by(id=reservation.usage_id) usage.in_use += reservation.delta if usage.in_use < 0: usage.in_use = 0 usage.reserved -= reservation.delta reservation.status = Reservation.Statuses.COMMITTED usage.save() reservation.save() def rollback(self, reservations): """Roll back reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ for reservation in reservations: usage = QuotaUsage.find_by(id=reservation.usage_id) usage.reserved -= reservation.delta reservation.status = Reservation.Statuses.ROLLEDBACK usage.save() reservation.save() class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} if not quota_driver_class: quota_driver_class = CONF.quota_driver if isinstance(quota_driver_class, basestring): quota_driver_class = importutils.import_object(quota_driver_class, self._resources) self._driver = quota_driver_class def __contains__(self, resource): return resource in self._resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a dictionary of resources.""" for resource in resources: self.register_resource(resource) def get_quota_by_tenant(self, tenant_id, resource): """Get a specific quota by tenant.""" return self._driver.get_quota_by_tenant(tenant_id, resource) def get_quota_usage(self, quota): """Get the usage for a quota.""" return self._driver.get_quota_usage_by_tenant(quota.tenant_id, quota.resource) def get_defaults(self): """Retrieve the default quotas.""" return self._driver.get_defaults(self._resources) def get_all_quotas_by_tenant(self, tenant_id): """Retrieve the quotas for the given tenant. :param tenant_id: The ID of the tenant to return quotas for. """ return self._driver.get_all_quotas_by_tenant(tenant_id, self._resources) def check_quotas(self, tenant_id, **deltas): self._driver.check_quotas(tenant_id, self._resources, deltas) def reserve(self, tenant_id, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an QuotaExceeded exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param tenant_id: The ID of the tenant to reserve quotas for. 
""" reservations = self._driver.reserve(tenant_id, self._resources, deltas) LOG.debug("Created reservations %(reservations)s" % {'reservations': reservations}) return reservations def commit(self, reservations): """Commit reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ try: self._driver.commit(reservations) except Exception: LOG.exception(_("Failed to commit reservations " "%(reservations)s") % {'reservations': reservations}) def rollback(self, reservations): """Roll back reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ try: self._driver.rollback(reservations) except Exception: LOG.exception(_("Failed to roll back reservations " "%(reservations)s") % {'reservations': reservations}) @property def resources(self): return sorted(self._resources.keys()) QUOTAS = QuotaEngine() ''' Define all kind of resources here ''' resources = [Resource(Resource.INSTANCES, 'max_instances_per_tenant'), Resource(Resource.BACKUPS, 'max_backups_per_tenant'), Resource(Resource.VOLUMES, 'max_volumes_per_tenant')] QUOTAS.register_resources(resources) def run_with_quotas(tenant_id, deltas, f): """Quota wrapper.""" reservations = QUOTAS.reserve(tenant_id, **deltas) result = None try: result = f() except Exception: QUOTAS.rollback(reservations) raise else: QUOTAS.commit(reservations) return result def check_quotas(tenant_id, deltas): QUOTAS.check_quotas(tenant_id, **deltas) trove-5.0.0/trove/quota/__init__.py0000664000567000056710000000000012701410316020376 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/quota/models.py0000664000567000056710000000603512701410316020140 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging

from trove.common import cfg
from trove.common import utils
from trove.db import models as dbmodels


LOG = logging.getLogger(__name__)
CONF = cfg.CONF


def enum(**enums):
    return type('Enum', (), enums)


class Quota(dbmodels.DatabaseModelBase):
    """Defines the base model class for a quota."""

    _data_fields = ['created', 'updated', 'tenant_id', 'resource',
                    'hard_limit', 'id']

    def __init__(self, tenant_id, resource, hard_limit,
                 id=None, created=None, updated=None):
        self.tenant_id = tenant_id
        self.resource = resource
        self.hard_limit = hard_limit
        # Generate per-instance defaults here; evaluating them as argument
        # defaults would freeze a single uuid/timestamp for the lifetime of
        # the process.
        self.id = id or utils.generate_uuid()
        self.created = created or utils.utcnow()
        self.updated = updated or utils.utcnow()


class QuotaUsage(dbmodels.DatabaseModelBase):
    """Defines the quota usage for a tenant."""

    _data_fields = ['created', 'updated', 'tenant_id', 'resource',
                    'in_use', 'reserved', 'id']


class Reservation(dbmodels.DatabaseModelBase):
    """Defines the reservation for a quota."""

    _data_fields = ['created', 'updated', 'usage_id',
                    'id', 'delta', 'status']

    Statuses = enum(NEW='New',
                    RESERVED='Reserved',
                    COMMITTED='Committed',
                    ROLLEDBACK='Rolled Back')


def persisted_models():
    return {
        'quotas': Quota,
        'quota_usages': QuotaUsage,
        'reservations': Reservation,
    }


class Resource(object):
    """Describe a single resource for quota checking."""

    INSTANCES = 'instances'
    VOLUMES = 'volumes'
    BACKUPS = 'backups'

    def __init__(self, name, flag=None):
        """
        Initializes a Resource.

        :param name: The name of the resource, i.e., "volumes".
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        self.name = name
        self.flag = flag

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return (isinstance(other, Resource) and
                self.name == other.name and
                self.flag == other.flag)

    @property
    def default(self):
        """Return the default value of the quota."""
        return CONF[self.flag] if self.flag is not None else -1
trove-5.0.0/trove/configuration/0000775000567000056710000000000012701410521020013 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/configuration/__init__.py0000664000567000056710000000000012701410316022114 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/configuration/service.py0000664000567000056710000003657412701410316022036 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime

from oslo_log import log as logging

import trove.common.apischema as apischema
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import notification
from trove.common.notification import StartNotification, EndNotification
from trove.common import pagination
from trove.common import wsgi
from trove.configuration import models
from trove.configuration.models import DBConfigurationParameter
from trove.configuration import views
from trove.datastore import models as ds_models
from trove.instance import models as instances_models


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class ConfigurationsController(wsgi.Controller):

    schemas = apischema.configuration

    def index(self, req, tenant_id):
        context = req.environ[wsgi.CONTEXT_KEY]
        configs, marker = models.Configurations.load(context)
        view = views.ConfigurationsView(configs)
        paged = pagination.SimplePaginatedDataView(req.url, 'configurations',
                                                   view, marker)
        return wsgi.Result(paged.data(), 200)

    def show(self, req, tenant_id, id):
        LOG.debug("Showing configuration group %(id)s on tenant %(tenant)s"
                  % {"tenant": tenant_id, "id": id})
        context = req.environ[wsgi.CONTEXT_KEY]
        configuration = models.Configuration.load(context, id)
        configuration_items = models.Configuration.load_items(context, id)

        configuration.instance_count = instances_models.DBInstance.find_all(
            tenant_id=context.tenant,
            configuration_id=configuration.id,
            deleted=False).count()

        return wsgi.Result(views.DetailedConfigurationView(
            configuration,
            configuration_items).data(), 200)

    def instances(self, req, tenant_id, id):
        context = req.environ[wsgi.CONTEXT_KEY]
        configuration = models.Configuration.load(context, id)
        instances = instances_models.DBInstance.find_all(
            tenant_id=context.tenant,
            configuration_id=configuration.id,
            deleted=False)
        limit = int(context.limit or CONF.instances_page_size)
        if limit > CONF.instances_page_size:
            limit = CONF.instances_page_size
        data_view = instances_models.DBInstance.find_by_pagination(
            'instances', instances, "foo",
            limit=limit,
            marker=context.marker)
        view = views.DetailedConfigurationInstancesView(data_view.collection)
        paged = pagination.SimplePaginatedDataView(req.url, 'instances',
                                                   view,
                                                   data_view.next_page_marker)
        return wsgi.Result(paged.data(), 200)

    def create(self, req, body, tenant_id):
        LOG.debug("req : '%s'\n\n" % req)
        LOG.debug("body : '%s'\n\n" % body)
        context = req.environ[wsgi.CONTEXT_KEY]
        context.notification = notification.DBaaSConfigurationCreate(
            context, request=req)
        name = body['configuration']['name']
        description = body['configuration'].get('description')
        values = body['configuration']['values']
        msg = _("Creating configuration group on tenant "
                "%(tenant_id)s with name: %(cfg_name)s")
        LOG.info(msg % {"tenant_id": tenant_id, "cfg_name": name})
        datastore_args = body['configuration'].get('datastore', {})
        datastore, datastore_version = (
            ds_models.get_datastore_version(**datastore_args))

        with StartNotification(context, name=name, datastore=datastore.name,
                               datastore_version=datastore_version.name):
            configItems = []
            if values:
                # validate that the values passed in are permitted by the
                # operator.
ConfigurationsController._validate_configuration( body['configuration']['values'], datastore_version, models.DatastoreConfigurationParameters.load_parameters( datastore_version.id)) for k, v in values.iteritems(): configItems.append(DBConfigurationParameter( configuration_key=k, configuration_value=v)) cfg_group = models.Configuration.create(name, description, tenant_id, datastore.id, datastore_version.id) with EndNotification(context, configuration_id=cfg_group.id): cfg_group_items = models.Configuration.create_items( cfg_group.id, values) view_data = views.DetailedConfigurationView(cfg_group, cfg_group_items) return wsgi.Result(view_data.data(), 200) def delete(self, req, tenant_id, id): msg = _("Deleting configuration group %(cfg_id)s on tenant: " "%(tenant_id)s") LOG.info(msg % {"tenant_id": tenant_id, "cfg_id": id}) context = req.environ[wsgi.CONTEXT_KEY] context.notification = notification.DBaaSConfigurationDelete( context, request=req) with StartNotification(context, configuration_id=id): group = models.Configuration.load(context, id) instances = instances_models.DBInstance.find_all( tenant_id=context.tenant, configuration_id=id, deleted=False).all() if instances: raise exception.InstanceAssignedToConfiguration() models.Configuration.delete(context, group) return wsgi.Result(None, 202) def update(self, req, body, tenant_id, id): msg = _("Updating configuration group %(cfg_id)s for tenant " "id %(tenant_id)s") LOG.info(msg % {"tenant_id": tenant_id, "cfg_id": id}) context = req.environ[wsgi.CONTEXT_KEY] group = models.Configuration.load(context, id) # if name/description are provided in the request body, update the # model with these values as well. if 'name' in body['configuration']: group.name = body['configuration']['name'] if 'description' in body['configuration']: group.description = body['configuration']['description'] context.notification = notification.DBaaSConfigurationUpdate( context, request=req) with StartNotification(context, configuration_id=id, name=group.name, description=group.description): items = self._configuration_items_list(group, body['configuration']) deleted_at = datetime.utcnow() models.Configuration.remove_all_items(context, group.id, deleted_at) models.Configuration.save(group, items) self._refresh_on_all_instances(context, id) return wsgi.Result(None, 202) def edit(self, req, body, tenant_id, id): context = req.environ[wsgi.CONTEXT_KEY] context.notification = notification.DBaaSConfigurationEdit( context, request=req) with StartNotification(context, configuration_id=id): group = models.Configuration.load(context, id) items = self._configuration_items_list(group, body['configuration']) models.Configuration.save(group, items) self._refresh_on_all_instances(context, id) def _refresh_on_all_instances(self, context, configuration_id): """Refresh a configuration group on all its instances. 
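
        Loads every non-deleted instance currently assigned to the group
        and pushes the updated values to each one through
        update_overrides().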
""" dbinstances = instances_models.DBInstance.find_all( tenant_id=context.tenant, configuration_id=configuration_id, deleted=False).all() LOG.debug( "All instances with configuration group '%s' on tenant '%s': %s" % (configuration_id, context.tenant, dbinstances)) config = models.Configuration(context, configuration_id) for dbinstance in dbinstances: LOG.debug("Applying configuration group '%s' to instance: %s" % (configuration_id, dbinstance.id)) instance = instances_models.Instance.load(context, dbinstance.id) instance.update_overrides(config) def _configuration_items_list(self, group, configuration): ds_version_id = group.datastore_version_id ds_version = ds_models.DatastoreVersion.load_by_uuid(ds_version_id) items = [] if 'values' in configuration: # validate that the values passed in are permitted by the operator. ConfigurationsController._validate_configuration( configuration['values'], ds_version, models.DatastoreConfigurationParameters.load_parameters( ds_version.id)) for k, v in configuration['values'].iteritems(): items.append(DBConfigurationParameter( configuration_id=group.id, configuration_key=k, configuration_value=v, deleted=False)) return items @staticmethod def _validate_configuration(values, datastore_version, config_rules): LOG.info(_("Validating configuration values")) # create rules dictionary based on parameter name rules_lookup = {} for item in config_rules: rules_lookup[item.name.lower()] = item # checking if there are any rules for the datastore if not rules_lookup: output = {"version": datastore_version.name, "name": datastore_version.datastore_name} msg = _("Configuration groups are not supported for this " "datastore: %(name)s %(version)s") % output raise exception.UnprocessableEntity(message=msg) for k, v in values.iteritems(): key = k.lower() # parameter name validation if key not in rules_lookup: output = {"key": k, "version": datastore_version.name, "name": datastore_version.datastore_name} msg = _("The configuration parameter %(key)s is not " "supported for this datastore: " "%(name)s %(version)s.") % output raise exception.UnprocessableEntity(message=msg) rule = rules_lookup[key] # type checking value_type = rule.data_type if not isinstance(v, ConfigurationsController._find_type( value_type)): output = {"key": k, "type": value_type} msg = _("The value provided for the configuration " "parameter %(key)s is not of type %(type)s.") % output raise exception.UnprocessableEntity(message=msg) # integer min/max checking if isinstance(v, (int, long)) and not isinstance(v, bool): if rule.min_size is not None: try: min_value = int(rule.min_size) except ValueError: raise exception.TroveError(_( "Invalid or unsupported min value defined in the " "configuration-parameters configuration file. " "Expected integer.")) if v < min_value: output = {"key": k, "min": min_value} message = _( "The value for the configuration parameter " "%(key)s is less than the minimum allowed: " "%(min)s") % output raise exception.UnprocessableEntity(message=message) if rule.max_size is not None: try: max_value = int(rule.max_size) except ValueError: raise exception.TroveError(_( "Invalid or unsupported max value defined in the " "configuration-parameters configuration file. 
" "Expected integer.")) if v > max_value: output = {"key": k, "max": max_value} message = _( "The value for the configuration parameter " "%(key)s is greater than the maximum " "allowed: %(max)s") % output raise exception.UnprocessableEntity(message=message) @staticmethod def _find_type(value_type): if value_type == "boolean": return bool elif value_type == "string": return basestring elif value_type == "integer": return (int, long) else: raise exception.TroveError(_( "Invalid or unsupported type defined in the " "configuration-parameters configuration file.")) @staticmethod def _get_item(key, dictList): for item in dictList: if key == item.get('name'): return item raise exception.UnprocessableEntity( message=_("%s is not a supported configuration parameter.") % key) class ParametersController(wsgi.Controller): def index(self, req, tenant_id, datastore, id): ds, ds_version = ds_models.get_datastore_version( type=datastore, version=id) rules = models.DatastoreConfigurationParameters.load_parameters( ds_version.id) return wsgi.Result(views.ConfigurationParametersView(rules).data(), 200) def show(self, req, tenant_id, datastore, id, name): ds, ds_version = ds_models.get_datastore_version( type=datastore, version=id) rule = models.DatastoreConfigurationParameters.load_parameter_by_name( ds_version.id, name) return wsgi.Result(views.ConfigurationParameterView(rule).data(), 200) def index_by_version(self, req, tenant_id, version): ds_version = ds_models.DatastoreVersion.load_by_uuid(version) rules = models.DatastoreConfigurationParameters.load_parameters( ds_version.id) return wsgi.Result(views.ConfigurationParametersView(rules).data(), 200) def show_by_version(self, req, tenant_id, version, name): ds_models.DatastoreVersion.load_by_uuid(version) rule = models.DatastoreConfigurationParameters.load_parameter_by_name( version, name) return wsgi.Result(views.ConfigurationParameterView(rule).data(), 200) trove-5.0.0/trove/configuration/views.py0000664000567000056710000001075412701410316021533 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging LOG = logging.getLogger(__name__) class ConfigurationView(object): def __init__(self, configuration): self.configuration = configuration def data(self): configuration_dict = { "id": self.configuration.id, "name": self.configuration.name, "description": self.configuration.description, "created": self.configuration.created, "updated": self.configuration.updated, "datastore_version_id": self.configuration.datastore_version_id, "datastore_name": self.configuration.datastore.name, "datastore_version_name": self.configuration.datastore_version.name} return {"configuration": configuration_dict} class ConfigurationsView(object): def __init__(self, configurations): self.configurations = configurations def data(self): data = [] for configuration in self.configurations: data.append(self.data_for_configuration(configuration)) return {"configurations": data} def data_for_configuration(self, configuration): view = ConfigurationView(configuration) return view.data()['configuration'] class DetailedConfigurationInstancesView(object): def __init__(self, instances): self.instances = instances def instance_data(self): instances_list = [] if self.instances: for instance in self.instances: instances_list.append( { "id": instance.id, "name": instance.name } ) return instances_list def data(self): return {"instances": self.instance_data()} class DetailedConfigurationView(object): def __init__(self, configuration, configuration_items): self.configuration = configuration self.configuration_items = configuration_items def data(self): values = {} for configItem in self.configuration_items: key = configItem.configuration_key value = configItem.configuration_value values[key] = value configuration_dict = { "id": self.configuration.id, "name": self.configuration.name, "description": self.configuration.description, "values": values, "created": self.configuration.created, "updated": self.configuration.updated, "instance_count": getattr(self.configuration, "instance_count", 0), "datastore_name": self.configuration.datastore.name, "datastore_version_id": self.configuration.datastore_version_id, "datastore_version_name": self.configuration.datastore_version.name } return {"configuration": configuration_dict} class ConfigurationParameterView(object): def __init__(self, config): self.config = config def data(self): # v1 api is to be a 'true' or 'false' json boolean instead of 1/0 restart_required = True if self.config.restart_required else False ret = { "name": self.config.name, "datastore_version_id": self.config.datastore_version_id, "restart_required": restart_required, "type": self.config.data_type, } if self.config.max_size: ret["max"] = int(self.config.max_size) if self.config.min_size: ret["min"] = int(self.config.min_size) return ret class ConfigurationParametersView(object): def __init__(self, configs): self.configs = configs def data(self): params = [] for p in self.configs: param = ConfigurationParameterView(p) params.append(param.data()) return {"configuration-parameters": params} trove-5.0.0/trove/configuration/models.py0000664000567000056710000003512212701410316021655 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime import json from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.exception import ModelNotFoundError from trove.common.i18n import _ from trove.common import utils from trove.datastore import models as dstore_models from trove.db import get_db_api from trove.db import models as dbmodels CONF = cfg.CONF LOG = logging.getLogger(__name__) class Configurations(object): DEFAULT_LIMIT = CONF.configurations_page_size @staticmethod def load(context): if context is None: raise TypeError("Argument context not defined.") elif id is None: raise TypeError("Argument is not defined.") if context.is_admin: db_info = DBConfiguration.find_all(deleted=False) if db_info.count() == 0: LOG.debug("No configurations found for admin user") else: db_info = DBConfiguration.find_all(tenant_id=context.tenant, deleted=False) if db_info.count() == 0: LOG.debug("No configurations found for tenant %s" % context.tenant) limit = utils.pagination_limit(context.limit, Configurations.DEFAULT_LIMIT) data_view = DBConfiguration.find_by_pagination('configurations', db_info, "foo", limit=limit, marker=context.marker) next_marker = data_view.next_page_marker return data_view.collection, next_marker class Configuration(object): def __init__(self, context, configuration_id): self.context = context self.configuration_id = configuration_id @property def instances(self): return self.instances @property def items(self): return self.items @staticmethod def create(name, description, tenant_id, datastore, datastore_version): configurationGroup = DBConfiguration.create( name=name, description=description, tenant_id=tenant_id, datastore_version_id=datastore_version) return configurationGroup @staticmethod def create_items(cfg_id, values): LOG.debug("Saving configuration values for %s - " "values: %s" % (cfg_id, values)) config_items = [] for key, val in values.iteritems(): config_item = DBConfigurationParameter.create( configuration_id=cfg_id, configuration_key=key, configuration_value=val) config_items.append(config_item) return config_items @staticmethod def delete(context, group): deleted_at = datetime.utcnow() Configuration.remove_all_items(context, group.id, deleted_at) group.deleted = True group.deleted_at = deleted_at group.save() @staticmethod def remove_all_items(context, id, deleted_at): items = DBConfigurationParameter.find_all(configuration_id=id, deleted=False).all() LOG.debug("Removing all configuration values for %s" % id) for item in items: item.deleted = True item.deleted_at = deleted_at item.save() @staticmethod def load_configuration_datastore_version(context, id): config = Configuration.load(context, id) datastore_version = dstore_models.DatastoreVersion.load_by_uuid( config.datastore_version_id) return datastore_version @staticmethod def load(context, id): try: if context.is_admin: return DBConfiguration.find_by(id=id, deleted=False) else: return DBConfiguration.find_by(id=id, tenant_id=context.tenant, deleted=False) except ModelNotFoundError: msg = _("Configuration group with ID %s could not be found.") % id raise 
ModelNotFoundError(msg) @staticmethod def find_parameter_details(name, detail_list): for item in detail_list: if item.name == name: return item return None @staticmethod def load_items(context, id): datastore_v = Configuration.load_configuration_datastore_version( context, id) config_items = DBConfigurationParameter.find_all( configuration_id=id, deleted=False).all() detail_list = DatastoreConfigurationParameters.load_parameters( datastore_v.id) for item in config_items: rule = Configuration.find_parameter_details( str(item.configuration_key), detail_list) if not rule: continue if rule.data_type == 'boolean': item.configuration_value = bool(int(item.configuration_value)) elif rule.data_type == 'integer': item.configuration_value = int(item.configuration_value) else: item.configuration_value = str(item.configuration_value) return config_items def get_configuration_overrides(self): """Gets the overrides dictionary to apply to an instance.""" overrides = {} if self.configuration_id: config_items = Configuration.load_items(self.context, id=self.configuration_id) for i in config_items: overrides[i.configuration_key] = i.configuration_value return overrides def does_configuration_need_restart(self): datastore_v = Configuration.load_configuration_datastore_version( self.context, self.configuration_id) config_items = Configuration.load_items(self.context, id=self.configuration_id) LOG.debug("config_items: %s" % config_items) detail_list = DatastoreConfigurationParameters.load_parameters( datastore_v.id, show_deleted=True) for i in config_items: LOG.debug("config item: %s" % i) details = Configuration.find_parameter_details( i.configuration_key, detail_list) LOG.debug("parameter details: %s" % details) if not details: raise exception.NotFound(uuid=i.configuration_key) if bool(details.restart_required): return True return False @staticmethod def save(configuration, configuration_items): DBConfiguration.save(configuration) for item in configuration_items: item["deleted_at"] = None DBConfigurationParameter.save(item) class DBConfiguration(dbmodels.DatabaseModelBase): _data_fields = ['name', 'description', 'tenant_id', 'datastore_version_id', 'deleted', 'deleted_at', 'created', 'updated'] @property def datastore(self): datastore_version = dstore_models.DatastoreVersion.load_by_uuid( self.datastore_version_id) datastore = dstore_models.Datastore.load( datastore_version.datastore_id) return datastore @property def datastore_version(self): datastore_version = dstore_models.DatastoreVersion.load_by_uuid( self.datastore_version_id) return datastore_version class DBConfigurationParameter(dbmodels.DatabaseModelBase): _data_fields = ['configuration_id', 'configuration_key', 'configuration_value', 'deleted', 'deleted_at'] def __hash__(self): return self.configuration_key.__hash__() class DBDatastoreConfigurationParameters(dbmodels.DatabaseModelBase): """Model for storing the configuration parameters on a datastore.""" _auto_generated_attrs = ['id'] _data_fields = [ 'name', 'datastore_version_id', 'restart_required', 'max_size', 'min_size', 'data_type', 'deleted', 'deleted_at', ] _table_name = "datastore_configuration_parameters" preserve_on_delete = True class DatastoreConfigurationParameters(object): def __init__(self, db_info): self.db_info = db_info @staticmethod def create(**kwargs): """Create a configuration parameter for a datastore version.""" # Do we already have a parameter in the db? # yes: and its deleted then modify the param # yes: and its not deleted then error on create. 
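        #      (ConfigurationParameterAlreadyExists is raised below)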
# no: then just create the new param ds_v_id = kwargs.get('datastore_version_id') config_param_name = kwargs.get('name') try: param = DatastoreConfigurationParameters.load_parameter_by_name( ds_v_id, config_param_name, show_deleted=True) if param.deleted == 1: param.restart_required = kwargs.get('restart_required') param.data_type = kwargs.get('data_type') param.max_size = kwargs.get('max_size') param.min_size = kwargs.get('min_size') param.deleted = 0 param.save() return param else: raise exception.ConfigurationParameterAlreadyExists( parameter_name=config_param_name, datastore_version=ds_v_id) except exception.NotFound: pass config_param = DBDatastoreConfigurationParameters.create( **kwargs) return config_param @staticmethod def delete(version_id, config_param_name): config_param = DatastoreConfigurationParameters.load_parameter_by_name( version_id, config_param_name) config_param.deleted = True config_param.deleted_at = datetime.utcnow() config_param.save() @classmethod def load_parameters(cls, datastore_version_id, show_deleted=False): try: if show_deleted: return DBDatastoreConfigurationParameters.find_all( datastore_version_id=datastore_version_id ) else: return DBDatastoreConfigurationParameters.find_all( datastore_version_id=datastore_version_id, deleted=False ) except exception.NotFound: raise exception.NotFound(uuid=datastore_version_id) @classmethod def load_parameter(cls, config_id, show_deleted=False): try: if show_deleted: return DBDatastoreConfigurationParameters.find_by( id=config_id ) else: return DBDatastoreConfigurationParameters.find_by( id=config_id, deleted=False ) except exception.NotFound: raise exception.NotFound(uuid=config_id) @classmethod def load_parameter_by_name(cls, datastore_version_id, config_param_name, show_deleted=False): try: if show_deleted: return DBDatastoreConfigurationParameters.find_by( datastore_version_id=datastore_version_id, name=config_param_name ) else: return DBDatastoreConfigurationParameters.find_by( datastore_version_id=datastore_version_id, name=config_param_name, deleted=False ) except exception.NotFound: raise exception.NotFound(uuid=config_param_name) def create_or_update_datastore_configuration_parameter(name, datastore_version_id, restart_required, data_type, max_size, min_size): get_db_api().configure_db(CONF) datastore_version = dstore_models.DatastoreVersion.load_by_uuid( datastore_version_id) try: config = DatastoreConfigurationParameters.load_parameter_by_name( datastore_version_id, name, show_deleted=True) config.restart_required = restart_required config.max_size = max_size config.min_size = min_size config.data_type = data_type get_db_api().save(config) except exception.NotFound: config = DBDatastoreConfigurationParameters( id=utils.generate_uuid(), name=name, datastore_version_id=datastore_version.id, restart_required=restart_required, data_type=data_type, max_size=max_size, min_size=min_size, deleted=False, ) get_db_api().save(config) def load_datastore_configuration_parameters(datastore, datastore_version, config_file): get_db_api().configure_db(CONF) (ds, ds_v) = dstore_models.get_datastore_version( type=datastore, version=datastore_version, return_inactive=True) with open(config_file) as f: config = json.load(f) for param in config['configuration-parameters']: create_or_update_datastore_configuration_parameter( param['name'], ds_v.id, param['restart_required'], param['type'], param.get('max'), param.get('min'), ) def persisted_models(): return { 'configurations': DBConfiguration, 'configuration_parameters': 
DBConfigurationParameter,
        'datastore_configuration_parameters':
            DBDatastoreConfigurationParameters,  # noqa
    }
trove-5.0.0/trove/tests/0000775000567000056710000000000012701410521016306 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/root_logger.py0000664000567000056710000000477212701410316021216 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import traceback


class DefaultRootHandler(logging.StreamHandler):
    """A singleton StreamHandler"""

    __handler = logging.StreamHandler()
    __singleton = None
    __info = None
    __enable_backtrace = False

    @classmethod
    def activate(cls, enable_backtrace=False):
        # leverage the singleton __handler which has an
        # acquire() method to create a critical section.
        cls.__handler.acquire()
        if cls.__singleton is None:
            cls.__singleton = DefaultRootHandler()
            cls.__enable_backtrace = enable_backtrace
        cls.__handler.release()
        return cls.__singleton

    @classmethod
    def set_info(cls, info=None):
        cls.__info = info

    def __init__(self):
        if DefaultRootHandler.__singleton is not None:
            raise Exception(
                "Do not directly instantiate DefaultRootHandler(). "
                "Only use the activate() class method.")
        super(DefaultRootHandler, self).__init__()

    def emit(self, record):
        if DefaultRootHandler.__info:
            msg = ("*************************\n" +
                   "Unhandled message logged from " +
                   DefaultRootHandler.__info + ", " +
                   record.name + "\n")
            if DefaultRootHandler.__enable_backtrace:
                msg += ''.join(traceback.format_stack()) + "\n"
            msg += "*************************\n"
            self.stream.write(msg)
            self.flush()


class DefaultRootLogger(object):
    """A root logger that uses the singleton handler"""

    def __init__(self, enable_backtrace=False):
        super(DefaultRootLogger, self).__init__()
        # Propagate the caller's enable_backtrace flag to the handler.
        handler = DefaultRootHandler.activate(
            enable_backtrace=enable_backtrace)
        handler.acquire()
        if handler not in logging.getLogger('').handlers:
            logging.getLogger('').addHandler(handler)
        handler.release()
trove-5.0.0/trove/tests/util/0000775000567000056710000000000012701410521017263 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/util/server_connection.py0000664000567000056710000000444612701410316023374 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
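# [Editor's sketch, not part of the original source] Typical use of
# trove/tests/root_logger.py above: install the singleton handler once per
# test process, then tag it with the currently running test group.
#
#     from trove.tests.root_logger import (DefaultRootHandler,
#                                          DefaultRootLogger)
#
#     DefaultRootLogger(enable_backtrace=False)
#     DefaultRootHandler.set_info(info="instance-actions test group")
#
# Any record reaching the root logger is then emitted with the banner built
# in DefaultRootHandler.emit(), naming the group that leaked it.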
import os from trove import tests from trove.tests import util from trove.tests.util.users import Requirements def create_server_connection(instance_id): if util.test_config.use_local_ovz: return OpenVZServerConnection(instance_id) return ServerSSHConnection(instance_id) class ServerSSHConnection(object): def __init__(self, instance_id): self.instance_id = instance_id req_admin = Requirements(is_admin=True) self.user = util.test_config.users.find_user(req_admin) self.dbaas_admin = util.create_dbaas_client(self.user) self.instance = self.dbaas_admin.management.show(self.instance_id) self.ip_address = self.instance.ip[0] TROVE_TEST_SSH_USER = os.environ.get('TROVE_TEST_SSH_USER') if TROVE_TEST_SSH_USER and '@' not in self.ip_address: self.ip_address = TROVE_TEST_SSH_USER + '@' + self.ip_address def execute(self, cmd): exe_cmd = "%s %s %s" % (tests.SSH_CMD, self.ip_address, cmd) print("RUNNING COMMAND: %s" % exe_cmd) return util.process(exe_cmd) class OpenVZServerConnection(object): def __init__(self, instance_id): self.instance_id = instance_id req_admin = Requirements(is_admin=True) self.user = util.test_config.users.find_user(req_admin) self.dbaas_admin = util.create_dbaas_client(self.user) self.instance = self.dbaas_admin.management.show(self.instance_id) self.instance_local_id = self.instance.server["local_id"] def execute(self, cmd): exe_cmd = "sudo vzctl exec %s %s" % (self.instance_local_id, cmd) print("RUNNING COMMAND: %s" % exe_cmd) return util.process(exe_cmd) trove-5.0.0/trove/tests/util/__init__.py0000664000567000056710000002362112701410316021402 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`tests` -- Utility methods for tests. =================================== .. automodule:: utils :platform: Unix :synopsis: Tests for Nova. """ import subprocess from urllib import unquote try: EVENT_AVAILABLE = True except ImportError: EVENT_AVAILABLE = False from proboscis.asserts import assert_true from proboscis.asserts import Check from proboscis.asserts import fail from proboscis import SkipTest from sqlalchemy import create_engine from troveclient.compat import Dbaas from troveclient.compat import exceptions from trove.common import cfg from trove.common.utils import import_class from trove.common.utils import import_object from trove.tests.config import CONFIG as test_config from trove.tests.util.client import TestClient as TestClient from trove.tests.util import test_config as CONFIG from trove.tests.util.users import Requirements WHITE_BOX = test_config.white_box CONF = cfg.CONF def assert_http_code(expected_http_code, func, *args, **kwargs): try: rtn_value = func(*args, **kwargs) assert_equal( expected_http_code, 200, "Expected the function to return http code %s but instead got " "no error (code 200?)." 
% expected_http_code) return rtn_value except exceptions.ClientException as ce: assert_equal( expected_http_code, ce.code, "Expected the function to return http code %s but instead got " "code %s." % (expected_http_code, ce.code)) def create_client(*args, **kwargs): """ Using the User Requirements as arguments, finds a user and grabs a new DBAAS client. """ reqs = Requirements(*args, **kwargs) user = test_config.users.find_user(reqs) return create_dbaas_client(user) def create_dbaas_client(user): """Creates a rich client for the Trove API using the test config.""" auth_strategy = None kwargs = { 'service_type': 'database', 'insecure': test_config.values['trove_client_insecure'], } def set_optional(kwargs_name, test_conf_name): value = test_config.values.get(test_conf_name, None) if value is not None: kwargs[kwargs_name] = value force_url = 'override_trove_api_url' in test_config.values service_url = test_config.get('override_trove_api_url', None) if user.requirements.is_admin: service_url = test_config.get('override_admin_trove_api_url', service_url) if service_url: kwargs['service_url'] = service_url auth_strategy = None if user.requirements.is_admin: auth_strategy = test_config.get('admin_auth_strategy', test_config.auth_strategy) else: auth_strategy = test_config.auth_strategy set_optional('region_name', 'trove_client_region_name') if test_config.values.get('override_trove_api_url_append_tenant', False): kwargs['service_url'] += "/" + user.tenant if auth_strategy == 'fake': from troveclient.compat import auth class FakeAuth(auth.Authenticator): def authenticate(self): class FakeCatalog(object): def __init__(self, auth): self.auth = auth def get_public_url(self): return "%s/%s" % (test_config.dbaas_url, self.auth.tenant) def get_token(self): return self.auth.tenant return FakeCatalog(self) auth_strategy = FakeAuth if auth_strategy: kwargs['auth_strategy'] = auth_strategy if not user.requirements.is_admin: auth_url = test_config.trove_auth_url else: auth_url = test_config.values.get('trove_admin_auth_url', test_config.trove_auth_url) if test_config.values.get('trove_client_cls'): cls_name = test_config.trove_client_cls kwargs['client_cls'] = import_class(cls_name) dbaas = Dbaas(user.auth_user, user.auth_key, tenant=user.tenant, auth_url=auth_url, **kwargs) dbaas.authenticate() with Check() as check: check.is_not_none(dbaas.client.auth_token, "Auth token not set!") if not force_url and user.requirements.is_admin: expected_prefix = test_config.dbaas_url actual = dbaas.client.service_url msg = "Dbaas management url was expected to start with %s, but " \ "was %s." % (expected_prefix, actual) check.true(actual.startswith(expected_prefix), msg) return TestClient(dbaas) def create_nova_client(user, service_type=None): """Creates a rich client for the Nova API using the test config.""" if test_config.nova_client is None: raise SkipTest("No nova_client info specified in the Test Config " "so this test will be skipped.") from novaclient.client import Client if not service_type: service_type = test_config.nova_client['nova_service_type'] openstack = Client(CONF.nova_client_version, user.auth_user, user.auth_key, user.tenant, test_config.nova_client['auth_url'], service_type=service_type, no_cache=True, cacert=test_config.values.get('cacert', None)) openstack.authenticate() return TestClient(openstack) def dns_checker(mgmt_instance): """Given a MGMT instance, ensures DNS provisioning worked. 
Uses a helper class which, given a mgmt instance (returned by the mgmt API) can confirm that the DNS record provisioned correctly. """ if CONFIG.values.get('trove_dns_checker') is not None: checker = import_class(CONFIG.trove_dns_checker) checker()(mgmt_instance) else: raise SkipTest("Can't access DNS system to check if DNS provisioned.") def process(cmd): process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = process.communicate() return result def string_in_list(str, substr_list): """Returns True if the string appears in the list.""" return any([str.find(x) >= 0 for x in substr_list]) def unquote_user_host(user_hostname): unquoted = unquote(user_hostname) if '@' not in unquoted: return unquoted, '%' if unquoted.endswith('@'): return unquoted, '%' splitup = unquoted.split('@') host = splitup[-1] user = '@'.join(splitup[:-1]) return user, host def iso_time(time_string): """Return a iso formated datetime: 2013-04-15T19:50:23Z.""" ts = time_string.replace(' ', 'T') try: micro = ts.rindex('.') ts = ts[:micro] except ValueError: pass return '%sZ' % ts def assert_contains(exception_message, substrings): for substring in substrings: assert_true(substring in exception_message, message="'%s' not in '%s'" % (substring, exception_message)) # TODO(dukhlov): Still required by trove integration # Should be removed after trove integration fix # https://bugs.launchpad.net/trove-integration/+bug/1228306 # TODO(cp16net): DO NOT USE needs to be removed def mysql_connection(): cls = CONFIG.get('mysql_connection', "local.MySqlConnection") if cls == "local.MySqlConnection": return MySqlConnection() return import_object(cls)() class MySqlConnection(object): def assert_fails(self, ip, user_name, password): from trove.tests.util import mysql try: with mysql.create_mysql_connection(ip, user_name, password): pass fail("Should have failed to connect: mysql --host %s -u %s -p%s" % (ip, user_name, password)) except mysql.MySqlPermissionsFailure: return # Good, this is what we wanted. except mysql.MySqlConnectionFailure as mcf: fail("Expected to see permissions failure. Instead got message:" "%s" % mcf.message) def create(self, ip, user_name, password): from trove.tests.util import mysql return mysql.create_mysql_connection(ip, user_name, password) class LocalSqlClient(object): """A sqlalchemy wrapper to manage transactions.""" def __init__(self, engine, use_flush=True): self.engine = engine self.use_flush = use_flush def __enter__(self): self.conn = self.engine.connect() self.trans = self.conn.begin() return self.conn def __exit__(self, type, value, traceback): if self.trans: if type is not None: # An error occurred self.trans.rollback() else: if self.use_flush: self.conn.execute(FLUSH) self.trans.commit() self.conn.close() def execute(self, t, **kwargs): try: return self.conn.execute(t, kwargs) except Exception: self.trans.rollback() self.trans = None raise @staticmethod def init_engine(user, password, host): return create_engine("mysql://%s:%s@%s:3306" % (user, password, host), pool_recycle=1800, echo=True) trove-5.0.0/trove/tests/util/event_simulator.py0000664000567000056710000002536712701410320023067 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Simulates time itself to make the fake mode tests run even faster.

Specifically, this forces the various threads of execution to run one at a
time, based on when they would have been scheduled using the various eventlet
spawn functions. Because only one thing is running at a given time, it
eliminates the race conditions that would normally be present when testing
multi-threaded scenarios. It also means that the simulated time.sleep does
not actually have to sit around for the designated time, which greatly
speeds up the time it takes to run the tests.

Event Simulator Overview
========================

We use this to simulate all the threads of Trove running, i.e. the API, the
taskmanager, and the proboscis tests. All the services end up sleeping and
having to wait for something to happen at times.

Monkey Patching Methods
-----------------------
We monkey patch a few methods to make this happen.

A few sleep methods are replaced with a fake_sleep:
* time.sleep
* eventlet.sleep
* greenthread.sleep

A few spawn methods are replaced with a fake_spawn:
* eventlet.spawn_after
* eventlet.spawn_n

Raise an error if you try this one:
* eventlet.spawn

Replace the poll_until with a fake_poll_until.

Coroutine Object
----------------
There is a Coroutine object here that mimics the behavior of a thread. It
takes in a function with args and kwargs and executes it. If at any point
that method calls time.sleep(seconds), the event simulator will put that
method on the stack of threads and run the fake_sleep method, which will
then iterate over all the threads in the stack, updating the time they still
need to sleep. Then, as the threads hit the end of their sleep time period,
they will continue to execute.

fake_threads
------------
One thing to note here is the idea of a stack of threads being kept in the
fake_threads list. Any new thread created is added to this stack.

A fake_thread's attributes are:

fake_thread = {
    'sleep': time_from_now_in_seconds,
    'greenlet': Coroutine(method_to_execute),
    'name': str(func)
}

'sleep' is the time it should wait to execute this method.
'greenlet' is the thread object.
'name' is the unique name of the thread to track.

main_loop Method
----------------
The main_loop method is a loop that runs forever, waiting on all the threads
to complete while running pulse every 0.1 seconds. This is the key to
simulating the threads quickly: we are pulsing every 0.1 seconds, making
sure there are no threads just waiting around for no reason, rather than
waiting a full second to respond.

pulse Method
------------
The pulse method goes through the stack (list) of threads looking for the
next thread to execute while updating each thread's 'sleep' time; if the
'sleep' time is <= 0, it will run this thread until it calls for another
time.sleep. If the running method/thread calls time.sleep for whatever
reason, then the thread's 'sleep' parameter is updated to the new
'next_sleep_time'. If the running method/thread completes without calling
time.sleep, because it finished all the work it needed to do, then
'next_sleep_time' is set to None and the method/thread is deleted from the
stack (list) of threads.
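
Usage Sketch
------------
A minimal sketch of how a fake-mode run is wired together; run_all_tests is
just a placeholder for whatever callable actually drives the test suite:

    from trove.tests.util import event_simulator
    event_simulator.monkey_patch()
    event_simulator.run_main(run_all_tests)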
""" import eventlet from eventlet.event import Event from eventlet.semaphore import Semaphore from eventlet import spawn as true_spawn class Coroutine(object): """ This class simulates a coroutine, which is ironic, as greenlet actually *is* a coroutine. But trying to use greenlet here gives nasty results since eventlet thoroughly monkey-patches things, making it difficult to run greenlet on its own. Essentially think of this as a wrapper for eventlet's threads which has a run and sleep function similar to old school coroutines, meaning it won't start until told and when asked to sleep it won't wake back up without permission. """ ALL = [] def __init__(self, func, *args, **kwargs): self.my_sem = Semaphore(0) # This is held by the thread as it runs. self.caller_sem = None self.dead = False started = Event() self.id = 5 self.ALL.append(self) def go(): self.id = eventlet.corolocal.get_ident() started.send(True) self.my_sem.acquire(blocking=True, timeout=None) try: func(*args, **kwargs) # except Exception as e: # print("Exception in coroutine! %s" % e) finally: self.dead = True self.caller_sem.release() # Relinquish control back to caller. for i in range(len(self.ALL)): if self.ALL[i].id == self.id: del self.ALL[i] break true_spawn(go) started.wait() @classmethod def get_current(cls): """Finds the coroutine associated with the thread which calls it.""" return cls.get_by_id(eventlet.corolocal.get_ident()) @classmethod def get_by_id(cls, id): for cr in cls.ALL: if cr.id == id: return cr raise RuntimeError("Coroutine with id %s not found!" % id) def sleep(self): """Puts the coroutine to sleep until run is called again. This should only be called by the thread which owns this object. """ # Only call this from it's own thread. assert eventlet.corolocal.get_ident() == self.id self.caller_sem.release() # Relinquish control back to caller. self.my_sem.acquire(blocking=True, timeout=None) def run(self): """Starts up the thread. Should be called from a different thread.""" # Don't call this from the thread which it represents. assert eventlet.corolocal.get_ident() != self.id self.caller_sem = Semaphore(0) self.my_sem.release() self.caller_sem.acquire() # Wait for it to finish. # Main global thread to run. main_greenlet = None # Stack of threads currently running or sleeping fake_threads = [] # Allow a sleep method to be called at least this number of times before # raising an error that there are not other active threads waiting to run. allowable_empty_sleeps = 1 sleep_allowance = allowable_empty_sleeps def other_threads_are_active(): """Returns True if concurrent activity is being simulated. Specifically, this means there is a fake thread in action other than the "pulse" thread and the main test thread. """ return len(fake_threads) >= 2 def fake_sleep(time_to_sleep): """Simulates sleep. Puts the coroutine which calls it to sleep. If a coroutine object is not associated with the caller this will fail. """ if time_to_sleep: global sleep_allowance sleep_allowance -= 1 if not other_threads_are_active(): if sleep_allowance < -1: raise RuntimeError("Sleeping for no reason.") else: return # Forgive the thread for calling this for one time. 
sleep_allowance = allowable_empty_sleeps cr = Coroutine.get_current() for ft in fake_threads: if ft['greenlet'].id == cr.id: ft['next_sleep_time'] = time_to_sleep cr.sleep() def fake_poll_until(retriever, condition=lambda value: value, sleep_time=1, time_out=None): """Fakes out poll until.""" from trove.common import exception slept_time = 0 while True: resource = retriever() if condition(resource): return resource fake_sleep(sleep_time) slept_time += sleep_time if time_out and slept_time >= time_out: raise exception.PollTimeOut() def run_main(func): """Runs the given function as the initial thread of the event simulator.""" global main_greenlet main_greenlet = Coroutine(main_loop) fake_spawn(0, func) main_greenlet.run() def main_loop(): """The coroutine responsible for calling each "fake thread." The Coroutine which calls this is the only one that won't end up being associated with the fake_threads list. The reason is this loop needs to wait on whatever thread is running, meaning it has to be a Coroutine as well. """ while len(fake_threads) > 0: pulse(0.1) def fake_spawn_n(func, *args, **kw): fake_spawn(0, func, *args, **kw) def fake_spawn(time_from_now_in_seconds, func, *args, **kw): """Fakes eventlet's spawn function by adding a fake thread.""" def thread_start(): # fake_sleep(time_from_now_in_seconds) return func(*args, **kw) cr = Coroutine(thread_start) fake_threads.append({'sleep': time_from_now_in_seconds, 'greenlet': cr, 'name': str(func)}) def pulse(seconds): """ Runs the event simulator for the amount of simulated time denoted by "seconds". """ index = 0 while index < len(fake_threads): t = fake_threads[index] t['sleep'] -= seconds if t['sleep'] <= 0: t['sleep'] = 0 t['next_sleep_time'] = None t['greenlet'].run() sleep_time = t['next_sleep_time'] if sleep_time is None or isinstance(sleep_time, tuple): del fake_threads[index] index -= 1 else: t['sleep'] = sleep_time index += 1 def wait_until_all_activity_stops(): """In fake mode, wait for all simulated events to chill out. This can be useful in situations where you need simulated activity (such as calls running in TaskManager) to "bleed out" and finish before running another test. """ if main_greenlet is None: return while other_threads_are_active(): fake_sleep(1) def monkey_patch(): """ Changes global functions such as time.sleep, eventlet.spawn* and others to their event_simulator equivalents. """ import time time.sleep = fake_sleep import eventlet from eventlet import greenthread eventlet.sleep = fake_sleep greenthread.sleep = fake_sleep eventlet.spawn_after = fake_spawn def raise_error(): raise RuntimeError("Illegal operation!") eventlet.spawn_n = fake_spawn_n eventlet.spawn = raise_error from trove.common import utils utils.poll_until = fake_poll_until trove-5.0.0/trove/tests/util/usage.py0000664000567000056710000000565212701410316020753 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
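#
# In fake mode the notification flow is entirely in-process: code under test
# calls notify(), which appends the payload to MESSAGE_QUEUE keyed by
# instance id, and UsageVerifier.check_message() polls that queue. A rough
# sketch of the round trip (the id and event type are invented examples):
#
#     notify('trove.instance.create', {'instance_id': 'abc-123'})
#     verifier = UsageVerifier()
#     verifier.check_message('abc-123', 'trove.instance.create')
#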
from collections import defaultdict from oslo_log import log as logging import proboscis.asserts as asserts from proboscis.dependencies import SkipTest from trove.common import utils from trove.tests.config import CONFIG LOG = logging.getLogger(__name__) MESSAGE_QUEUE = defaultdict(list) def create_usage_verifier(): return utils.import_object(CONFIG.usage_endpoint) class UsageVerifier(object): def clear_events(self): """Hook that is called to allow endpoints to clean up.""" pass def check_message(self, resource_id, event_type, **attrs): messages = utils.poll_until(lambda: self.get_messages(resource_id), lambda x: len(x) > 0, time_out=30) found = None for message in messages: if message['event_type'] == event_type: found = message asserts.assert_is_not_none(found, "No message type %s for resource %s" % (event_type, resource_id)) with asserts.Check() as check: for key, value in attrs.iteritems(): check.equal(found[key], value) def get_messages(self, resource_id, expected_messages=None): global MESSAGE_QUEUE msgs = MESSAGE_QUEUE.get(resource_id, []) if expected_messages is not None: asserts.assert_equal(len(msgs), expected_messages) return msgs class FakeVerifier(object): """This is the default handler in fake mode, it is basically a no-op.""" def clear_events(self): pass def check_message(self, *args, **kwargs): raise SkipTest("Notifications not available") def get_messages(self, *args, **kwargs): pass def notify(event_type, payload): """Simple test notify function which saves the messages to global list.""" payload['event_type'] = event_type if 'instance_id' in payload and 'server_type' not in payload: LOG.debug('Received Usage Notification: %s' % event_type) resource_id = payload['instance_id'] global MESSAGE_QUEUE MESSAGE_QUEUE[resource_id].append(payload) LOG.debug('Message Queue for %(id)s now has %(msg_count)d messages' % {'id': resource_id, 'msg_count': len(MESSAGE_QUEUE[resource_id])}) trove-5.0.0/trove/tests/util/users.py0000664000567000056710000001233512701410316021004 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Information on users / identities we can hit the services on behalf of. This code allows tests to grab from a set of users based on the features they possess instead of specifying exact identities in the test code. """ class Requirements(object): """Defines requirements a test has of a user.""" def __init__(self, is_admin=None, services=None): self.is_admin = is_admin self.services = services or ["trove"] # Make sure they're all the same kind of string. self.services = [str(service) for service in self.services] def satisfies(self, reqs): """True if these requirements conform to the given requirements.""" if reqs.is_admin is not None: # Only check if it was specified. 
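# (A requirements object with is_admin=None accepts both admin and
# non-admin users.)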
if reqs.is_admin != self.is_admin:
                return False
        for service in reqs.services:
            if service not in self.services:
                return False
        return True

    def __str__(self):
        return "is_admin=%s, services=%s" % (self.is_admin, self.services)


class ServiceUser(object):
    """Represents a user who uses a service.

    Importantly, this represents general information, such that a test can
    be written to state the general information about a user it needs (for
    example, if the user is an admin or not) rather than explicitly list
    users.
    """

    def __init__(self, auth_user=None, auth_key=None, services=None,
                 tenant=None, tenant_id=None, requirements=None):
        """Creates info on a user."""
        self.auth_user = auth_user
        self.auth_key = auth_key
        self.tenant = tenant
        self.tenant_id = tenant_id
        self.requirements = requirements
        self.test_count = 0
        if self.requirements.is_admin is None:
            raise ValueError("'is_admin' must be specified for a user.")

    def __str__(self):
        return ("{ user_name=%s, tenant=%s, tenant_id=%s, reqs=%s, tests=%d }"
                % (self.auth_user, self.tenant, self.tenant_id,
                   self.requirements, self.test_count))


class Users(object):
    """Collection of users with methods to find them via requirements."""

    def __init__(self, user_list):
        self.users = []
        for user_dict in user_list:
            reqs = Requirements(**user_dict["requirements"])
            user = ServiceUser(auth_user=user_dict["auth_user"],
                               auth_key=user_dict["auth_key"],
                               tenant=user_dict["tenant"],
                               tenant_id=user_dict.get("tenant_id", None),
                               requirements=reqs)
            self.users.append(user)

    def find_all_users_who_satisfy(self, requirements, black_list=None):
        """Returns all users who satisfy the given requirements."""
        black_list = black_list or []
        print("Searching for a user who meets requirements %s in our list..."
              % requirements)
        print("Users:")
        for user in self.users:
            print("\t" + str(user))
        print("Black list")
        for item in black_list:
            print("\t" + str(item))
        return (user for user in self.users
                if user.auth_user not in black_list and
                user.requirements.satisfies(requirements))

    def find_user(self, requirements, black_list=None):
        """Finds a user who meets the requirements and has been used least."""
        users = self.find_all_users_who_satisfy(requirements, black_list)
        try:
            user = min(users, key=lambda user: user.test_count)
        except ValueError:  # Raised when "users" is empty.
            raise RuntimeError("The test configuration data lacks a user "
                               "who meets these requirements: %s"
                               % requirements)
        user.test_count += 1
        return user

    def _find_user_by_condition(self, condition):
        users = (user for user in self.users if condition(user))
        try:
            user = min(users, key=lambda user: user.test_count)
        except ValueError:
            raise RuntimeError("Did not find a user meeting the given "
                               "condition.")
        user.test_count += 1
        return user

    def find_user_by_name(self, name):
        """Finds the least-used user with the given auth_user name."""
        condition = lambda user: user.auth_user == name
        return self._find_user_by_condition(condition)

    def find_user_by_tenant_id(self, tenant_id):
        condition = lambda user: user.tenant_id == tenant_id
        return self._find_user_by_condition(condition)
trove-5.0.0/trove/tests/util/mysql.py0000664000567000056710000001411512701410316021006 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re from oslo_db.sqlalchemy import session import pexpect from sqlalchemy.exc import OperationalError try: from sqlalchemy.exc import ResourceClosedError except ImportError: ResourceClosedError = Exception from trove import tests from trove.tests.config import CONFIG def create_mysql_connection(host, user, password): connection = CONFIG.mysql_connection_method if connection['type'] == "direct": return SqlAlchemyConnection(host, user, password) elif connection['type'] == "tunnel": if 'ssh' not in connection: raise RuntimeError("If connection type is 'tunnel' then a " "property 'ssh' is expected.") return PexpectMySqlConnection(connection['ssh'], host, user, password) else: raise RuntimeError("Unknown Bad test configuration for " "mysql_connection_method") class MySqlConnectionFailure(RuntimeError): def __init__(self, msg): super(MySqlConnectionFailure, self).__init__(msg) class MySqlPermissionsFailure(RuntimeError): def __init__(self, msg): super(MySqlPermissionsFailure, self).__init__(msg) class SqlAlchemyConnection(object): def __init__(self, host, user, password): self.host = host self.user = user self.password = password try: self.engine = self._init_engine(user, password, host) except OperationalError as oe: if self._exception_is_permissions_issue(str(oe)): raise MySqlPermissionsFailure(oe) else: raise MySqlConnectionFailure(oe) @staticmethod def _exception_is_permissions_issue(msg): """Assert message cited a permissions issue and not something else.""" pos_error = re.compile(".*Host '[\w\.]*' is not allowed to connect to " "this MySQL server.*") pos_error1 = re.compile(".*Access denied for user " "'[\w\*\!\@\#\^\&]*'@'[\w\.]*'.*") if (pos_error.match(msg) or pos_error1.match(msg)): return True def __enter__(self): try: self.conn = self.engine.connect() except OperationalError as oe: if self._exception_is_permissions_issue(str(oe)): raise MySqlPermissionsFailure(oe) else: raise MySqlConnectionFailure(oe) self.trans = self.conn.begin() return self def execute(self, cmd): """Execute some code.""" cmd = cmd.replace("%", "%%") try: return self.conn.execute(cmd).fetchall() except Exception: self.trans.rollback() self.trans = None try: raise except ResourceClosedError: return [] def __exit__(self, type, value, traceback): if self.trans: if type is not None: # An error occurred self.trans.rollback() else: self.trans.commit() self.conn.close() @staticmethod def _init_engine(user, password, host): return session.EngineFacade( "mysql://%s:%s@%s:3306" % (user, password, host), pool_recycle=1800, echo=True ).get_engine() class PexpectMySqlConnection(object): TIME_OUT = 30 def __init__(self, ssh_args, host, user, password): self.host = host self.user = user self.password = password cmd = '%s %s' % (tests.SSH_CMD, ssh_args) self.proc = pexpect.spawn(cmd) print(cmd) self.proc.expect(":~\$", timeout=self.TIME_OUT) cmd2 = "mysql --host '%s' -u '%s' '-p%s'\n" % \ (self.host, self.user, self.password) print(cmd2) self.proc.send(cmd2) result = self.proc.expect([ 'mysql>', 'Access denied', "Can't connect to MySQL server"], timeout=self.TIME_OUT) if result == 1: raise 
MySqlPermissionsFailure(self.proc.before)
        elif result == 2:
            raise MySqlConnectionFailure(self.proc.before)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.proc.close()

    def execute(self, cmd):
        self.proc.send(cmd + "\G\n")
        outcome = self.proc.expect(['Empty set', 'mysql>'],
                                   timeout=self.TIME_OUT)
        if outcome == 0:
            return []
        else:
            # This next line might be invaluable for long test runs.
            print("Interpreting output: %s" % self.proc.before)
            lines = self.proc.before.split("\r\n")
            result = []
            row = None
            for line in lines:
                plural_s = "s" if len(result) != 0 else ""
                end_line = "%d row%s in set" % ((len(result) + 1), plural_s)
                if len(result) == 0:
                    end_line = "1 row in set"
                if (line.startswith("***************************") or
                        line.startswith(end_line)):
                    if row is not None:
                        result.append(row)
                    row = {}
                elif row is not None:
                    colon = line.find(": ")
                    field = line[:colon]
                    value = line[colon + 2:]
                    row[field] = value
            return result
trove-5.0.0/trove/tests/util/check.py0000664000567000056710000001577212701410316020724 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Like asserts, but does not raise an exception until the end of a block."""

import traceback

from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis.asserts import ASSERTION_ERROR
from proboscis.asserts import Check


def get_stack_trace_of_caller(level_up):
    """Gets the stack trace at the point of the caller."""
    level_up += 1
    st = traceback.extract_stack()
    caller_index = len(st) - level_up
    if caller_index < 0:
        caller_index = 0
    new_st = st[0:caller_index]
    return new_st


def raise_blame_caller(level_up, ex):
    """Raises an exception, changing the stack trace to point to the caller."""
    new_st = get_stack_trace_of_caller(level_up + 2)
    raise type(ex), ex, new_st


class Checker(object):

    def __init__(self):
        self.messages = []
        self.odd = True
        self.protected = False

    def _add_exception(self, _type, value, tb):
        """Takes an exception, and adds it as a string."""
        if self.odd:
            prefix = "* "
        else:
            prefix = "- "
        start = "Check failure! Traceback:"
        middle = prefix.join(traceback.format_list(tb))
        end = '\n'.join(traceback.format_exception_only(_type, value))
        msg = '\n'.join([start, middle, end])
        self.messages.append(msg)
        self.odd = not self.odd

    def equal(self, *args, **kwargs):
        self._run_assertion(assert_equal, *args, **kwargs)

    def false(self, *args, **kwargs):
        self._run_assertion(assert_false, *args, **kwargs)

    def not_equal(self, *args, **kwargs):
        self._run_assertion(assert_not_equal, *args, **kwargs)

    def _run_assertion(self, assert_func, *args, **kwargs):
        """
        Runs an assertion method, but catches any failure and adds it as a
        string to the messages list.
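        When the Checker is not protected (i.e. it is not being used inside
        a 'with' block), the assertion error propagates immediately instead.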
""" if self.protected: try: assert_func(*args, **kwargs) except ASSERTION_ERROR as ae: st = get_stack_trace_of_caller(2) self._add_exception(ASSERTION_ERROR, ae, st) else: assert_func(*args, **kwargs) def __enter__(self): self.protected = True return self def __exit__(self, _type, value, tb): self.protected = False if _type is not None: # An error occurred other than an assertion failure. # Return False to allow the Exception to be raised return False if len(self.messages) != 0: final_message = '\n'.join(self.messages) raise ASSERTION_ERROR(final_message) def true(self, *args, **kwargs): self._run_assertion(assert_true, *args, **kwargs) class AttrCheck(Check): """Class for attr checks, links and other common items.""" def __init__(self): super(AttrCheck, self).__init__() def fail(self, msg): self.true(False, msg) def contains_allowed_attrs(self, list, allowed_attrs, msg=None): # Check these attrs only are returned in create response for attr in list: if attr not in allowed_attrs: self.fail("%s should not contain '%s'" % (msg, attr)) def links(self, links): allowed_attrs = ['href', 'rel'] for link in links: self.contains_allowed_attrs(link, allowed_attrs, msg="Links") class CollectionCheck(Check): """Checks for elements in a dictionary.""" def __init__(self, name, collection): self.name = name self.collection = collection super(CollectionCheck, self).__init__() def element_equals(self, key, expected_value): if key not in self.collection: message = 'Element "%s.%s" does not exist.' % (self.name, key) self.fail(message) else: value = self.collection[key] self.equal(value, expected_value) def has_element(self, key, element_type): if key not in self.collection: message = 'Element "%s.%s" does not exist.' % (self.name, key) self.fail(message) else: value = self.collection[key] match = False if not isinstance(element_type, tuple): type_list = [element_type] else: type_list = element_type for possible_type in type_list: if possible_type is None: if value is None: match = True else: if isinstance(value, possible_type): match = True if not match: self.fail('Element "%s.%s" does not match any of these ' 'expected types: %s' % (self.name, key, type_list)) class TypeCheck(Check): """Checks for attributes in an object.""" def __init__(self, name, instance): self.name = name self.instance = instance super(TypeCheck, self).__init__() def _check_type(value, attribute_type): if not isinstance(value, attribute_type): self.fail("%s attribute %s is of type %s (expected %s)." % (self.name, attribute_name, type(value), attribute_type)) def has_field(self, attribute_name, attribute_type, additional_checks=None): if not hasattr(self.instance, attribute_name): self.fail("%s missing attribute %s." % (self.name, attribute_name)) else: value = getattr(self.instance, attribute_name) match = False if isinstance(attribute_type, tuple): type_list = attribute_type else: type_list = [attribute_type] for possible_type in type_list: if possible_type is None: if value is None: match = True else: if isinstance(value, possible_type): match = True if not match: self.fail("%s attribute %s is of type %s (expected one of " "the following: %s)." % (self.name, attribute_name, type(value), attribute_type)) if match and additional_checks: additional_checks(value) trove-5.0.0/trove/tests/util/client.py0000664000567000056710000000724012701410316021120 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`tests` -- Utility methods for tests.
==========================================

.. automodule:: utils
   :platform: Unix
   :synopsis: Tests for Trove.
"""

from proboscis import asserts

from trove.tests.config import CONFIG


def add_report_event_to(home, name):
    """Takes a module, class, etc, and an attribute name to decorate."""
    func = getattr(home, name)

    def __cb(*args, **kwargs):
        # %s would also turn each var into a string, but in some rare cases
        # an explicit repr() is less likely to raise an exception.
        arg_strs = [repr(arg) for arg in args]
        arg_strs += ['%s=%s' % (repr(key), repr(value))
                     for (key, value) in kwargs.items()]
        CONFIG.get_reporter().log("[RDC] Calling : %s(%s)..."
                                  % (name, ','.join(arg_strs)))
        value = func(*args, **kwargs)
        # get_reporter is a method, so call it before logging the result.
        CONFIG.get_reporter().log("[RDC] returned %s." % str(value))
        return value
    setattr(home, name, __cb)


class TestClient(object):
    """Decorates the rich clients with some extra methods.

    These methods are filled with test asserts, meaning if you use this you
    get the tests for free.
    """

    def __init__(self, real_client):
        """Accepts a normal client."""
        self.real_client = real_client

    def assert_http_code(self, expected_http_code):
        resp, body = self.real_client.client.last_response
        asserts.assert_equal(resp.status, expected_http_code)

    @property
    def last_http_code(self):
        resp, body = self.real_client.client.last_response
        return resp.status

    @staticmethod
    def find_flavor_self_href(flavor):
        self_links = [link for link in flavor.links if link['rel'] == 'self']
        asserts.assert_true(len(self_links) > 0, "Flavor had no self href!")
        flavor_href = self_links[0]['href']
        asserts.assert_false(flavor_href is None,
                             "Flavor link self href missing.")
        return flavor_href

    def find_flavors_by(self, condition, flavor_manager=None):
        flavor_manager = flavor_manager or self.flavors
        flavors = flavor_manager.list()
        return [flavor for flavor in flavors if condition(flavor)]

    def find_flavors_by_name(self, name, flavor_manager=None):
        return self.find_flavors_by(lambda flavor: flavor.name == name,
                                    flavor_manager)

    def find_flavors_by_ram(self, ram, flavor_manager=None):
        return self.find_flavors_by(lambda flavor: flavor.ram == ram,
                                    flavor_manager)

    def find_flavor_and_self_href(self, flavor_id, flavor_manager=None):
        """Given an ID, returns flavor and its self href."""
        flavor_manager = flavor_manager or self.flavors
        asserts.assert_false(flavor_id is None)
        flavor = flavor_manager.get(flavor_id)
        asserts.assert_false(flavor is None)
        flavor_href = self.find_flavor_self_href(flavor)
        return flavor, flavor_href

    def __getattr__(self, item):
        return getattr(self.real_client, item)
trove-5.0.0/trove/tests/__init__.py0000664000567000056710000000061412701410316020422 0ustar jenkinsjenkins00000000000000import os

DBAAS_API = "dbaas.api"
PRE_INSTANCES = "dbaas.api.pre_instances"
INSTANCES = "dbaas.api.instances"
POST_INSTANCES = "dbaas.api.post_instances"

SSH_CMD = ('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no '
           + ('-o LogLevel=quiet -i '
              + os.environ.get('TROVE_TEST_SSH_KEY_FILE') if
'TROVE_TEST_SSH_KEY_FILE' in os.environ else "")) trove-5.0.0/trove/tests/fakes/0000775000567000056710000000000012701410521017377 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/fakes/limits.py0000664000567000056710000000143712701410316021261 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common import limits ENABLED = False class FakeRateLimitingMiddleware(limits.RateLimitingMiddleware): def enabled(self): return ENABLED trove-5.0.0/trove/tests/fakes/guestagent.py0000664000567000056710000003262512701410316022131 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import time import eventlet from oslo_log import log as logging from trove.common import exception as rd_exception from trove.common import instance as rd_instance from trove.tests.util import unquote_user_host DB = {} LOG = logging.getLogger(__name__) BACKUP_SIZE = 0.14 class FakeGuest(object): def __init__(self, id): self.id = id self.users = {} self.dbs = {} self.root_was_enabled = False self.version = 1 self.grants = {} self.overrides = {} # Our default admin user. self._create_user({ "_name": "os_admin", "_host": "%", "_password": "12345", "_databases": [], }) def get_hwinfo(self): return {'mem_total': 524288, 'num_cpus': 1} def get_diagnostics(self): return { 'version': str(self.version), 'fd_size': 64, 'vm_size': 29096, 'vm_peak': 29160, 'vm_rss': 2872, 'vm_hwm': 2872, 'threads': 2 } def update_guest(self): LOG.debug("Updating guest %s" % self.id) self.version += 1 def _check_username(self, username): unsupported_chars = re.compile("^\s|\s$|'|\"|;|`|,|/|\\\\") if (not username or unsupported_chars.search(username) or ("%r" % username).find("\\") != -1): raise ValueError("'%s' is not a valid user name." % username) if len(username) > 16: raise ValueError("User name '%s' is too long. Max length = 16" % username) def change_passwords(self, users): for user in users: # Use the model to check validity. username = user['name'] self._check_username(username) hostname = user['host'] password = user['password'] if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s@%s cannot be found on the instance." 
% (username, hostname)) self.users[(username, hostname)]['password'] = password def update_attributes(self, username, hostname, user_attrs): LOG.debug("Updating attributes") self._check_username(username) if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s@%s cannot be found on the instance." % (username, hostname)) new_name = user_attrs.get('name') new_host = user_attrs.get('host') new_password = user_attrs.get('password') old_name = username old_host = hostname name = new_name or old_name host = new_host or old_host if new_name or new_host: old_grants = self.grants.get((old_name, old_host), set()) self._create_user({ "_name": name, "_host": host, "_password": self.users[(old_name, host)]['_password'], "_databases": [], }) self.grants[(name, host)] = old_grants del self.users[(old_name, old_host)] if new_password: self.users[(name, host)]['_password'] = new_password def create_database(self, databases): for db in databases: self.dbs[db['_name']] = db def create_user(self, users): for user in users: self._create_user(user) def _create_user(self, user): username = user['_name'] self._check_username(username) hostname = user['_host'] if hostname is None: hostname = '%' self.users[(username, hostname)] = user print("CREATING %s @ %s" % (username, hostname)) databases = [db['_name'] for db in user['_databases']] self.grant_access(username, hostname, databases) return user def delete_database(self, database): if database['_name'] in self.dbs: del self.dbs[database['_name']] def enable_root(self): self.root_was_enabled = True return self._create_user({ "_name": "root", "_host": "%", "_password": "12345", "_databases": [], }) def enable_root_with_password(self, root_password=None): self.root_was_enabled = True return self._create_user({ "_name": "root", "_host": "%", "_password": "12345", "_databases": [], }) def disable_root(self): self.delete_user({ "_name": "root", "_host": "%"}) def delete_user(self, user): username = user['_name'] self._check_username(username) hostname = user['_host'] self.grants[(username, hostname)] = set() if (username, hostname) in self.users: del self.users[(username, hostname)] def is_root_enabled(self): return self.root_was_enabled def _list_resource(self, resource, limit=None, marker=None, include_marker=False): names = sorted([name for name in resource]) if marker in names: if not include_marker: # Cut off everything left of and including the marker item. names = names[names.index(marker) + 1:] else: names = names[names.index(marker):] next_marker = None if limit: if len(names) > limit: next_marker = names[limit - 1] names = names[:limit] return [resource[name] for name in names], next_marker def list_databases(self, limit=None, marker=None, include_marker=False): return self._list_resource(self.dbs, limit, marker, include_marker) def list_users(self, limit=None, marker=None, include_marker=False): # The markers for users are a composite of the username and hostname. names = sorted(["%s@%s" % (name, host) for (name, host) in self.users]) if marker in names: if not include_marker: # Cut off everything left of and including the marker item. 
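# The next page then begins immediately after the marker; the else
# branch below keeps the marker itself when include_marker is True.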
names = names[names.index(marker) + 1:] else: names = names[names.index(marker):] next_marker = None if limit: if len(names) > limit: next_marker = names[limit - 1] names = names[:limit] return ([self.users[unquote_user_host(userhost)] for userhost in names], next_marker) def get_user(self, username, hostname): self._check_username(username) for (u, h) in self.users: print("%r @ %r" % (u, h)) if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s@%s cannot be found on the instance." % (username, hostname)) return self.users.get((username, hostname), None) def prepare(self, memory_mb, packages, databases, users, device_path=None, mount_point=None, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): from trove.guestagent.models import AgentHeartBeat from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus LOG.debug("users... %s" % users) LOG.debug("databases... %s" % databases) instance_name = DBInstance.find_by(id=self.id).name self.create_user(users) self.create_database(databases) self.overrides = overrides or {} def update_db(): status = InstanceServiceStatus.find_by(instance_id=self.id) if instance_name.endswith('GUEST_ERROR'): status.status = rd_instance.ServiceStatuses.FAILED else: status.status = rd_instance.ServiceStatuses.RUNNING status.save() AgentHeartBeat.create(instance_id=self.id) eventlet.spawn_after(3.0, update_db) def _set_task_status(self, new_status='RUNNING'): from trove.instance.models import InstanceServiceStatus print("Setting status to %s" % new_status) states = {'RUNNING': rd_instance.ServiceStatuses.RUNNING, 'SHUTDOWN': rd_instance.ServiceStatuses.SHUTDOWN, } status = InstanceServiceStatus.find_by(instance_id=self.id) status.status = states[new_status] status.save() def restart(self): # All this does is restart, and shut off the status updates while it # does so. So there's actually nothing to do to fake this out except # take a nap. print("Sleeping for a second.") time.sleep(1) self._set_task_status('RUNNING') def reset_configuration(self, config): # There's nothing to do here, since there is no config to update. pass def start_db_with_conf_changes(self, config_contents): time.sleep(2) self._set_task_status('RUNNING') def stop_db(self, do_not_start_on_reboot=False): self._set_task_status('SHUTDOWN') def get_volume_info(self): """Return used and total volume filesystem information in GB.""" return {'used': 0.16, 'total': 4.0} def grant_access(self, username, hostname, databases): """Add a database to a users's grant list.""" if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s cannot be found on the instance." % username) current_grants = self.grants.get((username, hostname), set()) for db in databases: current_grants.add(db) self.grants[(username, hostname)] = current_grants def revoke_access(self, username, hostname, database): """Remove a database from a users's grant list.""" if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s cannot be found on the instance." % username) if database not in self.grants.get((username, hostname), set()): raise rd_exception.DatabaseNotFound( "Database %s cannot be found on the instance." 
% database) current_grants = self.grants.get((username, hostname), set()) if database in current_grants: current_grants.remove(database) self.grants[(username, hostname)] = current_grants def list_access(self, username, hostname): if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s cannot be found on the instance." % username) current_grants = self.grants.get((username, hostname), set()) dbs = [{'_name': db, '_collate': '', '_character_set': '', } for db in current_grants] return dbs def create_backup(self, backup_info): from trove.backup.models import Backup from trove.backup.state import BackupState backup = Backup.get_by_id(context=None, backup_id=backup_info['id']) def finish_create_backup(): backup.state = BackupState.COMPLETED backup.location = 'http://localhost/path/to/backup' backup.checksum = 'fake-md5-sum' backup.size = BACKUP_SIZE backup.save() eventlet.spawn_after(8, finish_create_backup) def mount_volume(self, device_path=None, mount_point=None): pass def unmount_volume(self, device_path=None, mount_point=None): pass def resize_fs(self, device_path=None, mount_point=None): pass def update_overrides(self, overrides, remove=False): self.overrides = overrides def apply_overrides(self, overrides): self.overrides = overrides def get_replication_snapshot(self, snapshot_info, replica_source_config=None): self.create_backup(snapshot_info) return { 'dataset': { 'datastore_manager': 'mysql', 'dataset_size': '0.0', 'volume_size': '10.0', 'snapshot_id': None }, 'replication_strategy': 'replication_strategy', 'master': '1', 'log_position': '100' } def attach_replication_slave(self, snapshot, slave_config): pass def backup_required_for_replication(self): return True def module_list(self, context, include_contents=False): return [] def module_apply(self, context, modules=None): return [] def module_remove(self, context, module=None): pass def get_or_create(id): if id not in DB: DB[id] = FakeGuest(id) return DB[id] def fake_create_guest_client(context, id): return get_or_create(id) trove-5.0.0/trove/tests/fakes/dns.py0000664000567000056710000000636512701410316020551 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from proboscis.asserts import fail from trove.dns import driver LOG = logging.getLogger(__name__) ENTRIES = {} class FakeDnsDriver(driver.DnsDriver): def create_entry(self, entry, content): """Pretend to create a DNS entry somewhere. Since nothing else tests that this works, there's nothing more to do here. """ entry.content = content assert_true(entry.name not in ENTRIES) LOG.debug("Adding fake DNS entry for hostname %s." 
% entry.name) ENTRIES[entry.name] = entry def delete_entry(self, name, type, dns_zone=None): LOG.debug("Deleting fake DNS entry for hostname %s" % name) ENTRIES.pop(name, None) class FakeDnsInstanceEntryFactory(driver.DnsInstanceEntryFactory): def create_entry(self, instance_id): # Construct hostname using pig-latin. hostname = "%s-lay" % instance_id LOG.debug("Mapping instance_id %s to hostname %s" % (instance_id, hostname)) return driver.DnsEntry(name=hostname, content=None, type="A", ttl=42, dns_zone=None) class FakeDnsChecker(object): """Used by tests to make sure a DNS record was written in fake mode.""" def __call__(self, mgmt_instance): """ Given an instance ID and ip address, confirm that the proper DNS record was stored in Designate or some other DNS system. """ entry = FakeDnsInstanceEntryFactory().create_entry(mgmt_instance.id) # Confirm DNS entry shown to user is what we expect. assert_equal(entry.name, mgmt_instance.hostname) hostname = entry.name for i in ENTRIES: print(i) print("\t%s" % ENTRIES[i]) assert_true(hostname in ENTRIES, "Hostname %s not found in DNS entries!" % hostname) entry = ENTRIES[hostname] # See if the ip address assigned to the record is what we expect. # This isn't perfect, but for Fake Mode its good enough. If we # really want to know exactly what it should be then we should restore # the ability to return the IP from the API as well as a hostname, # since that lines up to the DnsEntry's content field. ip_addresses = mgmt_instance.server['addresses'] for network_name, ip_list in ip_addresses.items(): for ip in ip_list: if entry.content == ip['addr']: return fail("Couldn't find IP address %s among these values: %s" % (entry.content, ip_addresses)) trove-5.0.0/trove/tests/fakes/taskmanager.py0000664000567000056710000000370212701410320022245 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
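#
# In fake mode there is no real message bus. FakeRpcClient.call() looks the
# method up on the real taskmanager Manager and invokes it synchronously,
# while cast() defers the same dispatch slightly via eventlet.spawn_after so
# it still behaves like an asynchronous message. A rough sketch of the
# effect once monkey_patch() has run (the method name is illustrative only):
#
#     api = API(context)
#     api.detach_replica(instance_id)
#     # ...which ends up invoking Manager().detach_replica(context, ...)
#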
# from collections import defaultdict import eventlet from oslo_log import log as logging from trove import rpc from trove.taskmanager.api import API from trove.taskmanager.manager import Manager import trove.tests.util.usage as usage LOG = logging.getLogger(__name__) MESSAGE_QUEUE = defaultdict(list) class FakeRpcClient(object): def call(self, context, method_name, *args, **kwargs): manager, method = self._get_tm_method(method_name) return method(manager, context, *args, **kwargs) def cast(self, context, method_name, *args, **kwargs): manager, method = self._get_tm_method(method_name) def func(): method(manager, context, *args, **kwargs) eventlet.spawn_after(0.1, func) def _get_tm_method(self, method_name): manager = Manager() method = getattr(Manager, method_name) return manager, method def prepare(self, *args, **kwargs): return self class FakeNotifier: def info(self, ctxt, event_type, payload): usage.notify(event_type, payload) def monkey_patch(): def fake_get_client(self, *args, **kwargs): return FakeRpcClient() def fake_get_notifier(service=None, host=None, publisher_id=None): return FakeNotifier() API.get_client = fake_get_client rpc.get_notifier = fake_get_notifier trove-5.0.0/trove/tests/fakes/__init__.py0000664000567000056710000000136012701410316021512 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Implements a fake version of the models code so that the server can be stood up and run under test quickly. """ trove-5.0.0/trove/tests/fakes/common.py0000664000567000056710000000171612701410316021250 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code to help in faking the models.""" from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) def authorize(context): if not context.is_admin: raise nova_exceptions.Forbidden(403, "Forbidden") trove-5.0.0/trove/tests/fakes/conf.py0000664000567000056710000000137012701410316020701 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class FakeConf(object): def __init__(self, conf_dict): self._conf = conf_dict def __getattr__(self, name): return self._conf[name] trove-5.0.0/trove/tests/fakes/keystone.py0000664000567000056710000000514312701410316021617 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class AuthProtocol(object): def __init__(self, app, conf): self.conf = conf self.app = app def __call__(self, env, start_response): token = self._get_user_token_from_header(env) user_headers = self._get_info_from_token(token) self._add_headers(env, user_headers) return self.app(env, start_response) def _header_to_env_var(self, key): """Convert header to wsgi env variable. :param key: http header name (ex. 'X-Auth-Token') :return wsgi env variable name (ex. 'HTTP_X_AUTH_TOKEN') """ return 'HTTP_%s' % key.replace('-', '_').upper() def _add_headers(self, env, headers): """Add http headers to environment.""" for (k, v) in headers.iteritems(): env_key = self._header_to_env_var(k) env[env_key] = v def get_admin_token(self): return "ABCDEF0123456789" def _get_info_from_token(self, token): if token.startswith("admin"): role = "admin,%s" % token else: role = token return { 'X_IDENTITY_STATUS': 'Confirmed', 'X_TENANT_ID': token, 'X_TENANT_NAME': token, 'X_USER_ID': token, 'X_USER_NAME': token, 'X_ROLE': role, } def _get_header(self, env, key, default=None): # Copied from keystone. env_key = self._header_to_env_var(key) return env.get(env_key, default) def _get_user_token_from_header(self, env): token = self._get_header(env, 'X-Auth-Token', self._get_header(env, 'X-Storage-Token')) if token: return token else: raise RuntimeError('Unable to find token in headers') def filter_factory(global_conf, **local_conf): """Fakes a keystone filter.""" conf = global_conf.copy() conf.update(local_conf) def auth_filter(app): return AuthProtocol(app, conf) return auth_filter trove-5.0.0/trove/tests/fakes/nova.py0000664000567000056710000006577012701410316020735 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common.exception import PollTimeOut from trove.common import instance as rd_instance from trove.tests.fakes.common import authorize import collections import eventlet import uuid LOG = logging.getLogger(__name__) FAKE_HOSTS = ["fake_host_1", "fake_host_2"] class FakeFlavor(object): def __init__(self, id, disk, name, ram, ephemeral=0, vcpus=10): self.id = id self.disk = disk self.name = name self.ram = ram self.vcpus = vcpus self.ephemeral = ephemeral @property def links(self): url = ("http://localhost:8774/v2/5064d71eb09c47e1956cf579822bae9a/" "flavors/%s") % self.id return [{"href": url, "rel": link_type} for link_type in ['self', 'bookmark']] @property def href_suffix(self): return "flavors/%s" % self.id class FakeFlavors(object): def __init__(self): self.db = {} self._add(1, 0, "m1.tiny", 512) self._add(2, 20, "m1.small", 2048) self._add(3, 40, "m1.medium", 4096) self._add(4, 80, "m1.large", 8192) self._add(5, 160, "m1.xlarge", 16384) self._add(6, 0, "m1.nano", 64) self._add(7, 0, "m1.micro", 128) self._add(8, 2, "m1.rd-smaller", 768) self._add(9, 10, "tinier", 506) self._add(10, 2, "m1.rd-tiny", 512) self._add(11, 0, "eph.rd-tiny", 512, 1) self._add(12, 20, "eph.rd-smaller", 768, 2) self._add("custom", 25, "custom.small", 512, 1) # self._add(13, 20, "m1.heat", 512) def _add(self, *args, **kwargs): new_flavor = FakeFlavor(*args, **kwargs) self.db[new_flavor.id] = new_flavor def get(self, id): try: id = int(id) except ValueError: pass if id not in self.db: raise nova_exceptions.NotFound(404, "Flavor id not found %s" % id) return self.db[id] def get_by_href(self, href): for id in self.db: value = self.db[id] # Use inexact match since faking the exact endpoints would be # difficult. if href.endswith(value.href_suffix): return value raise nova_exceptions.NotFound(404, "Flavor href not found %s" % href) def list(self): return [self.get(id) for id in self.db] class FakeServer(object): next_local_id = 0 def __init__(self, parent, owner, id, name, image_id, flavor_ref, block_device_mapping, volumes): self.owner = owner # This is a context. self.id = id self.parent = parent self.name = name self.image_id = image_id self.flavor_ref = flavor_ref self.old_flavor_ref = None self._current_status = "BUILD" self.volumes = volumes # This is used by "RdServers". Its easier to compute the # fake value in this class's initializer. 
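        # NOTE: "self.next_local_id += 1" below rebinds the counter as an
        # instance attribute rather than incrementing the class-level
        # FakeServer.next_local_id, so each new fake server reads the
        # unchanged class value.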
self._local_id = self.next_local_id self.next_local_id += 1 info_vols = [] for volume in self.volumes: info_vols.append({'id': volume.id}) volume.set_attachment(id) volume.schedule_status("in-use", 1) self.host = FAKE_HOSTS[0] self.old_host = None setattr(self, 'OS-EXT-AZ:availability_zone', 'nova') self._info = {'os:volumes': info_vols} @property def addresses(self): return {"private": [{"addr": "123.123.123.123"}]} def confirm_resize(self): if self.status != "VERIFY_RESIZE": raise RuntimeError("Not in resize confirm mode.") self._current_status = "ACTIVE" def revert_resize(self): if self.status != "VERIFY_RESIZE": raise RuntimeError("Not in resize confirm mode.") self.host = self.old_host self.old_host = None self.flavor_ref = self.old_flavor_ref self.old_flavor_ref = None self._current_status = "ACTIVE" def reboot(self): LOG.debug("Rebooting server %s" % (self.id)) def set_to_active(): self._current_status = "ACTIVE" self.parent.schedule_simulate_running_server(self.id, 1.5) self._current_status = "REBOOT" eventlet.spawn_after(1, set_to_active) def delete(self): self.schedule_status = [] # TODO(pdmars): This is less than ideal, but a quick way to force it # into the error state before scheduling the delete. if (self.name.endswith("_ERROR_ON_DELETE") and self._current_status != "SHUTDOWN"): # Fail to delete properly the first time, just set the status # to SHUTDOWN and break. It's important that we only fail to delete # once in fake mode. self._current_status = "SHUTDOWN" return self._current_status = "SHUTDOWN" self.parent.schedule_delete(self.id, 1.5) @property def flavor(self): return FLAVORS.get_by_href(self.flavor_ref).__dict__ @property def links(self): url = "https://localhost:9999/v1.0/1234/instances/%s" % self.id return [{"href": url, "rel": link_type} for link_type in ['self', 'bookmark']] def migrate(self, force_host=None): self.resize(None, force_host) def resize(self, new_flavor_id=None, force_host=None): self._current_status = "RESIZE" if self.name.endswith("_RESIZE_TIMEOUT"): raise PollTimeOut() def set_to_confirm_mode(): self._current_status = "VERIFY_RESIZE" def set_to_active(): self.parent.schedule_simulate_running_server(self.id, 1.5) eventlet.spawn_after(1, set_to_active) def change_host(): self.old_host = self.host if not force_host: self.host = [host for host in FAKE_HOSTS if host != self.host][0] else: self.host = force_host def set_flavor(): if self.name.endswith("_RESIZE_ERROR"): self._current_status = "ACTIVE" return if new_flavor_id is None: # Migrations are flavorless flavor resizes. # A resize MIGHT change the host, but a migrate # deliberately does. LOG.debug("Migrating fake instance.") eventlet.spawn_after(0.75, change_host) else: LOG.debug("Resizing fake instance.") self.old_flavor_ref = self.flavor_ref flavor = self.parent.flavors.get(new_flavor_id) self.flavor_ref = flavor.links[0]['href'] eventlet.spawn_after(1, set_to_confirm_mode) eventlet.spawn_after(0.8, set_flavor) def schedule_status(self, new_status, time_from_now): """Makes a new status take effect at the given time.""" def set_status(): self._current_status = new_status eventlet.spawn_after(time_from_now, set_status) @property def status(self): return self._current_status @property def created(self): return "2012-01-25T21:55:51Z" @property def updated(self): return "2012-01-25T21:55:51Z" @property def tenant(self): # This is on the RdServer extension type. 
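        # The owner is the creating context; exposing its tenant here lets
        # FakeAccounts filter fake servers per tenant.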
return self.owner.tenant @property def tenant_id(self): return self.owner.tenant # The global var contains the servers dictionary in use for the life of these # tests. FAKE_SERVERS_DB = {} class FakeServers(object): def __init__(self, context, flavors): self.context = context self.db = FAKE_SERVERS_DB self.flavors = flavors def can_see(self, id): """Can this FakeServers, with its context, see some resource?""" server = self.db[id] return (self.context.is_admin or server.owner.tenant == self.context.tenant) def create(self, name, image_id, flavor_ref, files=None, userdata=None, block_device_mapping=None, volume=None, security_groups=None, availability_zone=None, nics=None, config_drive=False): id = "FAKE_%s" % uuid.uuid4() if volume: volume = self.volumes.create(volume['size'], volume['name'], volume['description']) while volume.status == "BUILD": eventlet.sleep(0.1) if volume.status != "available": LOG.info(_("volume status = %s") % volume.status) raise nova_exceptions.ClientException("Volume was bad!") mapping = "%s::%s:%s" % (volume.id, volume.size, 1) block_device_mapping = {'vdb': mapping} volumes = [volume] LOG.debug("Fake Volume Create %(volumeid)s with " "status %(volumestatus)s" % {'volumeid': volume.id, 'volumestatus': volume.status}) else: volumes = self._get_volumes_from_bdm(block_device_mapping) for volume in volumes: volume.schedule_status('in-use', 1) server = FakeServer(self, self.context, id, name, image_id, flavor_ref, block_device_mapping, volumes) self.db[id] = server if name.endswith('SERVER_ERROR'): raise nova_exceptions.ClientException("Fake server create error.") if availability_zone == 'BAD_ZONE': raise nova_exceptions.ClientException("The requested availability " "zone is not available.") if nics: if 'port-id' in nics[0] and nics[0]['port-id'] == "UNKNOWN": raise nova_exceptions.ClientException("The requested " "port-id is not " "available.") server.schedule_status("ACTIVE", 1) LOG.info("FAKE_SERVERS_DB : %s" % str(FAKE_SERVERS_DB)) return server def _get_volumes_from_bdm(self, block_device_mapping): volumes = [] if block_device_mapping is not None: # block_device_mapping is a dictionary, where the key is the # device name on the compute instance and the mapping info is a # set of fields in a string, separated by colons. # For each device, find the volume, and record the mapping info # to another fake object and attach it to the volume # so that the fake API can later retrieve this. 
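            # Example mapping, matching the "%s::%s:%s" string built in
            # create() above: {'vdb': '<volume-id>::1:1'} splits into
            # (id='<volume-id>', _type='', size='1', delete_on_terminate='1').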
for device in block_device_mapping: mapping = block_device_mapping[device] (id, _type, size, delete_on_terminate) = mapping.split(":") volume = self.volumes.get(id) volume.mapping = FakeBlockDeviceMappingInfo( id, device, _type, size, delete_on_terminate) volumes.append(volume) return volumes def get(self, id): if id not in self.db: LOG.error(_("Couldn't find server id %(id)s, collection=%(db)s") % {'id': id, 'db': self.db}) raise nova_exceptions.NotFound(404, "Not found") else: if self.can_see(id): return self.db[id] else: raise nova_exceptions.NotFound(404, "Bad permissions") def get_server_volumes(self, server_id): """Fake method we've added to grab servers from the volume.""" return [volume.mapping for volume in self.get(server_id).volumes if volume.mapping is not None] def list(self): return [v for (k, v) in self.db.items() if self.can_see(v.id)] def schedule_delete(self, id, time_from_now): def delete_server(): LOG.info(_("Simulated event ended, deleting server %s.") % id) del self.db[id] eventlet.spawn_after(time_from_now, delete_server) def schedule_simulate_running_server(self, id, time_from_now): from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus def set_server_running(): instance = DBInstance.find_by(compute_instance_id=id) LOG.debug("Setting server %s to running" % instance.id) status = InstanceServiceStatus.find_by(instance_id=instance.id) status.status = rd_instance.ServiceStatuses.RUNNING status.save() eventlet.spawn_after(time_from_now, set_server_running) class FakeRdServer(object): def __init__(self, server): self.server = server self.deleted = False self.deleted_at = None # Not sure how to simulate "True" for this. self.local_id = server._local_id def __getattr__(self, name): return getattr(self.server, name) class FakeRdServers(object): def __init__(self, servers): self.servers = servers def get(self, id): return FakeRdServer(self.servers.get(id)) def list(self): # Attach the extra Rd Server stuff to the normal server. return [FakeRdServer(server) for server in self.servers.list()] class FakeServerVolumes(object): def __init__(self, context): self.context = context def get_server_volumes(self, server_id): class ServerVolumes(object): def __init__(self, block_device_mapping): LOG.debug("block_device_mapping = %s" % block_device_mapping) device = block_device_mapping['vdb'] (self.volumeId, self.type, self.size, self.delete_on_terminate) = device.split(":") fake_servers = FakeServers(self.context, FLAVORS) server = fake_servers.get(server_id) return [ServerVolumes(server.block_device_mapping)] class FakeVolume(object): def __init__(self, parent, owner, id, size, name, description, volume_type): self.attachments = [] self.parent = parent self.owner = owner # This is a context. self.id = id self.size = size self.name = name self.description = description self._current_status = "BUILD" # For some reason we grab this thing from device then call it mount # point. 
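        # All fake volumes report device "vdb", matching the block device
        # key that FakeServers.create() uses for its volume mapping.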
self.device = "vdb" self.volume_type = volume_type def __repr__(self): msg = ("FakeVolume(id=%s, size=%s, name=%s, " "description=%s, _current_status=%s)") params = (self.id, self.size, self.name, self.description, self._current_status) return (msg % params) @property def availability_zone(self): return "fake-availability-zone" @property def created_at(self): return "2001-01-01-12:30:30" def get(self, key): return getattr(self, key) def schedule_status(self, new_status, time_from_now): """Makes a new status take effect at the given time.""" def set_status(): self._current_status = new_status eventlet.spawn_after(time_from_now, set_status) def set_attachment(self, server_id): """Fake method we've added to set attachments. Idempotent.""" for attachment in self.attachments: if attachment['server_id'] == server_id: return # Do nothing self.attachments.append({'server_id': server_id, 'device': self.device}) @property def status(self): return self._current_status class FakeBlockDeviceMappingInfo(object): def __init__(self, id, device, _type, size, delete_on_terminate): self.volumeId = id self.device = device self.type = _type self.size = size self.delete_on_terminate = delete_on_terminate FAKE_VOLUMES_DB = {} class FakeVolumes(object): def __init__(self, context): self.context = context self.db = FAKE_VOLUMES_DB def can_see(self, id): """Can this FakeVolumes, with its context, see some resource?""" server = self.db[id] return (self.context.is_admin or server.owner.tenant == self.context.tenant) def get(self, id): if id not in self.db: LOG.error(_("Couldn't find volume id %(id)s, collection=%(db)s") % {'id': id, 'db': self.db}) raise nova_exceptions.NotFound(404, "Not found") else: if self.can_see(id): return self.db[id] else: raise nova_exceptions.NotFound(404, "Bad permissions") def create(self, size, name=None, description=None, volume_type=None): id = "FAKE_VOL_%s" % uuid.uuid4() volume = FakeVolume(self, self.context, id, size, name, description, volume_type) self.db[id] = volume if size == 9: volume.schedule_status("error", 2) elif size == 13: raise Exception("No volume for you!") else: volume.schedule_status("available", 2) LOG.debug("Fake volume created %(volumeid)s with " "status %(volumestatus)s" % {'volumeid': volume.id, 'volumestatus': volume.status}) LOG.info("FAKE_VOLUMES_DB : %s" % FAKE_VOLUMES_DB) return volume def list(self, detailed=True): return [self.db[key] for key in self.db] def extend(self, volume_id, new_size): LOG.debug("Resize volume id (%(volumeid)s) to size (%(size)s)" % {'volumeid': volume_id, 'size': new_size}) volume = self.get(volume_id) if volume._current_status != 'available': raise Exception("Invalid volume status: " "expected 'in-use' but was '%s'" % volume._current_status) def finish_resize(): volume.size = new_size eventlet.spawn_after(1.0, finish_resize) def delete_server_volume(self, server_id, volume_id): volume = self.get(volume_id) if volume._current_status != 'in-use': raise Exception("Invalid volume status: " "expected 'in-use' but was '%s'" % volume._current_status) def finish_detach(): volume._current_status = "available" eventlet.spawn_after(1.0, finish_detach) def create_server_volume(self, server_id, volume_id, device_path): volume = self.get(volume_id) if volume._current_status != "available": raise Exception("Invalid volume status: " "expected 'available' but was '%s'" % volume._current_status) def finish_attach(): volume._current_status = "in-use" eventlet.spawn_after(1.0, finish_attach) class FakeAccount(object): def __init__(self, id, 
servers): self.id = id self.servers = self._servers_to_dict(servers) def _servers_to_dict(self, servers): ret = [] for server in servers: server_dict = {} server_dict['id'] = server.id server_dict['name'] = server.name server_dict['status'] = server.status server_dict['host'] = server.host ret.append(server_dict) return ret class FakeAccounts(object): def __init__(self, context, servers): self.context = context self.db = FAKE_SERVERS_DB self.servers = servers def _belongs_to_tenant(self, tenant, id): server = self.db[id] return server.tenant == tenant def get_instances(self, id): authorize(self.context) servers = [v for (k, v) in self.db.items() if self._belongs_to_tenant(id, v.id)] return FakeAccount(id, servers) FLAVORS = FakeFlavors() class FakeHost(object): def __init__(self, name, servers): self.name = name self.servers = servers self.instances = [] self.percentUsed = 0 self.totalRAM = 0 self.usedRAM = 0 @property def instanceCount(self): return len(self.instances) def recalc(self): """ This fake-mode exclusive method recalculates the fake data this object passes back. """ self.instances = [] self.percentUsed = 0 self.totalRAM = 32000 # 16384 self.usedRAM = 0 for server in self.servers.list(): print(server) if server.host != self.name: print("\t...not on this host.") continue self.instances.append({ 'uuid': server.id, 'name': server.name, 'status': server.status }) if (str(server.flavor_ref).startswith('http:') or str(server.flavor_ref).startswith('https:')): flavor = FLAVORS.get_by_href(server.flavor_ref) else: flavor = FLAVORS.get(server.flavor_ref) ram = flavor.ram self.usedRAM += ram decimal = float(self.usedRAM) / float(self.totalRAM) self.percentUsed = int(decimal * 100) class FakeHosts(object): def __init__(self, servers): # Use an ordered dict to make the results of the fake api call # return in the same order for the example generator. 
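        # (Plain dicts did not guarantee insertion order on the Python
        # versions this code targets, so OrderedDict keeps the host
        # listings stable across runs.)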
self.hosts = collections.OrderedDict() for host in FAKE_HOSTS: self.add_host(FakeHost(host, servers)) def add_host(self, host): self.hosts[host.name] = host return host def get(self, name): try: self.hosts[name].recalc() return self.hosts[name] except KeyError: raise nova_exceptions.NotFound(404, "Host not found %s" % name) def list(self): for name in self.hosts: self.hosts[name].recalc() return [self.hosts[name] for name in self.hosts] class FakeRdStorage(object): def __init__(self, name): self.name = name self.type = "" self.used = 0 self.capacity = {} self.provision = {} def recalc(self): self.type = "test_type" self.used = 10 self.capacity['total'] = 100 self.capacity['available'] = 90 self.provision['total'] = 50 self.provision['available'] = 40 self.provision['percent'] = 10 class FakeRdStorages(object): def __init__(self): self.storages = {} self.add_storage(FakeRdStorage("fake_storage")) def add_storage(self, storage): self.storages[storage.name] = storage return storage def list(self): for name in self.storages: self.storages[name].recalc() return [self.storages[name] for name in self.storages] class FakeSecurityGroup(object): def __init__(self, name=None, description=None, context=None): self.name = name self.description = description self.id = "FAKE_SECGRP_%s" % uuid.uuid4() self.rules = {} def get_id(self): return self.id def add_rule(self, fakeSecGroupRule): self.rules.append(fakeSecGroupRule) return self.rules def get_rules(self): result = "" for rule in self.rules: result = result + rule.data() return result def data(self): return { 'id': self.id, 'name': self.name, 'description': self.description } class FakeSecurityGroups(object): def __init__(self, context=None): self.context = context self.securityGroups = {} def create(self, name=None, description=None): secGrp = FakeSecurityGroup(name, description) self.securityGroups[secGrp.get_id()] = secGrp return secGrp def delete(self, group_id): pass def list(self): pass class FakeSecurityGroupRule(object): def __init__(self, ip_protocol=None, from_port=None, to_port=None, cidr=None, parent_group_id=None, context=None): self.group_id = parent_group_id self.protocol = ip_protocol self.from_port = from_port self.to_port = to_port self.cidr = cidr self.context = context self.id = "FAKE_SECGRP_RULE_%s" % uuid.uuid4() def get_id(self): return self.id def data(self): return { 'id': self.id, 'group_id': self.group_id, 'protocol': self.protocol, 'from_port': self.from_port, 'to_port': self.to_port, 'cidr': self.cidr } class FakeSecurityGroupRules(object): def __init__(self, context=None): self.context = context self.securityGroupRules = {} def create(self, parent_group_id, ip_protocol, from_port, to_port, cidr): secGrpRule = FakeSecurityGroupRule(ip_protocol, from_port, to_port, cidr, parent_group_id) self.securityGroupRules[secGrpRule.get_id()] = secGrpRule return secGrpRule def delete(self, id): if id in self.securityGroupRules: del self.securityGroupRules[id] class FakeClient(object): def __init__(self, context): self.context = context self.flavors = FLAVORS self.servers = FakeServers(context, self.flavors) self.volumes = FakeVolumes(context) self.servers.volumes = self.volumes self.accounts = FakeAccounts(context, self.servers) self.rdhosts = FakeHosts(self.servers) self.rdstorage = FakeRdStorages() self.rdservers = FakeRdServers(self.servers) self.security_groups = FakeSecurityGroups(context) self.security_group_rules = FakeSecurityGroupRules(context) def get_server_volumes(self, server_id): return 
self.servers.get_server_volumes(server_id) def rescan_server_volume(self, server, volume_id): LOG.info("FAKE rescanning volume.") CLIENT_DATA = {} def get_client_data(context): if context not in CLIENT_DATA: nova_client = FakeClient(context) volume_client = FakeClient(context) volume_client.servers = nova_client CLIENT_DATA[context] = { 'nova': nova_client, 'volume': volume_client } return CLIENT_DATA[context] def fake_create_nova_client(context): return get_client_data(context)['nova'] def fake_create_nova_volume_client(context): return get_client_data(context)['volume'] def fake_create_cinder_client(context): return get_client_data(context)['volume'] trove-5.0.0/trove/tests/fakes/swift.py0000664000567000056710000005104412701410316021113 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from hashlib import md5 from mock import MagicMock, patch import httplib import json import os import socket import swiftclient import swiftclient.client as swift_client import uuid from oslo_log import log as logging from swiftclient import client as swift from trove.common.i18n import _ # noqa LOG = logging.getLogger(__name__) class FakeSwiftClient(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Connection(self, *args, **kargs): LOG.debug("fake FakeSwiftClient Connection") return FakeSwiftConnection() class FakeSwiftConnection(object): """Logging calls instead of executing.""" MANIFEST_HEADER_KEY = 'X-Object-Manifest' url = 'http://mockswift/v1' def __init__(self, *args, **kwargs): self.manifest_prefix = None self.manifest_name = None self.container_objects = {} def get_auth(self): return ( u"http://127.0.0.1:8080/v1/AUTH_c7b038976df24d96bf1980f5da17bd89", u'MIINrwYJKoZIhvcNAQcCoIINoDCCDZwCAQExCTAHBgUrDgMCGjCCDIgGCSqGSIb3' u'DQEHAaCCDHkEggx1eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAi' u'MjAxMy0wMy0xOFQxODoxMzoyMC41OTMyNzYiLCAiZXhwaXJlcyI6ICIyMDEzLTAz' u'LTE5VDE4OjEzOjIwWiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7' u'ImVuYWJsZWQiOiB0cnVlLCAiZGVzY3JpcHRpb24iOiBudWxsLCAibmFtZSI6ICJy' u'ZWRkd2FyZiIsICJpZCI6ICJjN2IwMzg5NzZkZjI0ZDk2YmYxOTgwZjVkYTE3YmQ4' u'OSJ9fSwgInNlcnZpY2VDYXRhbG9nIjogW3siZW5kcG9pbnRzIjogW3siYWRtaW5') def get_account(self): return ({'content-length': '2', 'accept-ranges': 'bytes', 'x-timestamp': '1363049003.92304', 'x-trans-id': 'tx9e5da02c49ed496395008309c8032a53', 'date': 'Tue, 10 Mar 2013 00:43:23 GMT', 'x-account-bytes-used': '0', 'x-account-container-count': '0', 'content-type': 'application/json; charset=utf-8', 'x-account-object-count': '0'}, []) def head_container(self, container): LOG.debug("fake head_container(%s)" % container) if container == 'missing_container': raise swift.ClientException('fake exception', http_status=httplib.NOT_FOUND) elif container == 'unauthorized_container': raise swift.ClientException('fake exception', http_status=httplib.UNAUTHORIZED) elif container == 'socket_error_on_head': raise 
socket.error(111, 'ECONNREFUSED') pass def put_container(self, container): LOG.debug("fake put_container(%s)" % container) pass def get_container(self, container, **kwargs): LOG.debug("fake get_container(%s)" % container) fake_header = None fake_body = [{'name': 'backup_001'}, {'name': 'backup_002'}, {'name': 'backup_003'}] return fake_header, fake_body def head_object(self, container, name): LOG.debug("fake put_container(%(container)s, %(name)s)" % {'container': container, 'name': name}) checksum = md5() if self.manifest_prefix and self.manifest_name == name: for object_name in sorted(self.container_objects.iterkeys()): object_checksum = md5(self.container_objects[object_name]) # The manifest file etag for a HEAD or GET is the checksum of # the concatenated checksums. checksum.update(object_checksum.hexdigest()) # this is included to test bad swift segment etags if name.startswith("bad_manifest_etag_"): return {'etag': '"this_is_an_intentional_bad_manifest_etag"'} else: if name in self.container_objects: checksum.update(self.container_objects[name]) else: return {'etag': 'fake-md5-sum'} # Currently a swift HEAD object returns etag with double quotes return {'etag': '"%s"' % checksum.hexdigest()} def get_object(self, container, name, resp_chunk_size=None): LOG.debug("fake get_object(%(container)s, %(name)s)" % {'container': container, 'name': name}) if container == 'socket_error_on_get': raise socket.error(111, 'ECONNREFUSED') if 'metadata' in name: fake_object_header = None metadata = {} if container == 'unsupported_version': metadata['version'] = '9.9.9' else: metadata['version'] = '1.0.0' metadata['backup_id'] = 123 metadata['volume_id'] = 123 metadata['backup_name'] = 'fake backup' metadata['backup_description'] = 'fake backup description' metadata['created_at'] = '2013-02-19 11:20:54,805' metadata['objects'] = [{ 'backup_001': {'compression': 'zlib', 'length': 10}, 'backup_002': {'compression': 'zlib', 'length': 10}, 'backup_003': {'compression': 'zlib', 'length': 10} }] metadata_json = json.dumps(metadata, sort_keys=True, indent=2) fake_object_body = metadata_json return (fake_object_header, fake_object_body) fake_header = {'etag': '"fake-md5-sum"'} if resp_chunk_size: def _object_info(): length = 0 while length < (1024 * 1024): yield os.urandom(resp_chunk_size) length += resp_chunk_size fake_object_body = _object_info() else: fake_object_body = os.urandom(1024 * 1024) return (fake_header, fake_object_body) def put_object(self, container, name, contents, **kwargs): LOG.debug("fake put_object(%(container)s, %(name)s)" % {'container': container, 'name': name}) if container == 'socket_error_on_put': raise socket.error(111, 'ECONNREFUSED') headers = kwargs.get('headers', {}) object_checksum = md5() if self.MANIFEST_HEADER_KEY in headers: # the manifest prefix format is / where # container is where the object segments are in and prefix is the # common prefix for all segments. 
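            # (That is, the X-Object-Manifest value has the form
            # "<container>/<prefix>": the segment objects live in
            # <container> under names beginning with <prefix>.)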
self.manifest_prefix = headers.get(self.MANIFEST_HEADER_KEY) self.manifest_name = name object_checksum.update(contents) else: if hasattr(contents, 'read'): chunk_size = 128 object_content = "" chunk = contents.read(chunk_size) while chunk: object_content += chunk object_checksum.update(chunk) chunk = contents.read(chunk_size) self.container_objects[name] = object_content else: object_checksum.update(contents) self.container_objects[name] = contents # this is included to test bad swift segment etags if name.startswith("bad_segment_etag_"): return "this_is_an_intentional_bad_segment_etag" return object_checksum.hexdigest() def post_object(self, container, name, headers={}): LOG.debug("fake post_object(%(container)s, %(name)s, %(head)s)" % {'container': container, 'name': name, 'head': str(headers)}) def delete_object(self, container, name): LOG.debug("fake delete_object(%(container)s, %(name)s)" % {'container': container, 'name': name}) if container == 'socket_error_on_delete': raise socket.error(111, 'ECONNREFUSED') pass class Patcher(object): """Objects that need to mock global symbols throughout their existence should extend this base class. The object acts as a context manager which, when used in conjunction with the 'with' statement, terminates all running patchers when it leaves the scope. """ def __init__(self): self.__patchers = None def __enter__(self): self.__patchers = [] return self def __exit__(self, type, value, traceback): # Stop patchers in the LIFO order. while self.__patchers: self.__patchers.pop().stop() def _start_patcher(self, patcher): """All patchers started by this method will be automatically terminated on __exit__(). """ self.__patchers.append(patcher) return patcher.start() class SwiftClientStub(Patcher): """ Component for controlling behavior of Swift Client Stub. Instantiated before tests are invoked in "fake" mode. 
Invoke methods to control behavior so that systems under test can interact with this as it is a real swift client with a real backend example: if FAKE: swift_stub = SwiftClientStub() swift_stub.with_account('xyz') # returns swift account info and auth token component_using_swift.get_swift_account() if FAKE: swift_stub.with_container('test-container-name') # returns swift container information - mostly faked component_using.swift.create_container('test-container-name') component_using_swift.get_container_info('test-container-name') if FAKE: swift_stub.with_object('test-container-name', 'test-object-name', 'test-object-contents') # returns swift object info and contents component_using_swift.create_object('test-container-name', 'test-object-name', 'test-contents') component_using_swift.get_object('test-container-name', 'test-object-name') if FAKE: swift_stub.without_object('test-container-name', 'test-object-name') # allows object to be removed ONCE component_using_swift.remove_object('test-container-name', 'test-object-name') # throws ClientException - 404 component_using_swift.get_object('test-container-name', 'test-object-name') component_using_swift.remove_object('test-container-name', 'test-object-name') if FAKE: swift_stub.without_object('test-container-name', 'test-object-name') # allows container to be removed ONCE component_using_swift.remove_container('test-container-name') # throws ClientException - 404 component_using_swift.get_container('test-container-name') component_using_swift.remove_container('test-container-name') """ def __init__(self): super(SwiftClientStub, self).__init__() self._connection = swift_client.Connection() self._containers = {} self._containers_list = [] self._objects = {} def _remove_object(self, name, some_list): idx = [i for i, obj in enumerate(some_list) if obj['name'] == name] if len(idx) == 1: del some_list[idx[0]] def _ensure_object_exists(self, container, name): self._connection.get_object(container, name) def with_account(self, account_id): """ setups up account headers example: if FAKE: swift_stub = SwiftClientStub() swift_stub.with_account('xyz') # returns swift account info and auth token component_using_swift.get_swift_account() :param account_id: account id """ def account_resp(): return ({'content-length': '2', 'accept-ranges': 'bytes', 'x-timestamp': '1363049003.92304', 'x-trans-id': 'tx9e5da02c49ed496395008309c8032a53', 'date': 'Tue, 10 Mar 2013 00:43:23 GMT', 'x-account-bytes-used': '0', 'x-account-container-count': '0', 'content-type': 'application/json; charset=utf-8', 'x-account-object-count': '0'}, self._containers_list) get_auth_return_value = ( u"http://127.0.0.1:8080/v1/AUTH_c7b038976df24d96bf1980f5da17bd89", u'MIINrwYJKoZIhvcNAQcCoIINoDCCDZwCAQExCTAHBgUrDgMCGjCCDIgGCSqGSIb3' u'DQEHAaCCDHkEggx1eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAi' u'MjAxMy0wMy0xOFQxODoxMzoyMC41OTMyNzYiLCAiZXhwaXJlcyI6ICIyMDEzLTAz' u'LTE5VDE4OjEzOjIwWiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7' u'ImVuYWJsZWQiOiB0cnVlLCAiZGVzY3JpcHRpb24iOiBudWxsLCAibmFtZSI6ICJy' u'ZWRkd2FyZiIsICJpZCI6ICJjN2IwMzg5NzZkZjI0ZDk2YmYxOTgwZjVkYTE3YmQ4' u'OSJ9fSwgInNlcnZpY2VDYXRhbG9nIjogW3siZW5kcG9pbnRzIjogW3siYWRtaW5') get_auth_patcher = patch.object( swift_client.Connection, 'get_auth', MagicMock(return_value=get_auth_return_value)) self._start_patcher(get_auth_patcher) get_account_patcher = patch.object( swift_client.Connection, 'get_account', MagicMock(return_value=account_resp())) self._start_patcher(get_account_patcher) return self def _create_container(self, 
container_name): container = {'count': 0, 'bytes': 0, 'name': container_name} self._containers[container_name] = container self._containers_list.append(container) self._objects[container_name] = [] def _ensure_container_exists(self, container): self._connection.get_container(container) def _delete_container(self, container): self._remove_object(container, self._containers_list) del self._containers[container] del self._objects[container] def with_container(self, container_name): """ sets expectations for creating a container and subsequently getting its information example: if FAKE: swift_stub.with_container('test-container-name') # returns swift container information - mostly faked component_using.swift.create_container('test-container-name') component_using_swift.get_container_info('test-container-name') :param container_name: container name that is expected to be created """ def container_resp(container): return ({'content-length': '2', 'x-container-object-count': '0', 'accept-ranges': 'bytes', 'x-container-bytes-used': '0', 'x-timestamp': '1363370869.72356', 'x-trans-id': 'tx7731801ac6ec4e5f8f7da61cde46bed7', 'date': 'Fri, 10 Mar 2013 18:07:58 GMT', 'content-type': 'application/json; charset=utf-8'}, self._objects[container]) # if this is called multiple times then nothing happens put_container_patcher = patch.object(swift_client.Connection, 'put_container') self._start_patcher(put_container_patcher) def side_effect_func(*args, **kwargs): if args[0] in self._containers: return container_resp(args[0]) else: raise swiftclient.ClientException('Resource Not Found', http_status=404) self._create_container(container_name) # return container headers get_container_patcher = patch.object( swift_client.Connection, 'get_container', MagicMock(side_effect=side_effect_func)) self._start_patcher(get_container_patcher) return self def without_container(self, container): """ sets expectations for removing a container and subsequently throwing an exception for further interactions example: if FAKE: swift_stub.without_container('test-container-name') # returns swift container information - mostly faked component_using.swift.remove_container('test-container-name') # throws exception "Resource Not Found - 404" component_using_swift.get_container_info('test-container-name') :param container: container name that is expected to be removed """ # first ensure container self._ensure_container_exists(container) self._delete_container(container) return self def with_object(self, container, name, contents): """ sets expectations for creating an object and subsequently getting its contents example: if FAKE: swift_stub.with_object('test-container-name', 'test-object-name', 'test-object-contents') # returns swift object info and contents component_using_swift.create_object('test-container-name', 'test-object-name', 'test-contents') component_using_swift.get_object('test-container-name', 'test-object-name') :param container: container name that is the object belongs :param name: the name of the object expected to be created :param contents: the contents of the object """ put_object_patcher = patch.object( swift_client.Connection, 'put_object', MagicMock(return_value=uuid.uuid1())) self._start_patcher(put_object_patcher) def side_effect_func(*args, **kwargs): if (args[0] in self._containers and args[1] in map(lambda x: x['name'], self._objects[args[0]])): return ( {'content-length': len(contents), 'accept-ranges': 'bytes', 'last-modified': 'Mon, 10 Mar 2013 01:06:34 GMT', 'etag': 'eb15a6874ce265e2c3eb1b4891567bab', 
'x-timestamp': '1363568794.67584', 'x-trans-id': 'txef3aaf26c897420c8e77c9750ce6a501', 'date': 'Mon, 10 Mar 2013 05:35:14 GMT', 'content-type': 'application/octet-stream'}, [obj for obj in self._objects[args[0]] if obj['name'] == args[1]][0]['contents']) else: raise swiftclient.ClientException('Resource Not Found', http_status=404) get_object_patcher = patch.object( swift_client.Connection, 'get_object', MagicMock(side_effect=side_effect_func)) self._start_patcher(get_object_patcher) self._remove_object(name, self._objects[container]) self._objects[container].append( {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950', 'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': name, 'content_type': 'application/octet-stream', 'contents': contents}) return self def without_object(self, container, name): """ sets expectations for deleting an object example: if FAKE: swift_stub.without_object('test-container-name', 'test-object-name') # allows container to be removed ONCE component_using_swift.remove_container('test-container-name') # throws ClientException - 404 component_using_swift.get_container('test-container-name') component_using_swift.remove_container('test-container-name') :param container: container name that is the object belongs :param name: the name of the object expected to be removed """ self._ensure_container_exists(container) self._ensure_object_exists(container, name) def side_effect_func(*args, **kwargs): if not [obj for obj in self._objects[args[0]] if obj['name'] == [args[1]]]: raise swiftclient.ClientException('Resource Not found', http_status=404) else: return None delete_object_patcher = patch.object( swift_client.Connection, 'delete_object', MagicMock(side_effect=side_effect_func)) self._start_patcher(delete_object_patcher) self._remove_object(name, self._objects[container]) return self def fake_create_swift_client(calculate_etag=False, *args): return FakeSwiftClient.Connection(*args) trove-5.0.0/trove/tests/unittests/0000775000567000056710000000000012701410521020350 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/backup/0000775000567000056710000000000012701410521021615 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/backup/test_backupagent.py0000664000567000056710000005004012701410316025513 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
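"""Unit tests for the guest agent backup pipeline: per-datastore backup
command construction, streaming to a mocked Swift storage backend, restore
dispatch, and status reporting through the conductor API.
"""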
import hashlib import mock import os from mock import Mock, MagicMock, patch, ANY, DEFAULT from oslo_utils import netutils from webob.exc import HTTPNotFound from trove.backup.state import BackupState from trove.common.context import TroveContext from trove.common.strategies.storage.base import Storage from trove.common import utils from trove.conductor import api as conductor_api from trove.guestagent.backup import backupagent from trove.guestagent.common import configuration from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.strategies.backup.base import BackupRunner from trove.guestagent.strategies.backup.base import UnknownBackupType from trove.guestagent.strategies.backup.experimental import couchbase_impl from trove.guestagent.strategies.backup.experimental import db2_impl from trove.guestagent.strategies.backup.experimental import mongo_impl from trove.guestagent.strategies.backup.experimental import redis_impl from trove.guestagent.strategies.backup import mysql_impl from trove.guestagent.strategies.backup.mysql_impl import MySqlApp from trove.guestagent.strategies.restore.base import RestoreRunner from trove.tests.unittests import trove_testtools def create_fake_data(): from random import choice from string import ascii_letters return ''.join([choice(ascii_letters) for _ in range(1024)]) class MockBackup(BackupRunner): """Create a large temporary file to 'backup' with subprocess.""" backup_type = 'mock_backup' def __init__(self, *args, **kwargs): self.data = create_fake_data() self.cmd = 'echo %s' % self.data super(MockBackup, self).__init__(*args, **kwargs) def cmd(self): return self.cmd class MockCheckProcessBackup(MockBackup): """Backup runner that fails confirming the process.""" def check_process(self): return False class MockLossyBackup(MockBackup): """Fake Incomplete writes to swift.""" def read(self, *args): results = super(MockLossyBackup, self).read(*args) if results: # strip a few chars from the stream return results[20:] class MockSwift(object): """Store files in String.""" def __init__(self, *args, **kwargs): self.store = '' self.containers = [] self.container = "database_backups" self.url = 'http://mockswift/v1' self.etag = hashlib.md5() def put_container(self, container): if container not in self.containers: self.containers.append(container) return None def put_object(self, container, obj, contents, **kwargs): if container not in self.containers: raise HTTPNotFound while True: if not hasattr(contents, 'read'): break content = contents.read(2 ** 16) if not content: break self.store += content self.etag.update(self.store) return self.etag.hexdigest() def save(self, filename, stream): location = '%s/%s/%s' % (self.url, self.container, filename) return True, 'w00t', 'fake-checksum', location def load(self, context, storage_url, container, filename, backup_checksum): pass def load_metadata(self, location, checksum): return {} def save_metadata(self, location, metadata): pass class MockStorage(Storage): def __call__(self, *args, **kwargs): return self def load(self, location, backup_checksum): pass def save(self, filename, stream): pass def load_metadata(self, location, checksum): return {} def save_metadata(self, location, metadata={}): pass def is_enabled(self): return True class MockRestoreRunner(RestoreRunner): def __init__(self, storage, **kwargs): pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def restore(self): pass def is_zipped(self): return False class MockStats: f_blocks = 
1024 ** 2 f_bsize = 4096 f_bfree = 512 * 1024 class BackupAgentTest(trove_testtools.TestCase): def setUp(self): super(BackupAgentTest, self).setUp() self.patch_ope = patch.multiple('os.path', exists=DEFAULT) self.mock_ope = self.patch_ope.start() self.addCleanup(self.patch_ope.stop) self.patch_pc = patch('trove.guestagent.datastore.service.' 'BaseDbStatus.prepare_completed') self.mock_pc = self.patch_pc.start() self.mock_pc.__get__ = Mock(return_value=True) self.addCleanup(self.patch_pc.stop) self.get_auth_pwd_patch = patch.object( MySqlApp, 'get_auth_password', MagicMock(return_value='123')) self.get_auth_pwd_mock = self.get_auth_pwd_patch.start() self.addCleanup(self.get_auth_pwd_patch.stop) self.get_ss_patch = patch.object( backupagent, 'get_storage_strategy', MagicMock(return_value=MockSwift)) self.get_ss_mock = self.get_ss_patch.start() self.addCleanup(self.get_ss_patch.stop) self.statvfs_patch = patch.object( os, 'statvfs', MagicMock(return_value=MockStats)) self.statvfs_mock = self.statvfs_patch.start() self.addCleanup(self.statvfs_patch.stop) self.orig_utils_execute_with_timeout = utils.execute_with_timeout self.orig_os_get_ip_address = netutils.get_my_ipv4 def tearDown(self): super(BackupAgentTest, self).tearDown() utils.execute_with_timeout = self.orig_utils_execute_with_timeout netutils.get_my_ipv4 = self.orig_os_get_ip_address def test_backup_impl_MySQLDump(self): """This test is for guestagent/strategies/backup/mysql_impl """ mysql_dump = mysql_impl.MySQLDump( 'abc', extra_opts='') self.assertIsNotNone(mysql_dump.cmd) str_mysql_dump_cmd = ('mysqldump' ' --all-databases' ' %(extra_opts)s' ' --opt' ' --password=123' ' -u os_admin' ' 2>/tmp/mysqldump.log' ' | gzip |' ' openssl enc -aes-256-cbc -salt ' '-pass pass:default_aes_cbc_key') self.assertEqual(str_mysql_dump_cmd, mysql_dump.cmd) self.assertIsNotNone(mysql_dump.manifest) self.assertEqual('abc.gz.enc', mysql_dump.manifest) @mock.patch.object( MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') def test_backup_impl_InnoBackupEx(self, mock_datadir): """This test is for guestagent/strategies/backup/mysql_impl """ inno_backup_ex = mysql_impl.InnoBackupEx('innobackupex', extra_opts='') self.assertIsNotNone(inno_backup_ex.cmd) str_innobackup_cmd = ('sudo innobackupex' ' --stream=xbstream' ' %(extra_opts)s' ' /var/lib/mysql/data 2>/tmp/innobackupex.log' ' | gzip |' ' openssl enc -aes-256-cbc -salt ' '-pass pass:default_aes_cbc_key') self.assertEqual(str_innobackup_cmd, inno_backup_ex.cmd) self.assertIsNotNone(inno_backup_ex.manifest) str_innobackup_manifest = 'innobackupex.xbstream.gz.enc' self.assertEqual(str_innobackup_manifest, inno_backup_ex.manifest) def test_backup_impl_CbBackup(self): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") utils.execute_with_timeout = Mock(return_value=None) cbbackup = couchbase_impl.CbBackup('cbbackup', extra_opts='') self.assertIsNotNone(cbbackup) str_cbbackup_cmd = ("tar cpPf - /tmp/backups | " "gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_cbbackup_cmd, cbbackup.cmd) self.assertIsNotNone(cbbackup.manifest) self.assertIn('gz.enc', cbbackup.manifest) def test_backup_impl_DB2Backup(self): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") db2_backup = db2_impl.DB2Backup('db2backup', extra_opts='') self.assertIsNotNone(db2_backup) str_db2_backup_cmd = ("sudo tar cPf - /home/db2inst1/db2inst1/backup " "| gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_db2_backup_cmd, db2_backup.cmd) 
self.assertIsNotNone(db2_backup.manifest) self.assertIn('gz.enc', db2_backup.manifest) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def test_backup_impl_MongoDump(self, _): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") utils.execute_with_timeout = Mock(return_value=None) mongodump = mongo_impl.MongoDump('mongodump', extra_opts='') self.assertIsNotNone(mongodump) str_mongodump_cmd = ("sudo tar cPf - /var/lib/mongodb/dump | " "gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_mongodump_cmd, mongodump.cmd) self.assertIsNotNone(mongodump.manifest) self.assertIn('gz.enc', mongodump.manifest) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) @patch.object(configuration.ConfigurationManager, 'parse_configuration', Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) def test_backup_impl_RedisBackup(self, *mocks): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") redis_backup = redis_impl.RedisBackup('redisbackup', extra_opts='') self.assertIsNotNone(redis_backup) str_redis_backup_cmd = ("sudo cat /var/lib/redis/dump.rdb | " "gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_redis_backup_cmd, redis_backup.cmd) self.assertIsNotNone(redis_backup.manifest) self.assertIn('gz.enc', redis_backup.manifest) def test_backup_base(self): """This test is for guestagent/strategies/backup/base """ BackupRunner.cmd = "%s" backup_runner = BackupRunner('sample', cmd='echo command') if backup_runner.is_zipped: self.assertEqual('.gz', backup_runner.zip_manifest) self.assertIsNotNone(backup_runner.zip_manifest) self.assertIsNotNone(backup_runner.zip_cmd) self.assertEqual(' | gzip', backup_runner.zip_cmd) else: self.assertIsNone(backup_runner.zip_manifest) self.assertIsNone(backup_runner.zip_cmd) self.assertEqual('BackupRunner', backup_runner.backup_type) @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(conductor_api.API, 'update_backup', Mock(return_value=Mock())) def test_execute_backup(self): """This test should ensure backup agent ensures that backup and storage is not running resolves backup instance starts backup starts storage reports status """ agent = backupagent.BackupAgent() backup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'datastore': 'mysql', 'datastore_version': '5.5' } agent.execute_backup(context=None, backup_info=backup_info, runner=MockBackup) self.assertTrue( conductor_api.API.update_backup.called_once_with( ANY, backup_id=backup_info['id'], state=BackupState.NEW)) self.assertTrue( conductor_api.API.update_backup.called_once_with( ANY, backup_id=backup_info['id'], size=ANY, state=BackupState.BUILDING)) self.assertTrue( conductor_api.API.update_backup.called_once_with( ANY, backup_id=backup_info['id'], checksum=ANY, location=ANY, note=ANY, backup_type=backup_info['type'], state=BackupState.COMPLETED)) @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(conductor_api.API, 'update_backup', Mock(return_value=Mock())) def test_execute_bad_process_backup(self): agent = backupagent.BackupAgent() backup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'datastore': 'mysql', 'datastore_version': '5.5' } self.assertRaises(backupagent.BackupError, agent.execute_backup, context=None, backup_info=backup_info, runner=MockCheckProcessBackup) self.assertTrue( 
            conductor_api.API.update_backup.called_once_with(
                ANY, backup_id=backup_info['id'],
                state=BackupState.NEW))
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY, backup_id=backup_info['id'],
                size=ANY, state=BackupState.BUILDING))
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY, backup_id=backup_info['id'],
                checksum=ANY, location=ANY, note=ANY,
                backup_type=backup_info['type'],
                state=BackupState.FAILED))

    @patch.object(conductor_api.API, 'get_client',
                  Mock(return_value=Mock()))
    @patch.object(conductor_api.API, 'update_backup',
                  Mock(return_value=Mock()))
    @patch('trove.guestagent.backup.backupagent.LOG')
    def test_execute_lossy_backup(self, mock_logging):
        """This test verifies that incomplete writes to swift will fail."""
        with patch.object(MockSwift, 'save',
                          return_value=(False, 'Error', 'y', 'z')):
            agent = backupagent.BackupAgent()
            backup_info = {'id': '123',
                           'location': 'fake-location',
                           'type': 'InnoBackupEx',
                           'checksum': 'fake-checksum',
                           }
            self.assertRaises(backupagent.BackupError,
                              agent.execute_backup,
                              context=None,
                              backup_info=backup_info,
                              runner=MockLossyBackup)
            self.assertTrue(
                conductor_api.API.update_backup.called_once_with(
                    ANY, backup_id=backup_info['id'],
                    state=BackupState.FAILED))

    def test_execute_restore(self):
        """This test should ensure backup agent
                resolves backup instance
                determines backup/restore type
                transfers/downloads data and invokes the restore module
                reports status
        """
        with patch.object(backupagent, 'get_storage_strategy',
                          return_value=MockStorage):
            with patch.object(backupagent, 'get_restore_strategy',
                              return_value=MockRestoreRunner):
                agent = backupagent.BackupAgent()
                bkup_info = {'id': '123',
                             'location': 'fake-location',
                             'type': 'InnoBackupEx',
                             'checksum': 'fake-checksum',
                             }
                agent.execute_restore(TroveContext(),
                                      bkup_info,
                                      '/var/lib/mysql/data')

    @patch('trove.guestagent.backup.backupagent.LOG')
    def test_restore_unknown(self, mock_logging):
        with patch.object(backupagent, 'get_restore_strategy',
                          side_effect=ImportError):
            agent = backupagent.BackupAgent()
            bkup_info = {'id': '123',
                         'location': 'fake-location',
                         'type': 'foo',
                         'checksum': 'fake-checksum',
                         }
            self.assertRaises(UnknownBackupType, agent.execute_restore,
                              context=None, backup_info=bkup_info,
                              restore_location='/var/lib/mysql/data')

    @patch.object(MySqlApp, 'get_data_dir',
                  return_value='/var/lib/mysql/data')
    @patch.object(conductor_api.API, 'get_client',
                  Mock(return_value=Mock()))
    @patch.object(MockSwift, 'load_metadata', return_value={'lsn': '54321'})
    @patch.object(MockStorage, 'save_metadata')
    @patch.object(backupagent, 'get_storage_strategy', return_value=MockSwift)
    @patch('trove.guestagent.backup.backupagent.LOG')
    def test_backup_incremental_metadata(self, mock_logging,
                                         get_storage_strategy_mock,
                                         save_metadata_mock,
                                         load_metadata_mock,
                                         get_datadir_mock):
        meta = {
            'lsn': '12345',
            'parent_location': 'fake',
            'parent_checksum': 'md5',
        }
        with patch.multiple(mysql_impl.InnoBackupExIncremental,
                            metadata=MagicMock(return_value=meta),
                            _run=MagicMock(return_value=True),
                            __exit__=MagicMock(return_value=True)):
            agent = backupagent.BackupAgent()
            bkup_info = {'id': '123',
                         'location': 'fake-location',
                         'type': 'InnoBackupEx',
                         'checksum': 'fake-checksum',
                         'parent': {'location': 'fake', 'checksum': 'md5'},
                         'datastore': 'mysql',
                         'datastore_version': 'bo.gus'
                         }
            agent.execute_backup(TroveContext(), bkup_info,
                                 '/var/lib/mysql/data')

            self.assertTrue(MockStorage.save_metadata.called_once_with(
                ANY, meta))

    @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock()))
    def test_backup_incremental_bad_metadata(self):
        with patch.object(backupagent, 'get_storage_strategy',
                          return_value=MockSwift):
            agent = backupagent.BackupAgent()
            bkup_info = {'id': '123',
                         'location': 'fake-location',
                         'type': 'InnoBackupEx',
                         'checksum': 'fake-checksum',
                         'parent': {'location': 'fake', 'checksum': 'md5'}
                         }
            self.assertRaises(
                AttributeError,
                agent.execute_backup,
                TroveContext(), bkup_info, 'location')
trove-5.0.0/trove/tests/unittests/backup/test_backup_controller.py0000664000567000056710000001000712701410316026736 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import jsonschema

from testtools.matchers import Equals

from trove.backup.service import BackupController
from trove.common import apischema
from trove.tests.unittests import trove_testtools


class TestBackupController(trove_testtools.TestCase):
    def setUp(self):
        super(TestBackupController, self).setUp()
        self.uuid = "d6338c9c-3cc8-4313-b98f-13cc0684cf15"
        self.invalid_uuid = "ead-edsa-e23-sdf-23"
        self.controller = BackupController()

    def test_validate_create_complete(self):
        body = {"backup": {"instance": self.uuid,
                           "name": "testback-backup"}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_with_blankname(self):
        body = {"backup": {"instance": self.uuid,
                           "name": ' '}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertEqual(1, len(errors))
        self.assertIn("' ' does not match '^.*[0-9a-zA-Z]+.*$'",
                      errors[0].message)

    def test_validate_create_with_invalidname(self):
        body = {"backup": {"instance": self.uuid,
                           "name": '$#@&?'}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertEqual(1, len(errors))
        self.assertIn("'$#@&?' does not match '^.*[0-9a-zA-Z]+.*$'",
                      errors[0].message)

    def test_validate_create_invalid_uuid(self):
        body = {"backup": {"instance": self.invalid_uuid,
                           "name": "testback-backup"}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(errors[0].message,
                        Equals("'%s' does not match '%s'" %
                               (self.invalid_uuid,
                                apischema.uuid['pattern'])))

    def test_validate_create_incremental(self):
        body = {"backup": {"instance": self.uuid,
                           "name": "testback-backup",
                           "parent_id": self.uuid}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_invalid_parent_id(self):
        body = {"backup": {"instance": self.uuid,
                           "name": "testback-backup",
                           "parent_id": self.invalid_uuid}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(errors[0].message,
                        Equals("'%s' does not match '%s'" %
                               (self.invalid_uuid,
                                apischema.uuid['pattern'])))
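
# Editor's note: a minimal, self-contained sketch of the validation
# pattern the controller tests above exercise. The schema literal below
# is a hypothetical stand-in for whatever get_schema('create', body)
# actually returns; only the jsonschema calls mirror the tests.

# import jsonschema  (already imported above)

def _sketch_validate_backup_body(body):
    schema = {
        "type": "object",
        "properties": {
            "backup": {
                "type": "object",
                "properties": {
                    "instance": {"type": "string"},
                    "name": {"type": "string",
                             "pattern": "^.*[0-9a-zA-Z]+.*$"},
                },
                "required": ["instance", "name"],
            }
        },
        "required": ["backup"],
    }
    validator = jsonschema.Draft4Validator(schema)
    # iter_errors() yields one ValidationError per failing constraint,
    # which is exactly how the assertions above consume it.
    return sorted(validator.iter_errors(body), key=lambda e: e.path)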
trove-5.0.0/trove/tests/unittests/backup/test_backup_models.py0000664000567000056710000004753612701410316026057 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

from mock import DEFAULT
from mock import MagicMock
from mock import patch

from trove.backup import models
from trove.backup import state
from trove.common import context
from trove.common import exception
from trove.common import utils
from trove.instance import models as instance_models
from trove.taskmanager import api
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util


def _prep_conf(current_time):
    current_time = str(current_time)
    _context = context.TroveContext(tenant='TENANT-' + current_time)
    instance_id = 'INSTANCE-' + current_time
    return _context, instance_id

BACKUP_NAME = 'WORKS'
BACKUP_NAME_2 = 'IT-WORKS'
BACKUP_NAME_3 = 'SECOND-LAST-ONE'
BACKUP_NAME_4 = 'LAST-ONE-FULL'
BACKUP_NAME_5 = 'LAST-ONE-INCREMENTAL'
BACKUP_NAME_6 = 'LAST-ONE-DELETED'
BACKUP_STATE = state.BackupState.NEW
BACKUP_STATE_COMPLETED = state.BackupState.COMPLETED
BACKUP_DESC = 'Backup test'
BACKUP_FILENAME = '45a3d8cb-ade8-484c-a8a5-0c3c7286fb2f.xbstream.gz'
BACKUP_LOCATION = 'https://hpcs.com/tenant/database_backups/' + \
                  BACKUP_FILENAME


class BackupCreateTest(trove_testtools.TestCase):
    def setUp(self):
        super(BackupCreateTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(utils.utcnow())
        self.created = False

    def tearDown(self):
        super(BackupCreateTest, self).tearDown()
        if self.created:
            models.DBBackup.find_by(
                tenant_id=self.context.tenant).delete()

    @patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
    def test_create(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.datastore_version = MagicMock()
            instance.datastore_version.id = 'datastore-id-999'
            instance.cluster_id = None
            with patch.multiple(models.Backup,
                                validate_can_perform_action=DEFAULT,
                                verify_swift_auth_token=DEFAULT):
                with patch.object(api.API, 'create_backup',
                                  MagicMock(return_value=None)):
                    bu = models.Backup.create(self.context, self.instance_id,
                                              BACKUP_NAME, BACKUP_DESC)
                    self.created = True

                    self.assertEqual(BACKUP_NAME, bu.name)
                    self.assertEqual(BACKUP_DESC, bu.description)
                    self.assertEqual(self.instance_id, bu.instance_id)
                    self.assertEqual(state.BackupState.NEW, bu.state)

                    db_record = models.DBBackup.find_by(id=bu.id)
                    self.assertEqual(bu.id, db_record['id'])
                    self.assertEqual(BACKUP_NAME, db_record['name'])
                    self.assertEqual(BACKUP_DESC, db_record['description'])
                    self.assertEqual(self.instance_id,
                                     db_record['instance_id'])
                    self.assertEqual(state.BackupState.NEW,
                                     db_record['state'])
                    self.assertEqual(instance.datastore_version.id,
                                     db_record['datastore_version_id'])

    @patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
    def test_create_incremental(self):
        instance = MagicMock()
        parent = MagicMock(spec=models.DBBackup)
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.datastore_version = MagicMock()
            instance.datastore_version.id = 'datastore-id-999'
            instance.cluster_id = None
            with patch.multiple(models.Backup,
                                validate_can_perform_action=DEFAULT,
                                verify_swift_auth_token=DEFAULT,
                                get_by_id=MagicMock(return_value=parent)):
                with patch.object(api.API, 'create_backup',
                                  MagicMock(return_value=None)):
                    incremental = models.Backup.create(
                        self.context, self.instance_id,
                        BACKUP_NAME, BACKUP_DESC, parent_id='parent_uuid')
                    self.created = True

                    db_record = models.DBBackup.find_by(id=incremental.id)
                    self.assertEqual(incremental.id, db_record['id'])
                    self.assertEqual(BACKUP_NAME, db_record['name'])
                    self.assertEqual(BACKUP_DESC, db_record['description'])
                    self.assertEqual(self.instance_id,
                                     db_record['instance_id'])
                    self.assertEqual(state.BackupState.NEW,
                                     db_record['state'])
                    self.assertEqual('parent_uuid', db_record['parent_id'])
                    self.assertEqual(instance.datastore_version.id,
                                     db_record['datastore_version_id'])

    def test_create_instance_not_found(self):
        self.assertRaises(exception.NotFound, models.Backup.create,
                          self.context, self.instance_id,
                          BACKUP_NAME, BACKUP_DESC)

    def test_create_incremental_not_found(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.cluster_id = None
            with patch.object(models.Backup, 'validate_can_perform_action',
                              return_value=None):
                with patch.object(models.Backup, 'verify_swift_auth_token',
                                  return_value=None):
                    self.assertRaises(exception.NotFound,
                                      models.Backup.create,
                                      self.context, self.instance_id,
                                      BACKUP_NAME, BACKUP_DESC,
                                      parent_id='BAD')

    def test_create_instance_not_active(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                side_effect=exception.UnprocessableEntity)
            self.assertRaises(exception.UnprocessableEntity,
                              models.Backup.create,
                              self.context, self.instance_id,
                              BACKUP_NAME, BACKUP_DESC)

    def test_create_backup_swift_token_invalid(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            with patch.object(models.Backup, 'validate_can_perform_action',
                              return_value=None):
                with patch.object(models.Backup, 'verify_swift_auth_token',
                                  side_effect=exception.SwiftAuthError):
                    self.assertRaises(exception.SwiftAuthError,
                                      models.Backup.create,
                                      self.context, self.instance_id,
                                      BACKUP_NAME, BACKUP_DESC)

    def test_create_backup_datastore_operation_not_supported(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            with patch.object(
                    models.Backup, 'validate_can_perform_action',
                    side_effect=exception.DatastoreOperationNotSupported
            ):
                self.assertRaises(exception.DatastoreOperationNotSupported,
                                  models.Backup.create,
                                  self.context, self.instance_id,
                                  BACKUP_NAME, BACKUP_DESC)


class BackupDeleteTest(trove_testtools.TestCase):
    def setUp(self):
        super(BackupDeleteTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(utils.utcnow())

    def tearDown(self):
        super(BackupDeleteTest, self).tearDown()

    def test_delete_backup_not_found(self):
        self.assertRaises(exception.NotFound, models.Backup.delete,
                          self.context, 'backup-id')

    def test_delete_backup_is_running(self):
        backup = MagicMock()
        backup.is_running = True
        with patch.object(models.Backup, 'get_by_id', return_value=backup):
            self.assertRaises(exception.UnprocessableEntity,
                              models.Backup.delete, self.context,
                              'backup_id')

    def test_delete_backup_swift_token_invalid(self):
        backup = MagicMock()
        backup.is_running = False
        with patch.object(models.Backup, 'get_by_id', return_value=backup):
            with patch.object(models.Backup, 'verify_swift_auth_token',
                              side_effect=exception.SwiftAuthError):
                self.assertRaises(exception.SwiftAuthError,
                                  models.Backup.delete, self.context,
                                  'backup_id')


class BackupORMTest(trove_testtools.TestCase):

    def setUp(self):
        super(BackupORMTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(utils.utcnow())
        self.backup = models.DBBackup.create(tenant_id=self.context.tenant,
                                             name=BACKUP_NAME,
                                             state=BACKUP_STATE,
                                             instance_id=self.instance_id,
                                             deleted=False,
                                             size=2.0,
                                             location=BACKUP_LOCATION)
        self.deleted = False

    def tearDown(self):
        super(BackupORMTest, self).tearDown()
        if not self.deleted:
            models.DBBackup.find_by(tenant_id=self.context.tenant).delete()

    def test_list(self):
        backups, marker = models.Backup.list(self.context)
        self.assertIsNone(marker)
        self.assertEqual(1, len(backups))

    def test_list_for_instance(self):
        models.DBBackup.create(tenant_id=self.context.tenant,
                               name=BACKUP_NAME_2,
                               state=BACKUP_STATE,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=False)
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertIsNone(marker)
        self.assertEqual(2, len(backups))

    def test_get_last_completed(self):
        models.DBBackup.create(tenant_id=self.context.tenant,
                               name=BACKUP_NAME_3,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=False)
        models.DBBackup.create(tenant_id=self.context.tenant,
                               name=BACKUP_NAME_4,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=False)
        models.DBBackup.create(tenant_id=self.context.tenant,
                               name=BACKUP_NAME_5,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               parent_id='parent_uuid',
                               size=2.0,
                               deleted=False)
        models.DBBackup.create(tenant_id=self.context.tenant,
                               name=BACKUP_NAME_6,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=True)

        backup = models.Backup.get_last_completed(
            self.context, self.instance_id, include_incremental=True)
        self.assertEqual(BACKUP_NAME_5, backup.name)

        backup = models.Backup.get_last_completed(
            self.context, self.instance_id, include_incremental=False)
        self.assertEqual(BACKUP_NAME_4, backup.name)

    def test_running(self):
        running = models.Backup.running(instance_id=self.instance_id)
        self.assertTrue(running)

    def test_not_running(self):
        not_running = models.Backup.running(instance_id='non-existent')
        self.assertFalse(not_running)

    def test_running_exclude(self):
        not_running = models.Backup.running(instance_id=self.instance_id,
                                            exclude=self.backup.id)
        self.assertFalse(not_running)

    def test_is_running(self):
        self.assertTrue(self.backup.is_running)

    def test_is_done(self):
        self.backup.state = state.BackupState.COMPLETED
        self.backup.save()
        self.assertTrue(self.backup.is_done)

    def test_not_is_running(self):
        self.backup.state = state.BackupState.COMPLETED
        self.backup.save()
        self.assertFalse(self.backup.is_running)

    def test_not_is_done(self):
        self.assertFalse(self.backup.is_done)

    def test_backup_size(self):
        db_record = models.DBBackup.find_by(id=self.backup.id)
        self.assertEqual(self.backup.size, db_record.size)

    def test_backup_delete(self):
        backup = models.DBBackup.find_by(id=self.backup.id)
        backup.delete()
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertIsNone(marker)
        self.assertEqual(0, len(backups))

    def test_delete(self):
        self.backup.delete()
        db_record = models.DBBackup.find_by(id=self.backup.id, deleted=True)
        self.assertEqual(self.instance_id, db_record['instance_id'])

    def test_deleted_not_running(self):
        self.backup.delete()
        self.assertFalse(models.Backup.running(self.instance_id))

    def test_filename(self):
        self.assertEqual(BACKUP_FILENAME, self.backup.filename)


class PaginationTests(trove_testtools.TestCase):

    def setUp(self):
        super(PaginationTests, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(utils.utcnow())
        # Create a bunch of backups
        bkup_info = {
            'tenant_id': self.context.tenant,
            'state': BACKUP_STATE,
            'instance_id': self.instance_id,
            'size': 2.0,
            'deleted': False
        }
        for backup in range(50):
            bkup_info.update({'name': 'Backup-%s' % backup})
            models.DBBackup.create(**bkup_info)

    def tearDown(self):
        super(PaginationTests, self).tearDown()
        query = models.DBBackup.query()
        query.filter_by(instance_id=self.instance_id).delete()

    def test_pagination_list(self):
        # page one
        backups, marker = models.Backup.list(self.context)
        self.assertEqual(20, marker)
        self.assertEqual(20, len(backups))
        # page two
        self.context.marker = 20
        backups, marker = models.Backup.list(self.context)
        self.assertEqual(40, marker)
        self.assertEqual(20, len(backups))
        # page three
        self.context.marker = 40
        backups, marker = models.Backup.list(self.context)
        self.assertIsNone(marker)
        self.assertEqual(10, len(backups))

    def test_pagination_list_for_instance(self):
        # page one
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertEqual(20, marker)
        self.assertEqual(20, len(backups))
        # page two
        self.context.marker = 20
        backups, marker = models.Backup.list(self.context)
        self.assertEqual(40, marker)
        self.assertEqual(20, len(backups))
        # page three
        self.context.marker = 40
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertIsNone(marker)
        self.assertEqual(10, len(backups))


class OrderingTests(trove_testtools.TestCase):

    def setUp(self):
        super(OrderingTests, self).setUp()
        util.init_db()
        now = utils.utcnow()
        self.context, self.instance_id = _prep_conf(now)
        info = {
            'tenant_id': self.context.tenant,
            'state': BACKUP_STATE,
            'instance_id': self.instance_id,
            'size': 2.0,
            'deleted': False
        }
        four = now - datetime.timedelta(days=4)
        one = now - datetime.timedelta(days=1)
        three = now - datetime.timedelta(days=3)
        two = now - datetime.timedelta(days=2)
        # Create backups out of order; save/create set the 'updated' field,
        # so we need to use the db_api directly.
        models.DBBackup().db_api.save(
            models.DBBackup(name='four', updated=four,
                            id=utils.generate_uuid(), **info))
        models.DBBackup().db_api.save(
            models.DBBackup(name='one', updated=one,
                            id=utils.generate_uuid(), **info))
        models.DBBackup().db_api.save(
            models.DBBackup(name='three', updated=three,
                            id=utils.generate_uuid(), **info))
        models.DBBackup().db_api.save(
            models.DBBackup(name='two', updated=two,
                            id=utils.generate_uuid(), **info))

    def tearDown(self):
        super(OrderingTests, self).tearDown()
        query = models.DBBackup.query()
        query.filter_by(instance_id=self.instance_id).delete()

    def test_list(self):
        backups, marker = models.Backup.list(self.context)
        self.assertIsNone(marker)
        actual = [b.name for b in backups]
        expected = [u'one', u'two', u'three', u'four']
        self.assertEqual(expected, actual)

    def test_list_for_instance(self):
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertIsNone(marker)
        actual = [b.name for b in backups]
        expected = [u'one', u'two', u'three', u'four']
        self.assertEqual(expected, actual)
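
# Editor's note: the PaginationTests above pin down the paging contract --
# list() returns a (page, marker) tuple, and the caller feeds the marker
# back in via context.marker until it comes back as None. A hedged sketch
# of that client-side loop (list_fn stands in for models.Backup.list):

def _sketch_iterate_all_backups(context, list_fn):
    """Yield every backup, following pagination markers until exhausted."""
    context.marker = None
    while True:
        backups, marker = list_fn(context)
        for backup in backups:
            yield backup
        if marker is None:
            # No more pages; the final short page has been consumed.
            break
        context.marker = marker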
trove-5.0.0/trove/tests/unittests/backup/__init__.py0000664000567000056710000000000012701410316023716 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/backup/test_storage.py0000664000567000056710000003222012701410316024673 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib

from mock import Mock, MagicMock, patch

from trove.common.strategies.storage import swift
from trove.common.strategies.storage.swift import StreamReader
from trove.common.strategies.storage.swift \
    import SwiftDownloadIntegrityError
from trove.common.strategies.storage.swift import SwiftStorage
from trove.tests.fakes.swift import FakeSwiftConnection
from trove.tests.unittests.backup.test_backupagent \
    import MockBackup as MockBackupRunner
from trove.tests.unittests import trove_testtools


class SwiftStorageSaveChecksumTests(trove_testtools.TestCase):
    """SwiftStorage.save is used to save a backup to Swift."""

    def setUp(self):
        super(SwiftStorageSaveChecksumTests, self).setUp()

    def tearDown(self):
        super(SwiftStorageSaveChecksumTests, self).tearDown()

    def test_swift_checksum_save(self):
        """This tests that SwiftStorage.save returns the swift checksum."""
        context = trove_testtools.TroveTestContext(self)
        backup_id = '123'
        user = 'user'
        password = 'password'
        swift_client = FakeSwiftConnection()
        with patch.object(swift, 'create_swift_client',
                          return_value=swift_client):
            storage_strategy = SwiftStorage(context)
            with MockBackupRunner(filename=backup_id,
                                  user=user,
                                  password=password) as runner:
                (success, note, checksum, location) = \
                    storage_strategy.save(runner.manifest, runner)

        self.assertTrue(success, "The backup should have been successful.")
        self.assertIsNotNone(note, "A note should have been returned.")
        self.assertEqual('http://mockswift/v1/database_backups/123.gz.enc',
                         location,
                         "Incorrect swift location was returned.")

    @patch('trove.common.strategies.storage.swift.LOG')
    def test_swift_segment_checksum_etag_mismatch(self, mock_logging):
        """This tests that when etag doesn't match segment uploaded checksum
        False is returned and None for checksum and location
        """
        context = trove_testtools.TroveTestContext(self)
        # this backup_id will trigger fake swift client with calculate_etag
        # enabled to spit out a bad etag when a segment object is uploaded
        backup_id = 'bad_segment_etag_123'
        user = 'user'
        password = 'password'
        swift_client = FakeSwiftConnection()
        with patch.object(swift, 'create_swift_client',
                          return_value=swift_client):
            storage_strategy = SwiftStorage(context)
            with MockBackupRunner(filename=backup_id,
                                  user=user,
                                  password=password) as runner:
                (success, note, checksum, location) = \
                    storage_strategy.save(runner.manifest, runner)

        self.assertFalse(success, "The backup should have failed!")
        self.assertTrue(note.startswith("Error saving data to Swift!"))
        self.assertIsNone(checksum,
                          "Swift checksum should be None for failed backup.")
        self.assertEqual('http://mockswift/v1/database_backups/'
                         'bad_segment_etag_123.gz.enc',
                         location,
                         "Incorrect swift location was returned.")

    @patch('trove.common.strategies.storage.swift.LOG')
    def test_swift_checksum_etag_mismatch(self, mock_logging):
        """This tests that when etag doesn't match swift checksum
        False is returned and None for checksum and location
        """
        context = trove_testtools.TroveTestContext(self)
        # this backup_id will trigger fake swift client with calculate_etag
        # enabled to spit out a bad etag when a segment object is uploaded
        backup_id = 'bad_manifest_etag_123'
        user = 'user'
        password = 'password'
        swift_client = FakeSwiftConnection()
        with patch.object(swift, 'create_swift_client',
                          return_value=swift_client):
            storage_strategy = SwiftStorage(context)
            with MockBackupRunner(filename=backup_id,
                                  user=user,
                                  password=password) as runner:
                (success, note, checksum, location) = \
                    storage_strategy.save(runner.manifest, runner)

        self.assertFalse(success, "The backup should have failed!")
        self.assertTrue(note.startswith("Error saving data to Swift!"))
        self.assertIsNone(checksum,
                          "Swift checksum should be None for failed backup.")
        self.assertEqual('http://mockswift/v1/database_backups/'
                         'bad_manifest_etag_123.gz.enc',
                         location,
                         "Incorrect swift location was returned.")


class SwiftStorageUtils(trove_testtools.TestCase):

    def setUp(self):
        super(SwiftStorageUtils, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)
        self.swift_client = FakeSwiftConnection()
        self.create_swift_client_patch = patch.object(
            swift, 'create_swift_client',
            MagicMock(return_value=self.swift_client))
        self.create_swift_client_mock = self.create_swift_client_patch.start()
        self.addCleanup(self.create_swift_client_patch.stop)
        self.swift = SwiftStorage(self.context)

    def tearDown(self):
        super(SwiftStorageUtils, self).tearDown()

    def test_explode_location(self):
        location = 'http://mockswift.com/v1/545433/backups/mybackup.tar'
        url, container, filename = self.swift._explodeLocation(location)
        self.assertEqual('http://mockswift.com/v1/545433', url)
        self.assertEqual('backups', container)
        self.assertEqual('mybackup.tar', filename)

    def test_validate_checksum_good(self):
        match = self.swift._verify_checksum('"my-good-etag"', 'my-good-etag')
        self.assertTrue(match)

    @patch('trove.common.strategies.storage.swift.LOG')
    def test_verify_checksum_bad(self, mock_logging):
        self.assertRaises(SwiftDownloadIntegrityError,
                          self.swift._verify_checksum,
                          '"THE-GOOD-THE-BAD"',
                          'AND-THE-UGLY')


class SwiftStorageLoad(trove_testtools.TestCase):
    """SwiftStorage.load is used to return SwiftDownloadStream
    which is used to download a backup object from Swift
    """

    def setUp(self):
        super(SwiftStorageLoad, self).setUp()

    def tearDown(self):
        super(SwiftStorageLoad, self).tearDown()

    def test_run_verify_checksum(self):
        """This tests that swift download cmd runs if original backup
        checksum matches swift object etag
        """
        context = trove_testtools.TroveTestContext(self)
        location = "/backup/location/123"
        backup_checksum = "fake-md5-sum"
        swift_client = FakeSwiftConnection()
        with patch.object(swift, 'create_swift_client',
                          return_value=swift_client):
            storage_strategy = SwiftStorage(context)
            download_stream = storage_strategy.load(location,
                                                    backup_checksum)
            self.assertIsNotNone(download_stream)

    @patch('trove.common.strategies.storage.swift.LOG')
    def test_run_verify_checksum_mismatch(self, mock_logging):
        """This tests that SwiftDownloadIntegrityError is raised
        and swift download cmd does not run when original backup
        checksum does not match swift object etag
        """
        context = trove_testtools.TroveTestContext(self)
        location = "/backup/location/123"
        backup_checksum = "checksum_different_then_fake_swift_etag"
        swift_client = FakeSwiftConnection()
        with patch.object(swift, 'create_swift_client',
                          return_value=swift_client):
            storage_strategy = SwiftStorage(context)
            self.assertRaises(SwiftDownloadIntegrityError,
                              storage_strategy.load,
                              location,
                              backup_checksum)


class MockBackupStream(MockBackupRunner):

    def read(self, chunk_size):
        return 'X' * chunk_size


class StreamReaderTests(trove_testtools.TestCase):

    def setUp(self):
        super(StreamReaderTests, self).setUp()
        self.runner = MockBackupStream(filename='123.xbstream.enc.gz',
                                       user='user',
                                       password='password')
        self.stream = StreamReader(self.runner, self.runner.manifest,
                                   max_file_size=100)

    def test_base_filename(self):
        self.assertEqual('123', self.stream.base_filename)

    def test_base_filename_no_extension(self):
        stream_reader = StreamReader(self.runner, 'foo')
        self.assertEqual('foo', stream_reader.base_filename)

    def test_prefix(self):
        self.assertEqual('database_backups/123_', self.stream.prefix)

    def test_segment(self):
        self.assertEqual('123_00000000', self.stream.segment)

    def test_end_of_file(self):
        self.assertFalse(self.stream.end_of_file)

    def test_end_of_segment(self):
        self.assertFalse(self.stream.end_of_segment)

    def test_segment_almost_complete(self):
        self.stream.segment_length = 98
        results = self.stream.read(2)
        self.assertEqual('XX', results)
        self.assertEqual('123_00000000', self.stream.segment,
                         "The Segment should still be the same")
        self.assertEqual(100, self.stream.segment_length)
        checksum = hashlib.md5('XX')
        checksum = checksum.hexdigest()
        segment_checksum = self.stream.segment_checksum.hexdigest()
        self.assertEqual(checksum, segment_checksum,
                         "Segment checksum did not match")

    def test_segment_complete(self):
        self.stream.segment_length = 99
        results = self.stream.read(2)
        self.assertEqual('', results, "Results should be empty.")
        self.assertEqual('123_00000001', self.stream.segment)

    def test_stream_complete(self):
        results = self.stream.read(0)
        self.assertEqual('', results, "Results should be empty.")
        self.assertTrue(self.stream.end_of_file)


class SwiftMetadataTests(trove_testtools.TestCase):

    def setUp(self):
        super(SwiftMetadataTests, self).setUp()
        self.swift_client = FakeSwiftConnection()
        self.context = trove_testtools.TroveTestContext(self)
        self.create_swift_client_patch = patch.object(
            swift, 'create_swift_client',
            MagicMock(return_value=self.swift_client))
        self.create_swift_client_mock = self.create_swift_client_patch.start()
        self.addCleanup(self.create_swift_client_patch.stop)
        self.swift = SwiftStorage(self.context)

    def tearDown(self):
        super(SwiftMetadataTests, self).tearDown()

    def test__get_attr(self):
        normal_header = self.swift._get_attr('content-type')
        self.assertEqual('content_type', normal_header)
        meta_header = self.swift._get_attr('x-object-meta-foo')
        self.assertEqual('foo', meta_header)
        meta_header_two = self.swift._get_attr('x-object-meta-foo-bar')
        self.assertEqual('foo_bar', meta_header_two)

    def test__set_attr(self):
        meta_header = self.swift._set_attr('foo')
        self.assertEqual('X-Object-Meta-foo', meta_header)
        meta_header_two = self.swift._set_attr('foo_bar')
        self.assertEqual('X-Object-Meta-foo-bar', meta_header_two)

    def test_load_metadata(self):
        location = 'http://mockswift.com/v1/545433/backups/mybackup.tar'
        headers = {
            'etag': '"fake-md5-sum"',
            'x-object-meta-lsn': '1234567'
        }
        with patch.object(self.swift_client, 'head_object',
                          return_value=headers):
            metadata = self.swift.load_metadata(location, 'fake-md5-sum')
            self.assertEqual({'lsn': '1234567'}, metadata)

    def test_save_metadata(self):
        location = 'http://mockswift.com/v1/545433/backups/mybackup.tar'
        metadata = {'lsn': '1234567'}
        self.swift_client.post_object = Mock()

        self.swift.save_metadata(location, metadata=metadata)

        headers = {
            'X-Object-Meta-lsn': '1234567',
            'X-Object-Manifest': None
        }
        self.swift_client.post_object.assert_called_with(
            'backups', 'mybackup.tar', headers=headers)
trove-5.0.0/trove/tests/unittests/quota/0000775000567000056710000000000012701410521021501 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/quota/test_quota.py0000664000567000056710000006271512701410316024260 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import Mock, MagicMock, patch
from testtools import skipIf

from trove.common import cfg
from trove.common import exception
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.quota.models import Resource
from trove.quota.quota import DbQuotaDriver
from trove.quota.quota import QUOTAS
from trove.quota.quota import run_with_quotas
from trove.tests.unittests import trove_testtools

"""
Unit tests for the classes and functions in DbQuotaDriver.py.
"""

CONF = cfg.CONF
resources = {
    Resource.INSTANCES: Resource(Resource.INSTANCES,
                                 'max_instances_per_tenant'),
    Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_tenant')
}

FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"


class Run_with_quotasTest(trove_testtools.TestCase):

    def setUp(self):
        super(Run_with_quotasTest, self).setUp()
        self.quota_reserve_orig = QUOTAS.reserve
        self.quota_rollback_orig = QUOTAS.rollback
        self.quota_commit_orig = QUOTAS.commit
        QUOTAS.reserve = Mock()
        QUOTAS.rollback = Mock()
        QUOTAS.commit = Mock()

    def tearDown(self):
        super(Run_with_quotasTest, self).tearDown()
        QUOTAS.reserve = self.quota_reserve_orig
        QUOTAS.rollback = self.quota_rollback_orig
        QUOTAS.commit = self.quota_commit_orig

    def test_run_with_quotas(self):
        f = Mock()
        run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
        self.assertTrue(QUOTAS.reserve.called)
        self.assertTrue(QUOTAS.commit.called)
        self.assertFalse(QUOTAS.rollback.called)
        self.assertTrue(f.called)

    def test_run_with_quotas_error(self):
        f = Mock(side_effect=exception.TroveError())
        self.assertRaises(exception.TroveError, run_with_quotas,
                          FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
        self.assertTrue(QUOTAS.reserve.called)
        self.assertTrue(QUOTAS.rollback.called)
        self.assertFalse(QUOTAS.commit.called)
        self.assertTrue(f.called)
class QuotaControllerTest(trove_testtools.TestCase):

    def setUp(self):
        super(QuotaControllerTest, self).setUp()
        context = MagicMock()
        context.is_admin = True
        req = MagicMock()
        req.environ = MagicMock()
        req.environ.get = MagicMock(return_value=context)
        self.req = req
        self.controller = QuotaController()

    def tearDown(self):
        super(QuotaControllerTest, self).tearDown()

    def test_update_unknown_resource(self):
        body = {'quotas': {'unknown_resource': 5}}
        self.assertRaises(exception.QuotaResourceUnknown,
                          self.controller.update,
                          self.req, body, FAKE_TENANT1, FAKE_TENANT2)

    def test_update_resource_no_value(self):
        quota = MagicMock(spec=Quota)
        with patch.object(DatabaseModelBase, 'find_by', return_value=quota):
            body = {'quotas': {'instances': None}}
            result = self.controller.update(self.req, body,
                                            FAKE_TENANT1, FAKE_TENANT2)
            self.assertEqual(0, quota.save.call_count)
            self.assertEqual(200, result.status)

    def test_update_resource_instance(self):
        instance_quota = MagicMock(spec=Quota)
        with patch.object(DatabaseModelBase, 'find_by',
                          return_value=instance_quota):
            body = {'quotas': {'instances': 2}}
            result = self.controller.update(self.req, body,
                                            FAKE_TENANT1, FAKE_TENANT2)
            self.assertEqual(1, instance_quota.save.call_count)
            self.assertTrue('instances' in result._data['quotas'])
            self.assertEqual(200, result.status)
            self.assertEqual(2, result._data['quotas']['instances'])

    @skipIf(not CONF.trove_volume_support, 'Volume support is not enabled')
    def test_update_resource_volume(self):
        instance_quota = MagicMock(spec=Quota)
        volume_quota = MagicMock(spec=Quota)

        def side_effect_func(*args, **kwargs):
            return (instance_quota if kwargs['resource'] == 'instances'
                    else volume_quota)

        with patch.object(DatabaseModelBase, 'find_by',
                          side_effect=side_effect_func):
            body = {'quotas': {'instances': None, 'volumes': 10}}
            result = self.controller.update(self.req, body,
                                            FAKE_TENANT1, FAKE_TENANT2)
            self.assertEqual(0, instance_quota.save.call_count)
            self.assertFalse('instances' in result._data['quotas'])
            self.assertEqual(1, volume_quota.save.call_count)
            self.assertEqual(200, result.status)
            self.assertEqual(10, result._data['quotas']['volumes'])


class DbQuotaDriverTest(trove_testtools.TestCase):

    def setUp(self):
        super(DbQuotaDriverTest, self).setUp()
        self.driver = DbQuotaDriver(resources)
        self.orig_Quota_find_all = Quota.find_all
        self.orig_QuotaUsage_find_all = QuotaUsage.find_all
        self.orig_QuotaUsage_find_by = QuotaUsage.find_by
        self.orig_Reservation_create = Reservation.create
        self.orig_QuotaUsage_create = QuotaUsage.create
        self.orig_QuotaUsage_save = QuotaUsage.save
        self.orig_Reservation_save = Reservation.save
        self.mock_quota_result = Mock()
        self.mock_usage_result = Mock()
        Quota.find_all = Mock(return_value=self.mock_quota_result)
        QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)

    def tearDown(self):
        super(DbQuotaDriverTest, self).tearDown()
        Quota.find_all = self.orig_Quota_find_all
        QuotaUsage.find_all = self.orig_QuotaUsage_find_all
        QuotaUsage.find_by = self.orig_QuotaUsage_find_by
        Reservation.create = self.orig_Reservation_create
        QuotaUsage.create = self.orig_QuotaUsage_create
        QuotaUsage.save = self.orig_QuotaUsage_save
        Reservation.save = self.orig_Reservation_save

    def test_get_defaults(self):
        defaults = self.driver.get_defaults(resources)
        self.assertEqual(CONF.max_instances_per_tenant,
                         defaults[Resource.INSTANCES])
        self.assertEqual(CONF.max_volumes_per_tenant,
                         defaults[Resource.VOLUMES])

    def test_get_quota_by_tenant(self):
        FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.INSTANCES,
                             hard_limit=12)]
        self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)

        quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
                                                Resource.VOLUMES)
        self.assertEqual(FAKE_TENANT1, quota.tenant_id)
        self.assertEqual(Resource.INSTANCES, quota.resource)
        self.assertEqual(12, quota.hard_limit)

    def test_get_quota_by_tenant_default(self):
        self.mock_quota_result.all = Mock(return_value=[])

        quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
                                                Resource.VOLUMES)
        self.assertEqual(FAKE_TENANT1, quota.tenant_id)
        self.assertEqual(Resource.VOLUMES, quota.resource)
        self.assertEqual(CONF.max_volumes_per_tenant, quota.hard_limit)

    def test_get_all_quotas_by_tenant(self):
        FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.INSTANCES,
                             hard_limit=22),
                       Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.VOLUMES,
                             hard_limit=15)]
        self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)

        quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
                                                      resources.keys())
        self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
        self.assertEqual(Resource.INSTANCES,
                         quotas[Resource.INSTANCES].resource)
        self.assertEqual(22, quotas[Resource.INSTANCES].hard_limit)
        self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
        self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
        self.assertEqual(15, quotas[Resource.VOLUMES].hard_limit)

    def test_get_all_quotas_by_tenant_with_all_default(self):
        self.mock_quota_result.all = Mock(return_value=[])

        quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
                                                      resources.keys())
        self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
        self.assertEqual(Resource.INSTANCES,
                         quotas[Resource.INSTANCES].resource)
        self.assertEqual(CONF.max_instances_per_tenant,
                         quotas[Resource.INSTANCES].hard_limit)
        self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
        self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
        self.assertEqual(CONF.max_volumes_per_tenant,
                         quotas[Resource.VOLUMES].hard_limit)

    def test_get_all_quotas_by_tenant_with_one_default(self):
        FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.INSTANCES,
                             hard_limit=22)]
        self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)

        quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
                                                      resources.keys())
        self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
        self.assertEqual(Resource.INSTANCES,
                         quotas[Resource.INSTANCES].resource)
        self.assertEqual(22, quotas[Resource.INSTANCES].hard_limit)
        self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
        self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
        self.assertEqual(CONF.max_volumes_per_tenant,
                         quotas[Resource.VOLUMES].hard_limit)

    def test_get_quota_usage_by_tenant(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=3,
                                  reserved=1)]
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
                                                      Resource.VOLUMES)
        self.assertEqual(FAKE_TENANT1, usage.tenant_id)
        self.assertEqual(Resource.VOLUMES, usage.resource)
        self.assertEqual(3, usage.in_use)
        self.assertEqual(1, usage.reserved)

    def test_get_quota_usage_by_tenant_default(self):
        FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
                                resource=Resource.VOLUMES,
                                in_use=0,
                                reserved=0)
        self.mock_usage_result.all = Mock(return_value=[])
        QuotaUsage.create = Mock(return_value=FAKE_QUOTA)

        usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
                                                      Resource.VOLUMES)
        self.assertEqual(FAKE_TENANT1, usage.tenant_id)
        self.assertEqual(Resource.VOLUMES, usage.resource)
        self.assertEqual(0, usage.in_use)
        self.assertEqual(0, usage.reserved)

    def test_get_all_quota_usages_by_tenant(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=2,
                                  reserved=1),
                       QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=1)]
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
                                                            resources.keys())
        self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
        self.assertEqual(Resource.INSTANCES,
                         usages[Resource.INSTANCES].resource)
        self.assertEqual(2, usages[Resource.INSTANCES].in_use)
        self.assertEqual(1, usages[Resource.INSTANCES].reserved)
        self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
        self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
        self.assertEqual(1, usages[Resource.VOLUMES].in_use)
        self.assertEqual(1, usages[Resource.VOLUMES].reserved)

    def test_get_all_quota_usages_by_tenant_with_all_default(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=0),
                       QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]

        def side_effect_func(*args, **kwargs):
            return (FAKE_QUOTAS[0] if kwargs['resource'] == 'instances'
                    else FAKE_QUOTAS[1])

        self.mock_usage_result.all = Mock(return_value=[])
        QuotaUsage.create = Mock(side_effect=side_effect_func)

        usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
                                                            resources.keys())
        self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
        self.assertEqual(Resource.INSTANCES,
                         usages[Resource.INSTANCES].resource)
        self.assertEqual(0, usages[Resource.INSTANCES].in_use)
        self.assertEqual(0, usages[Resource.INSTANCES].reserved)
        self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
        self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
        self.assertEqual(0, usages[Resource.VOLUMES].in_use)
        self.assertEqual(0, usages[Resource.VOLUMES].reserved)

    def test_get_all_quota_usages_by_tenant_with_one_default(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=0)]
        NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
                                    resource=Resource.VOLUMES,
                                    in_use=0,
                                    reserved=0)
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
        QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)

        usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
                                                            resources.keys())
        self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
        self.assertEqual(Resource.INSTANCES,
                         usages[Resource.INSTANCES].resource)
        self.assertEqual(0, usages[Resource.INSTANCES].in_use)
        self.assertEqual(0, usages[Resource.INSTANCES].reserved)
        self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
        self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
        self.assertEqual(0, usages[Resource.VOLUMES].in_use)
        self.assertEqual(0, usages[Resource.VOLUMES].reserved)

    def test_reserve(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=1,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=1)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
        QuotaUsage.save = Mock()
        Reservation.create = Mock()

        # Set up the deltas with the intention that after the reserve call
        # the deltas should match usage_id + 1 for both instances and volumes
        delta = {'instances': 2, 'volumes': 3}
        self.driver.reserve(FAKE_TENANT1, resources, delta)
        for _, kw in Reservation.create.call_args_list:
            self.assertEqual(kw['usage_id'] + 1, kw['delta'])
            self.assertEqual(Reservation.Statuses.RESERVED, kw['status'])

    def test_reserve_resource_unknown(self):
        delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}
        self.assertRaises(exception.QuotaResourceUnknown,
                          self.driver.reserve,
                          FAKE_TENANT1, resources, delta)

    def test_reserve_over_quota(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=0),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        delta = {'instances': 1, 'volumes': CONF.max_volumes_per_tenant + 1}
        self.assertRaises(exception.QuotaExceeded,
                          self.driver.reserve,
                          FAKE_TENANT1, resources, delta)

    def test_reserve_over_quota_with_usage(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=1,
                                  reserved=0),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        delta = {'instances': 5, 'volumes': 3}
        self.assertRaises(exception.QuotaExceeded,
                          self.driver.reserve,
                          FAKE_TENANT1, resources, delta)

    def test_reserve_over_quota_with_reserved(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=1,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        delta = {'instances': 4, 'volumes': 2}
        self.assertRaises(exception.QuotaExceeded,
                          self.driver.reserve,
                          FAKE_TENANT1, resources, delta)

    def test_reserve_over_quota_but_can_apply_negative_deltas(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=10,
                                  reserved=0),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=50,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
        QuotaUsage.save = Mock()
        Reservation.create = Mock()

        # Set up the deltas with the intention that after the reserve call
        # the deltas should match -usage_id for both instances and volumes
        delta = {'instances': -1, 'volumes': -2}
        self.driver.reserve(FAKE_TENANT1, resources, delta)
        for _, kw in Reservation.create.call_args_list:
            self.assertEqual(-kw['usage_id'], kw['delta'])
            self.assertEqual(Reservation.Statuses.RESERVED, kw['status'])

    def test_commit(self):
        Reservation.save = Mock()
        QuotaUsage.save = Mock()
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=5,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=2)]
        FAKE_RESERVATIONS = [Reservation(usage_id=1,
                                         delta=1,
                                         status=Reservation.Statuses.RESERVED),
                             Reservation(usage_id=2,
                                         delta=2,
                                         status=Reservation.Statuses.RESERVED)]
        QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)

        self.driver.commit(FAKE_RESERVATIONS)
        self.assertEqual(6, FAKE_QUOTAS[0].in_use)
        self.assertEqual(1, FAKE_QUOTAS[0].reserved)
        self.assertEqual(Reservation.Statuses.COMMITTED,
                         FAKE_RESERVATIONS[0].status)
        self.assertEqual(3, FAKE_QUOTAS[1].in_use)
        self.assertEqual(0, FAKE_QUOTAS[1].reserved)
        self.assertEqual(Reservation.Statuses.COMMITTED,
                         FAKE_RESERVATIONS[1].status)

    def test_commit_cannot_be_less_than_zero(self):
        Reservation.save = Mock()
        QuotaUsage.save = Mock()
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=-1)]
        FAKE_RESERVATIONS = [Reservation(usage_id=1,
                                         delta=-1,
                                         status=Reservation.Statuses.RESERVED)]
        QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)

        self.driver.commit(FAKE_RESERVATIONS)
        self.assertEqual(0, FAKE_QUOTAS[0].in_use)
        self.assertEqual(0, FAKE_QUOTAS[0].reserved)
        self.assertEqual(Reservation.Statuses.COMMITTED,
                         FAKE_RESERVATIONS[0].status)

    def test_rollback(self):
        Reservation.save = Mock()
        QuotaUsage.save = Mock()
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=5,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=2)]
        FAKE_RESERVATIONS = [Reservation(usage_id=1,
                                         delta=1,
                                         status=Reservation.Statuses.RESERVED),
                             Reservation(usage_id=2,
                                         delta=2,
                                         status=Reservation.Statuses.RESERVED)]
        QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)

        self.driver.rollback(FAKE_RESERVATIONS)
        self.assertEqual(5, FAKE_QUOTAS[0].in_use)
        self.assertEqual(1, FAKE_QUOTAS[0].reserved)
        self.assertEqual(Reservation.Statuses.ROLLEDBACK,
                         FAKE_RESERVATIONS[0].status)
        self.assertEqual(1, FAKE_QUOTAS[1].in_use)
        self.assertEqual(0, FAKE_QUOTAS[1].reserved)
        self.assertEqual(Reservation.Statuses.ROLLEDBACK,
                         FAKE_RESERVATIONS[1].status)
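
# Editor's note: the reserve() tests above imply the per-resource
# admission rule the driver applies before creating reservations. A hedged
# sketch of that check (illustrative only; field names follow the
# QuotaUsage model exercised above):

def _sketch_would_exceed_quota(usage, hard_limit, delta):
    # Negative deltas release capacity and are always admissible,
    # matching test_reserve_over_quota_but_can_apply_negative_deltas.
    if delta < 0:
        return False
    # Both committed usage and outstanding reservations count against
    # the hard limit, matching test_reserve_over_quota_with_reserved.
    return usage.in_use + usage.reserved + delta > hard_limit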
trove-5.0.0/trove/tests/unittests/quota/__init__.py0000664000567000056710000000000012701410316023602 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/configuration/0000775000567000056710000000000012701410521023217 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/configuration/__init__.py0000664000567000056710000000000012701410316025320 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/configuration/test_configuration_controller.py0000664000567000056710000002264112701410316031751 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import jsonschema

from mock import MagicMock

from trove.common import configurations
from trove.common.exception import UnprocessableEntity
from trove.configuration.service import ConfigurationsController
from trove.extensions.mgmt.configuration import service
from trove.tests.unittests import trove_testtools


class TestConfigurationParser(trove_testtools.TestCase):
    def setUp(self):
        super(TestConfigurationParser, self).setUp()

    def test_parse_my_cnf_correctly(self):
        config = """
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
connect_timeout = 15
# we need to test no value params
skip-external-locking
;another comment
!includedir /etc/mysql/conf.d/
"""
        cfg_parser = configurations.MySQLConfParser(config)
        parsed = cfg_parser.parse()
        d_parsed = dict(parsed)
        self.assertIsNotNone(d_parsed)
        self.assertEqual("/var/run/mysqld/mysqld.pid", d_parsed["pid-file"])
        self.assertEqual('15', d_parsed["connect_timeout"])
        self.assertEqual('1', d_parsed["skip-external-locking"])


class TestConfigurationController(trove_testtools.TestCase):
    def setUp(self):
        super(TestConfigurationController, self).setUp()
        self.controller = ConfigurationsController()

    def test_validate_create_configuration(self):
        body = {
            "configuration": {
                "values": {},
                "name": "test",
                "datastore": {
                    "type": "test_type",
                    "version": "test_version"
                }
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_configuration_no_datastore(self):
        body = {
            "configuration": {
                "values": {},
                "name": "test"
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_invalid_values_param(self):
        body = {
            "configuration": {
                "values": '',
                "name": "test",
                "datastore": {
                    "type": "test_type",
                    "version": "test_version"
                }
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        self.assertIn("'' is not of type 'object'", error_messages)

    def test_validate_create_invalid_name_param(self):
        body = {
            "configuration": {
                "values": {},
                "name": "",
                "datastore": {
                    "type": "test_type",
                    "version": "test_version"
                }
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        self.assertIn("'' is too short", error_messages)

    def test_validate_edit_configuration(self):
        body = {
            "configuration": {
                "values": {}
            }
        }
        schema = self.controller.get_schema('edit', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def _test_validate_configuration(self, input_values, config_rules=None):
        if config_rules is None:
            config_val1 = MagicMock()
            config_val1.name = 'max_connections'
            config_val1.restart_required = 'false'
            config_val1.datastore_version_id = 5.5
            config_val1.max = 1
            config_val1.min = 0
            config_val1.data_type = 'integer'
            config_rules = [config_val1]

        data_version = MagicMock()
        data_version.id = 42
        data_version.name = 5.5
        data_version.datastore_name = 'test'

        self.assertRaises(UnprocessableEntity,
                          ConfigurationsController._validate_configuration,
                          input_values,
                          data_version,
                          config_rules)

    def test_validate_configuration_with_no_rules(self):
        self._test_validate_configuration({'max_connections': 5}, [])

    def test_validate_configuration_with_invalid_param(self):
        self._test_validate_configuration({'test': 5})

    def test_validate_configuration_with_invalid_type(self):
        self._test_validate_configuration({'max_connections': '1'})

    def test_validate_configuration_with_invalid_max(self):
        self._test_validate_configuration({'max_connections': 5})

    def test_validate_configuration_with_invalid_min(self):
        self._test_validate_configuration({'max_connections': -1})

    def test_validate_long_value(self):
        config_val1 = MagicMock()
        config_val1.name = 'myisam_sort_buffer_size'
        config_val1.max_size = 18446744073709551615
        config_val1.min_size = 4096
        config_val1.data_type = 'integer'
        config_rules = [config_val1]

        ConfigurationsController._validate_configuration(
            {'myisam_sort_buffer_size': 18446744073709551615},
            None, config_rules)


class TestConfigurationsParameterController(trove_testtools.TestCase):
    def setUp(self):
        super(TestConfigurationsParameterController, self).setUp()
        self.controller = service.ConfigurationsParameterController()

    def test_validate_create_configuration_param(self):
        body = {
            'configuration-parameter': {
                'name': 'test',
                'restart_required': 1,
                'data_type': 'string',
                'min': '0',
                'max': '255'
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_invalid_restart_required(self):
        body = {
            'configuration-parameter': {
                'name': 'test',
                'restart_required': 5,
                'data_type': 'string',
                'min': 0,
                'max': 255
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        self.assertIn("5 is greater than the maximum of 1", error_messages)
        self.assertIn("0 is not of type 'string'", error_messages)
        self.assertIn("255 is not of type 'string'", error_messages)

    def test_validate_create_invalid_restart_required_2(self):
        body = {
            'configuration-parameter': {
                'name': 'test',
                'restart_required': -1,
                'data_type': 'string',
                'min': '0',
                'max': '255'
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        self.assertIn("-1 is less than the minimum of 0", error_messages)

    def test_validate_create_invalid_restart_required_3(self):
        body = {
            'configuration-parameter': {
                'name': 'test',
                'restart_required': 'yes',
                'data_type': 'string',
                'min': '0',
                'max': '255'
            }
        }
        schema = self.controller.get_schema('create', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        self.assertIn("'yes' is not of type 'integer'", error_messages)
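
# Editor's note: a usage sketch of the parser behaviour pinned down by
# TestConfigurationParser above -- MySQLConfParser.parse() yields
# key/value pairs, value-less options such as "skip-external-locking"
# come back as '1', and comments and !include directives are dropped.

# from trove.common import configurations  (already imported above)

def _sketch_parse_my_cnf(text):
    """Return the parsed my.cnf options as a plain dict."""
    parsed = configurations.MySQLConfParser(text).parse()
    return dict(parsed)  # e.g. {'connect_timeout': '15', ...}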
trove-5.0.0/trove/tests/unittests/upgrade/0000775000567000056710000000000012701410521021777 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/upgrade/__init__.py0000664000567000056710000000000012701410316024100 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/upgrade/test_models.py0000664000567000056710000000626512701410316024706 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import patch

from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender
from trove import rpc
from trove.tests.unittests import trove_testtools


class TestUpgradeModel(trove_testtools.TestCase):
    def setUp(self):
        super(TestUpgradeModel, self).setUp()

    def tearDown(self):
        super(TestUpgradeModel, self).tearDown()

    def test_validate(self):
        """
        Test validation method
        """
        param = None
        self.assertRaises(
            ValueError, UpgradeMessageSender._validate, param, 36)

        param = ''
        self.assertRaises(
            ValueError, UpgradeMessageSender._validate, param, 36)

        param = '7169f46a-ac53-401a-ba35-f461db948b8c7'
        self.assertRaises(
            ValueError, UpgradeMessageSender._validate, param, 36)

        param = '7169f46a-ac53-401a-ba35-f461db948b8c'
        self.assertTrue(UpgradeMessageSender._validate(param, 36))

        param = '7169f46a-ac53-401a-ba35'
        self.assertTrue(UpgradeMessageSender._validate(param, 36))

    def test_create(self):
        self._assert_create_with_metadata()

    def test_create_with_metadata_none(self):
        self._assert_create_with_metadata(metadata=None)

    def test_create_with_empty_metadata(self):
        self._assert_create_with_metadata(metadata={})

    def test_create_with_metadata(self):
        self._assert_create_with_metadata(
            metadata={"is_public": True,
                      "is_encrypted": True,
                      "config_location": "http://swift/trove-guestagent.conf"})

    @patch('trove.guestagent.api.API.upgrade')
    @patch.object(rpc, 'get_client')
    def _assert_create_with_metadata(self, mock_client, api_upgrade_mock,
                                     metadata=None):
        """Exercise UpgradeMessageSender.create() call. """
        context = trove_testtools.TroveTestContext(self)

        instance_id = "27e25b73-88a1-4526-b2b9-919a28b8b33f"
        instance_version = "v1.0.1"
        location = "http://swift/trove-guestagent-v1.0.1.tar.gz"

        func = (UpgradeMessageSender.create(
            context, instance_id, instance_version, location, metadata)
            if metadata is not None else
            UpgradeMessageSender.create(
                context, instance_id, instance_version, location))

        self.assertTrue(callable(func))
        func()
        # This call should translate to the API call asserted below.
        api_upgrade_mock.assert_called_once_with(instance_version,
                                                 location, metadata)
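
# Editor's note: TestUpgradeModel.test_validate above fixes the behaviour
# of UpgradeMessageSender._validate: None, empty strings and values longer
# than max_length are rejected with ValueError, anything else passes. A
# hedged equivalent sketch (not the actual implementation):

def _sketch_validate(param, max_length):
    if not param:
        # Covers both None and the empty string, per the first two cases.
        raise ValueError("Parameter cannot be empty.")
    if len(param) > max_length:
        # A 37-character value fails against max_length=36 in the test.
        raise ValueError("Parameter exceeds the maximum length.")
    return True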
trove-5.0.0/trove/tests/unittests/upgrade/test_controller.py0000664000567000056710000001157112701410316025602 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import jsonschema

from mock import Mock, MagicMock, patch

from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender
from trove.extensions.mgmt.upgrade.service import UpgradeController
from trove.tests.unittests import trove_testtools


class TestUpgradeController(trove_testtools.TestCase):
    def setUp(self):
        super(TestUpgradeController, self).setUp()
        self.controller = UpgradeController()
        self.body = {
            "upgrade": {
                "instance_id": "27e25b73-88a1-4526-b2b9-919a28b8b33f",
                "instance_version": "v1.0.1",
                "location": "http://swift/trove-guestagent-v1.0.1.tar.gz"}
        }

    def tearDown(self):
        super(TestUpgradeController, self).tearDown()
        self.body = {}

    def _get_validator(self, body):
        """
        Helper method to return a validator
        """
        schema = self.controller.get_schema('create', body)
        return jsonschema.Draft4Validator(schema)

    def test_validate_create(self):
        """
        Test for valid payload in body
        """
        validator = self._get_validator(self.body)
        self.assertTrue(validator.is_valid(self.body))

    def test_validate_create_additional_params(self):
        """
        Test for valid payload with additional params
        """
        self.body["upgrade"]["description"] = "upgrade"
        validator = self._get_validator(self.body)
        self.assertTrue(validator.is_valid(self.body))

    @patch.object(UpgradeMessageSender, 'create',
                  Mock(return_value=Mock()))
    def test_controller_with_no_metadata(self):
        """
        Test the mock controller w/out metadata
        """
        tenant_id = '77889991010'
        instance_id = '27e25b73-88a1-4526-b2b9-919a28b8b33f'

        context = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)

        resp = self.controller.create(req, self.body, tenant_id, instance_id)

        instance_version = self.body["upgrade"]["instance_version"]
        location = self.body["upgrade"]["location"]
        metadata = None

        UpgradeMessageSender.create.assert_called_once_with(
            context, instance_id, instance_version, location, metadata)
        self.assertEqual(202, resp.status)

    @patch.object(UpgradeMessageSender, 'create',
                  Mock(return_value=Mock()))
    def test_controller_with_metadata(self):
        """
        Test the mock controller with metadata
        """
        tenant_id = '77889991010'
        instance_id = '27e25b73-88a1-4526-b2b9-919a28b8b33f'

        context = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)

        # append the body w/ metadata
        self.body["upgrade"]["metadata"] = {
            "config_location": "swift://my.conf.location",
            "is_public": True,
            "is_encypted": True}

        resp = self.controller.create(req, self.body, tenant_id, instance_id)

        instance_version = self.body["upgrade"]["instance_version"]
        location = self.body["upgrade"]["location"]
        metadata = self.body["upgrade"]["metadata"]

        UpgradeMessageSender.create.assert_called_once_with(
            context, instance_id, instance_version, location, metadata)
        self.assertEqual(202, resp.status)

    @patch.object(UpgradeMessageSender, 'create',
                  Mock(return_value=Mock()))
    def test_controller_with_empty_metadata(self):
        """
        Test the mock controller with empty metadata
        """
        tenant_id = '77889991010'
        instance_id = '27e25b73-88a1-4526-b2b9-919a28b8b33f'

        context = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)

        # append the body w/ empty metadata
        self.body["upgrade"]["metadata"] = {}

        resp = self.controller.create(req, self.body, tenant_id, instance_id)

        instance_version = self.body["upgrade"]['instance_version']
        location = self.body["upgrade"]["location"]
        metadata = self.body["upgrade"]["metadata"]

        UpgradeMessageSender.create.assert_called_once_with(
            context, instance_id, instance_version, location, metadata)
        self.assertEqual(202, resp.status)
trove-5.0.0/trove/tests/unittests/mgmt/0000775000567000056710000000000012701410521021314 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/mgmt/test_datastore_controller.py0000664000567000056710000001546212701410316027170 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import jsonschema

from mock import Mock, patch, MagicMock, PropertyMock
from testtools.matchers import Is, Equals

from trove.common import exception
from trove.common import remote
from trove.datastore import models as datastore_models
from trove.extensions.mgmt.datastores.service \
    import DatastoreVersionController
from trove.tests.unittests import trove_testtools


class TestDatastoreVersionController(trove_testtools.TestCase):
    def setUp(self):
        super(TestDatastoreVersionController, self).setUp()
        self.controller = DatastoreVersionController()
        self.version = {
            "version": {
                "datastore_name": "test_dsx",
                "name": "test_vr1",
                "datastore_manager": "mysql",
                "image": "154b350d-4d86-4214-9067-9c54b230c0da",
                "packages": ["mysql-server-5.6"],
                "active": True,
                "default": False
            }
        }
        self.tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        self.req = Mock()
        self.req.environ = Mock()
        self.req.environ.__getitem__ = Mock(return_value=context)

    def test_get_schema_create(self):
        schema = self.controller.get_schema('create', self.version)
        self.assertIsNotNone(schema)
        self.assertTrue('version' in schema['properties'])

    def test_validate_create(self):
        body = self.version
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_blankname(self):
        body = self.version
        body['version']['name'] = "  "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("'  ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_blank_datastore(self):
        body = self.version
        body['version']['datastore_name'] = ""
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        self.assertThat(len(errors), Is(2))
        self.assertIn("'' is too short", error_messages)
        self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'",
                      error_messages)
error_messages) @patch.object(remote, 'create_nova_client') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load', side_effect=exception.DatastoreVersionNotFound) @patch.object(datastore_models, 'update_datastore_version') def test_create_datastore_versions(self, mock_ds_version_create, mock_ds_version_load, mock_ds_load, mock_nova_client): body = self.version mock_ds_load.return_value.name = 'test_dsx' self.controller.create(self.req, body, self.tenant_id) mock_ds_version_create.assert_called_with( 'test_dsx', 'test_vr1', 'mysql', '154b350d-4d86-4214-9067-9c54b230c0da', 'mysql-server-5.6', True) @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_show_ds_version(self, mock_ds_version_load): id = Mock() self.controller.show(self.req, self.tenant_id, id) mock_ds_version_load.assert_called_with(id) @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_delete_ds_version(self, mock_ds_version_load, mock_ds_load): ds_version_id = Mock() ds_version = Mock() mock_ds_version_load.return_value = ds_version self.controller.delete(self.req, self.tenant_id, ds_version_id) ds_version.delete.assert_called_with() @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(datastore_models.DatastoreVersions, 'load_all') def test_index_ds_version(self, mock_ds_version_load_all, mock_ds_version_load_by_uuid): mock_id = Mock() mock_ds_version = Mock() mock_ds_version.id = mock_id mock_ds_version_load_all.return_value = [mock_ds_version] self.controller.index(self.req, self.tenant_id) mock_ds_version_load_all.assert_called_with(only_active=False) mock_ds_version_load_by_uuid.assert_called_with(mock_id) @patch.object(remote, 'create_nova_client') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(datastore_models, 'update_datastore_version') def test_edit_datastore_versions(self, mock_ds_version_update, mock_ds_version_load, mock_nova_client): body = {'image': '21c8805a-a800-4bca-a192-3a5a2519044d'} mock_ds_version = MagicMock() type(mock_ds_version).datastore_name = PropertyMock( return_value=self.version['version']['datastore_name']) type(mock_ds_version).name = PropertyMock( return_value=self.version['version']['name']) type(mock_ds_version).image_id = PropertyMock( return_value=self.version['version']['image']) type(mock_ds_version).packages = PropertyMock( return_value=self.version['version']['packages']) type(mock_ds_version).active = PropertyMock( return_value=self.version['version']['active']) type(mock_ds_version).manager = PropertyMock( return_value=self.version['version']['datastore_manager']) mock_ds_version_load.return_value = mock_ds_version self.controller.edit(self.req, body, self.tenant_id, Mock()) mock_ds_version_update.assert_called_with( 'test_dsx', 'test_vr1', 'mysql', '21c8805a-a800-4bca-a192-3a5a2519044d', 'mysql-server-5.6', True) trove-5.0.0/trove/tests/unittests/mgmt/test_datastores.py0000664000567000056710000001631012701410316025101 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mock import Mock, patch from novaclient import exceptions as nova_exceptions from trove.common import exception from trove.common import remote from trove.datastore import models from trove.extensions.mgmt.datastores.service import DatastoreVersionController from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class TestDatastoreVersion(trove_testtools.TestCase): def setUp(self): super(TestDatastoreVersion, self).setUp() util.init_db() models.update_datastore(name='test_ds', default_version=None) models.update_datastore_version( 'test_ds', 'test_vr1', 'mysql', '154b350d-4d86-4214-9067-9c54b230c0da', 'pkg-1', '1') models.update_datastore_version( 'test_ds', 'test_vr2', 'mysql', '154b350d-4d86-4214-9067-9c54b230c0da', 'pkg-1', '1') self.ds = models.Datastore.load('test_ds') self.ds_version2 = models.DatastoreVersion.load(self.ds, 'test_vr2') self.context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=self.context) self.tenant_id = Mock() self.version_controller = DatastoreVersionController() def tearDown(self): super(TestDatastoreVersion, self).tearDown() @patch.object(remote, 'create_nova_client') def test_version_create(self, mock_nova_client): body = {"version": { "datastore_name": "test_ds", "name": "test_vr", "datastore_manager": "mysql", "image": "image-id", "packages": "test-pkg", "active": True, "default": True}} output = self.version_controller.create( self.req, body, self.tenant_id) self.assertEqual(202, output.status) @patch.object(remote, 'create_nova_client') @patch.object(models.DatastoreVersion, 'load') def test_fail_already_exists_version_create(self, mock_load, mock_nova_client): body = {"version": { "datastore_name": "test_ds", "name": "test_new_vr", "datastore_manager": "mysql", "image": "image-id", "packages": "test-pkg", "active": True, "default": True}} self.assertRaisesRegexp( exception.DatastoreVersionAlreadyExists, "A datastore version with the name 'test_new_vr' already exists", self.version_controller.create, self.req, body, self.tenant_id) @patch.object(remote, 'create_nova_client') def test_fail_image_not_found_version_create(self, mock_nova_client): mock_nova_client.return_value.images.get = Mock( side_effect=nova_exceptions.NotFound(404, "Image id not found image-id" )) body = {"version": { "datastore_name": "test_ds", "name": "test_vr", "datastore_manager": "mysql", "image": "image-id", "packages": "test-pkg", "active": True, "default": True}} self.assertRaisesRegexp( exception.ImageNotFound, "Image image-id cannot be found.", self.version_controller.create, self.req, body, self.tenant_id) def test_version_delete(self): ds_version1 = models.DatastoreVersion.load(self.ds, 'test_vr1') output = self.version_controller.delete(self.req, self.tenant_id, ds_version1.id) err_msg = ("Datastore version '%s' cannot be found." % ds_version1.id) self.assertEqual(202, output.status) # Try to find deleted version, this should raise exception. 
self.assertRaisesRegexp( exception.DatastoreVersionNotFound, err_msg, models.DatastoreVersion.load_by_uuid, ds_version1.id) @patch.object(remote, 'create_nova_client') def test_version_update(self, mock_client): body = {"image": "c022f4dc-76ed-4e3f-a25e-33e031f43f8b"} output = self.version_controller.edit(self.req, body, self.tenant_id, self.ds_version2.id) self.assertEqual(202, output.status) # Find the details of version updated and match the updated attribute. test_ds_version = models.DatastoreVersion.load_by_uuid( self.ds_version2.id) self.assertEqual(body['image'], test_ds_version.image_id) @patch.object(remote, 'create_nova_client') def test_version_update_fail_image_not_found(self, mock_nova_client): mock_nova_client.return_value.images.get = Mock( side_effect=nova_exceptions.NotFound(404, "Image id not found image-id" )) body = {"image": "non-existent-image-id"} self.assertRaisesRegexp( exception.ImageNotFound, "Image non-existent-image-id cannot be found.", self.version_controller.edit, self.req, body, self.tenant_id, self.ds_version2.id) @patch.object(models.DatastoreVersion, 'load_by_uuid') def test_version_index(self, mock_load): output = self.version_controller.index( self.req, self.tenant_id) self.assertEqual(200, output.status) def test_version_show(self): output = self.version_controller.show( self.req, self.tenant_id, self.ds_version2.id) self.assertEqual(200, output.status) self.assertEqual(self.ds_version2.id, output._data['version']['id']) self.assertEqual(self.ds_version2.name, output._data['version']['name']) self.assertEqual(self.ds_version2.datastore_id, output._data['version']['datastore_id']) self.assertEqual(self.ds_version2.datastore_name, output._data['version']['datastore_name']) self.assertEqual(self.ds_version2.manager, output._data['version']['datastore_manager']) self.assertEqual(self.ds_version2.image_id, output._data['version']['image']) self.assertEqual(self.ds_version2.packages.split(','), output._data['version']['packages']) self.assertEqual(self.ds_version2.active, output._data['version']['active']) trove-5.0.0/trove/tests/unittests/mgmt/__init__.py0000664000567000056710000000000012701410316023415 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/mgmt/test_models.py0000664000567000056710000005022412701410316024215 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
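# A minimal sketch (illustrative only, not used by this suite) of the
# set_override/clear_override pairing that the config-driven tests below
# rely on; oslo.config's clear_override is the documented undo for
# set_override, so registering it as a cleanup keeps overrides from
# leaking between test cases:
#
#     CONF.set_override('host', '127.0.0.1', enforce_type=True)
#     self.addCleanup(CONF.clear_override, 'host')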
# import uuid from mock import MagicMock, patch, ANY from novaclient.client import Client from novaclient.v2.flavors import FlavorManager, Flavor from novaclient.v2.servers import Server, ServerManager from oslo_config import cfg from testtools.matchers import Equals, Is, Not from trove.backup.models import Backup from trove.common import exception from trove.common import instance as rd_instance from trove.common import remote from trove.datastore import models as datastore_models import trove.extensions.mgmt.instances.models as mgmtmodels from trove.guestagent.api import API from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.tasks import InstanceTasks from trove import rpc from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util CONF = cfg.CONF class MockMgmtInstanceTest(trove_testtools.TestCase): @classmethod def setUpClass(cls): util.init_db() cls.version_id = str(uuid.uuid4()) cls.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='mysql' + str(uuid.uuid4()), default_version_id=cls.version_id ) cls.version = datastore_models.DBDatastoreVersion.create( id=cls.version_id, datastore_id=cls.datastore.id, name='5.5' + str(uuid.uuid4()), manager='mysql', image_id=str(uuid.uuid4()), active=1, packages="mysql-server-5.5" ) super(MockMgmtInstanceTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.version.delete() cls.datastore.delete() super(MockMgmtInstanceTest, cls).tearDownClass() def setUp(self): self.context = trove_testtools.TroveTestContext(self) self.context.auth_token = 'some_secret_password' self.client = MagicMock(spec=Client) self.server_mgr = MagicMock(spec=ServerManager) self.client.servers = self.server_mgr self.flavor_mgr = MagicMock(spec=FlavorManager) self.client.flavors = self.flavor_mgr self.admin_client_patch = patch.object( remote, 'create_admin_nova_client', return_value=self.client) self.addCleanup(self.admin_client_patch.stop) self.admin_client_patch.start() CONF.set_override('host', '127.0.0.1', enforce_type=True) CONF.set_override('exists_notification_interval', 1, enforce_type=True) CONF.set_override('notification_service_id', {'mysql': '123'}, enforce_type=True) super(MockMgmtInstanceTest, self).setUp() def do_cleanup(self, instance, status): instance.delete() status.delete() def build_db_instance(self, status, task_status=InstanceTasks.NONE): instance = DBInstance(InstanceTasks.NONE, name='test_name', id=str(uuid.uuid4()), flavor_id='flavor_1', datastore_version_id=self.version.id, compute_instance_id='compute_id_1', server_id='server_id_1', tenant_id='tenant_id_1', server_status=rd_instance.ServiceStatuses. 
BUILDING.api_status, deleted=False) instance.save() service_status = InstanceServiceStatus( rd_instance.ServiceStatuses.RUNNING, id=str(uuid.uuid4()), instance_id=instance.id, ) service_status.save() instance.set_task_status(task_status) instance.server_status = status instance.save() return instance, service_status class TestNotificationTransformer(MockMgmtInstanceTest): @classmethod def setUpClass(cls): super(TestNotificationTransformer, cls).setUpClass() @patch('trove.instance.models.LOG') def test_transformer(self, mock_logging): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) payloads = mgmtmodels.NotificationTransformer( context=self.context)() self.assertIsNotNone(payloads) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertTrue(status.lower() in [db['state'] for db in payloads]) self.addCleanup(self.do_cleanup, instance, service_status) def test_get_service_id(self): id_map = { 'mysql': '123', 'percona': 'abc' } transformer = mgmtmodels.NotificationTransformer(context=self.context) self.assertThat(transformer._get_service_id('mysql', id_map), Equals('123')) @patch('trove.extensions.mgmt.instances.models.LOG') def test_get_service_id_unknown(self, mock_logging): id_map = { 'mysql': '123', 'percona': 'abc' } transformer = mgmtmodels.NotificationTransformer(context=self.context) self.assertThat(transformer._get_service_id('m0ng0', id_map), Equals('unknown-service-id-error')) class TestNovaNotificationTransformer(MockMgmtInstanceTest): @classmethod def setUpClass(cls): super(TestNovaNotificationTransformer, cls).setUpClass() def test_transformer_cache(self): flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' with patch.object(self.flavor_mgr, 'get', return_value=flavor): transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) transformer2 = mgmtmodels.NovaNotificationTransformer( context=self.context) self.assertThat(transformer._flavor_cache, Not(Is(transformer2._flavor_cache))) def test_lookup_flavor(self): flavor = MagicMock(spec=Flavor) flavor.name = 'flav_1' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(self.flavor_mgr, 'get', side_effect=[flavor, None]): self.assertThat(transformer._lookup_flavor('1'), Equals(flavor.name)) self.assertThat(transformer._lookup_flavor('2'), Equals('unknown')) def test_transformer(self): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' server = MagicMock(spec=Server) server.user_id = 'test_user_id' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() self.assertIsNotNone(payloads) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertThat(payload['state'], Not(Is(None))) self.assertThat(payload['instance_type'], Equals('db.small')) self.assertThat(payload['instance_type_id'], Equals('flavor_1')) self.assertThat(payload['user_id'], 
Equals('test_user_id')) self.assertThat(payload['service_id'], Equals('123')) self.addCleanup(self.do_cleanup, instance, service_status) @patch('trove.extensions.mgmt.instances.models.LOG') def test_transformer_invalid_datastore_manager(self, mock_logging): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) version = datastore_models.DBDatastoreVersion.get_by( id=instance.datastore_version_id) version.update(manager='something invalid') server = MagicMock(spec=Server) server.user_id = 'test_user_id' flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() # assertions self.assertIsNotNone(payloads) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertIn(status.lower(), [db['state'] for db in payloads]) self.assertThat(payload['instance_type'], Equals('db.small')) self.assertThat(payload['instance_type_id'], Equals('flavor_1')) self.assertThat(payload['user_id'], Equals('test_user_id')) self.assertThat(payload['service_id'], Equals('unknown-service-id-error')) version.update(manager='mysql') self.addCleanup(self.do_cleanup, instance, service_status) def test_transformer_shutdown_instance(self): status = rd_instance.ServiceStatuses.SHUTDOWN.api_status instance, service_status = self.build_db_instance(status) service_status.set_status(rd_instance.ServiceStatuses.SHUTDOWN) server = MagicMock(spec=Server) server.user_id = 'test_user_id' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(Backup, 'running', return_value=None): self.assertThat(mgmt_instance.status, Equals('SHUTDOWN')) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() # assertion that SHUTDOWN instances are not reported self.assertIsNotNone(payloads) self.assertNotIn(status.lower(), [db['status'] for db in payloads]) self.addCleanup(self.do_cleanup, instance, service_status) def test_transformer_no_nova_instance(self): status = rd_instance.ServiceStatuses.SHUTDOWN.api_status instance, service_status = self.build_db_instance(status) service_status.set_status(rd_instance.ServiceStatuses.SHUTDOWN) mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, None, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(Backup, 'running', return_value=None): self.assertThat(mgmt_instance.status, Equals('SHUTDOWN')) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() # assertion that SHUTDOWN instances are not reported self.assertIsNotNone(payloads) self.assertNotIn(status.lower(), [db['status'] for db in payloads]) self.addCleanup(self.do_cleanup, instance, 
service_status) def test_transformer_flavor_cache(self): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) server = MagicMock(spec=Server) server.user_id = 'test_user_id' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): transformer() payloads = transformer() self.assertIsNotNone(payloads) self.assertThat(len(payloads), Equals(1)) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertIn(status.lower(), [db['state'] for db in payloads]) self.assertThat(payload['instance_type'], Equals('db.small')) self.assertThat(payload['instance_type_id'], Equals('flavor_1')) self.assertThat(payload['user_id'], Equals('test_user_id')) # ensure the cache was used to get the flavor the second time self.flavor_mgr.get.assert_any_call('flavor_1') self.addCleanup(self.do_cleanup, instance, service_status) class TestMgmtInstanceTasks(MockMgmtInstanceTest): @classmethod def setUpClass(cls): super(TestMgmtInstanceTasks, cls).setUpClass() def test_public_exists_events(self): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, task_status=InstanceTasks.BUILDING) server = MagicMock(spec=Server) server.user_id = 'test_user_id' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' notifier = MagicMock() with patch.object(rpc, 'get_notifier', return_value=notifier): with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): self.assertThat(self.context.auth_token, Is('some_secret_password')) with patch.object(notifier, 'info', return_value=None): # invocation mgmtmodels.publish_exist_events( mgmtmodels.NovaNotificationTransformer( context=self.context), self.context) # assertion notifier.info.assert_any_call( self.context, 'trove.instance.exists', ANY) self.assertThat(self.context.auth_token, Is(None)) self.addCleanup(self.do_cleanup, instance, service_status) class TestMgmtInstanceDeleted(MockMgmtInstanceTest): def test_show_deleted_mgmt_instances(self): args = {'deleted': 0, 'cluster_id': None} db_infos_active = DBInstance.find_all(**args) args = {'deleted': 1, 'cluster_id': None} db_infos_deleted = DBInstance.find_all(**args) args = {'cluster_id': None} # db_infos_all = DBInstance.find_all(**args) # TODO(SlickNik) Fix this assert to work reliably in the gate. # This fails intermittently when the unit tests run in parallel.
# self.assertTrue(db_infos_all.count() == # db_infos_active.count() + # db_infos_deleted.count()) with patch.object(self.context, 'is_admin', return_value=True): deleted_instance = db_infos_deleted.all()[0] if len( db_infos_deleted.all()) > 0 else None active_instance = db_infos_active.all()[0] if len( db_infos_active.all()) > 0 else None if active_instance: instance = DBInstance.find_by(context=self.context, id=active_instance.id) self.assertEqual(active_instance.id, instance.id) if deleted_instance: self.assertRaises( exception.ModelNotFoundError, DBInstance.find_by, context=self.context, id=deleted_instance.id, deleted=False) instance = DBInstance.find_by(context=self.context, id=deleted_instance.id, deleted=True) self.assertEqual(deleted_instance.id, instance.id) class TestMgmtInstancePing(MockMgmtInstanceTest): def test_rpc_ping(self): status = rd_instance.ServiceStatuses.RUNNING.api_status instance, service_status = self.build_db_instance( status, task_status=InstanceTasks.NONE) mgmt_instance = mgmtmodels.MgmtInstance(instance, instance, None, service_status) with patch.object(API, 'rpc_ping', return_value=True): with patch.object(API, 'get_client'): self.assertTrue(mgmt_instance.rpc_ping()) self.addCleanup(self.do_cleanup, instance, service_status) trove-5.0.0/trove/tests/unittests/mgmt/test_clusters.py0000664000567000056710000000721012701410316024573 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
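# A note on the stubbing idiom used throughout these controller tests:
# stacked ``patch``/``patch.object`` decorators apply bottom-up, so the
# injected mock arguments arrive in bottom-to-top order. A minimal sketch
# of that ordering (names illustrative only, not from this suite):
#
#     @patch.object(MgmtCluster, 'load_all')   # applied last, injected second
#     @patch.object(MgmtCluster, 'load')       # applied first, injected first
#     def test_example(self, mock_load, mock_load_all):
#         ...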
from mock import Mock, patch from trove.common import exception from trove.extensions.mgmt.clusters.models import MgmtCluster from trove.extensions.mgmt.clusters.service import MgmtClusterController from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=self.context) mock_cluster1 = Mock() mock_cluster1.datastore_version.manager = 'vertica' mock_cluster1.instances = [] mock_cluster1.instances_without_server = [] mock_cluster2 = Mock() mock_cluster2.datastore_version.manager = 'vertica' mock_cluster2.instances = [] mock_cluster2.instances_without_server = [] self.mock_clusters = [mock_cluster1, mock_cluster2] self.controller = MgmtClusterController() def tearDown(self): super(TestClusterController, self).tearDown() def test_get_action_schema(self): body = {'do_stuff': {}} action_schema = Mock() action_schema.get = Mock() self.controller.get_action_schema(body, action_schema) action_schema.get.assert_called_with('do_stuff', {}) @patch.object(MgmtCluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() mock_cluster_load.return_value = self.mock_clusters[0] self.controller.show(self.req, tenant_id, id) mock_cluster_load.assert_called_with(self.context, id) @patch.object(MgmtCluster, 'load_all') def test_index_cluster(self, mock_cluster_load_all): tenant_id = Mock() mock_cluster_load_all.return_value = self.mock_clusters self.controller.index(self.req, tenant_id) mock_cluster_load_all.assert_called_with(self.context, deleted=None) @patch.object(MgmtCluster, 'load') def test_controller_action_found(self, mock_cluster_load): body = {'reset-task': {}} tenant_id = Mock() id = Mock() mock_cluster_load.return_value = self.mock_clusters[0] result = self.controller.action(self.req, body, tenant_id, id) self.assertEqual(202, result.status) self.assertIsNotNone(result.data) def test_controller_no_body_action_found(self): tenant_id = Mock() id = Mock() self.assertRaisesRegexp( exception.BadRequest, 'Invalid request body.', self.controller.action, self.req, None, tenant_id, id) @patch.object(MgmtCluster, 'load') def test_controller_invalid_action_found(self, mock_cluster_load): body = {'do_stuff': {}} tenant_id = Mock() id = Mock() mock_cluster_load.return_value = self.mock_clusters[0] self.assertRaisesRegexp( exception.BadRequest, 'Invalid cluster action requested.', self.controller.action, self.req, body, tenant_id, id) trove-5.0.0/trove/tests/unittests/util/0000775000567000056710000000000012701410521021325 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/util/matchers.py0000664000567000056710000001473212701410316023516 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Matcher classes to be used inside of the testtools assertThat framework.""" import pprint class DictKeysMismatch(object): def __init__(self, d1only, d2only): self.d1only = d1only self.d2only = d2only def describe(self): return ('Keys in d1 and not d2: %(d1only)s.' ' Keys in d2 and not d1: %(d2only)s' % self.__dict__) def get_details(self): return {} class DictMismatch(object): def __init__(self, key, d1_value, d2_value): self.key = key self.d1_value = d1_value self.d2_value = d2_value def describe(self): return ("Dictionaries do not match at %(key)s." " d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__) def get_details(self): return {} class DictMatches(object): def __init__(self, d1, approx_equal=False, tolerance=0.001): self.d1 = d1 self.approx_equal = approx_equal self.tolerance = tolerance def __str__(self): return 'DictMatches(%s)' % (pprint.pformat(self.d1)) # Useful assertions def match(self, d2): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care about (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. """ d1keys = set(self.d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys return DictKeysMismatch(d1only, d2only) for key in d1keys: d1value = self.d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= self.tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): matcher = DictMatches(d1value) did_match = matcher.match(d2value) if did_match is not None: return did_match elif 'DONTCARE' in (d1value, d2value): continue elif self.approx_equal and within_tolerance: continue elif d1value != d2value: return DictMismatch(key, d1value, d2value) class ListLengthMismatch(object): def __init__(self, len1, len2): self.len1 = len1 self.len2 = len2 def describe(self): return ('Length mismatch: len(L1)=%(len1)d != ' 'len(L2)=%(len2)d' % self.__dict__) def get_details(self): return {} class DictListMatches(object): def __init__(self, l1, approx_equal=False, tolerance=0.001): self.l1 = l1 self.approx_equal = approx_equal self.tolerance = tolerance def __str__(self): return 'DictListMatches(%s)' % (pprint.pformat(self.l1)) # Useful assertions def match(self, l2): """Assert a list of dicts are equivalent.""" l1count = len(self.l1) l2count = len(l2) if l1count != l2count: return ListLengthMismatch(l1count, l2count) for d1, d2 in zip(self.l1, l2): matcher = DictMatches(d2, approx_equal=self.approx_equal, tolerance=self.tolerance) did_match = matcher.match(d1) if did_match: return did_match class SubDictMismatch(object): def __init__(self, key=None, sub_value=None, super_value=None, keys=False): self.key = key self.sub_value = sub_value self.super_value = super_value self.keys = keys def describe(self): if self.keys: return "Keys between dictionaries did not match" else: return ("Dictionaries do not match at %s.
d1: %s d2: %s" % (self.key, self.super_value, self.sub_value)) def get_details(self): return {} class IsSubDictOf(object): def __init__(self, super_dict): self.super_dict = super_dict def __str__(self): return 'IsSubDictOf(%s)' % (self.super_dict) def match(self, sub_dict): """Assert a sub_dict is subset of super_dict.""" if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())): return SubDictMismatch(keys=True) for k, sub_value in sub_dict.items(): super_value = self.super_dict[k] if isinstance(sub_value, dict): matcher = IsSubDictOf(super_value) did_match = matcher.match(sub_value) if did_match is not None: return did_match elif 'DONTCARE' in (sub_value, super_value): continue else: if sub_value != super_value: return SubDictMismatch(k, sub_value, super_value) class FunctionCallMatcher(object): def __init__(self, expected_func_calls): self.expected_func_calls = expected_func_calls self.actual_func_calls = [] def call(self, *args, **kwargs): func_call = {'args': args, 'kwargs': kwargs} self.actual_func_calls.append(func_call) def match(self): dict_list_matcher = DictListMatches(self.expected_func_calls) return dict_list_matcher.match(self.actual_func_calls) trove-5.0.0/trove/tests/unittests/util/__init__.py0000664000567000056710000000000012701410316023426 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/util/util.py0000664000567000056710000000165012701410316022660 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. DB_SETUP = None def init_db(): global DB_SETUP if DB_SETUP: return from trove.common import cfg from trove.db import get_db_api from trove.db.sqlalchemy import session CONF = cfg.CONF db_api = get_db_api() db_api.db_sync(CONF) session.configure_db(CONF) DB_SETUP = True trove-5.0.0/trove/tests/unittests/instance/0000775000567000056710000000000012701410521022154 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/instance/__init__.py0000664000567000056710000000000012701410316024255 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/instance/test_instance_models.py0000664000567000056710000002672112701410316026746 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # Copyright 2014 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import uuid from mock import Mock, patch from trove.backup import models as backup_models from trove.common import cfg from trove.common import exception from trove.common.instance import ServiceStatuses from trove.datastore import models as datastore_models from trove.instance import models from trove.instance.models import DBInstance from trove.instance.models import filter_ips from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import SimpleInstance from trove.instance.tasks import InstanceTasks from trove.taskmanager import api as task_api from trove.tests.fakes import nova from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util CONF = cfg.CONF class SimpleInstanceTest(trove_testtools.TestCase): def setUp(self): super(SimpleInstanceTest, self).setUp() db_info = DBInstance( InstanceTasks.BUILDING, name="TestInstance") self.instance = SimpleInstance( None, db_info, InstanceServiceStatus( ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock()) db_info.addresses = {"private": [{"addr": "123.123.123.123"}], "internal": [{"addr": "10.123.123.123"}], "public": [{"addr": "15.123.123.123"}]} self.orig_conf = CONF.network_label_regex self.orig_ip_regex = CONF.ip_regex self.orig_black_list_regex = CONF.black_list_regex def tearDown(self): super(SimpleInstanceTest, self).tearDown() CONF.network_label_regex = self.orig_conf CONF.ip_regex = self.orig_ip_regex CONF.black_list_regex = self.orig_black_list_regex CONF.ip_start = None def test_get_root_on_create(self): root_on_create_val = Instance.get_root_on_create( 'redis') self.assertFalse(root_on_create_val) def test_filter_ips_white_list(self): CONF.network_label_regex = '.*' CONF.ip_regex = '^(15.|123.)' CONF.black_list_regex = '^10.123.123.*' ip = self.instance.get_visible_ip_addresses() ip = filter_ips( ip, CONF.ip_regex, CONF.black_list_regex) self.assertEqual(2, len(ip)) self.assertTrue('123.123.123.123' in ip) self.assertTrue('15.123.123.123' in ip) def test_filter_ips_black_list(self): CONF.network_label_regex = '.*' CONF.ip_regex = '.*' CONF.black_list_regex = '^10.123.123.*' ip = self.instance.get_visible_ip_addresses() ip = filter_ips( ip, CONF.ip_regex, CONF.black_list_regex) self.assertEqual(2, len(ip)) self.assertTrue('10.123.123.123' not in ip) def test_one_network_label(self): CONF.network_label_regex = 'public' ip = self.instance.get_visible_ip_addresses() self.assertEqual(['15.123.123.123'], ip) def test_two_network_labels(self): CONF.network_label_regex = '^(private|public)$' ip = self.instance.get_visible_ip_addresses() self.assertEqual(2, len(ip)) self.assertTrue('123.123.123.123' in ip) self.assertTrue('15.123.123.123' in ip) def test_all_network_labels(self): CONF.network_label_regex = '.*' ip = self.instance.get_visible_ip_addresses() self.assertEqual(3, len(ip)) self.assertTrue('10.123.123.123' in ip) self.assertTrue('123.123.123.123' in ip) self.assertTrue('15.123.123.123' in ip) class CreateInstanceTest(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def setUp(self): util.init_db() self.context = trove_testtools.TroveTestContext(self, is_admin=True) self.name = "name" self.flavor_id = 5 self.image_id = "UUID" self.databases = [] self.users = [] self.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='mysql' + str(uuid.uuid4()), ) self.datastore_version = ( datastore_models.DBDatastoreVersion.create( id=str(uuid.uuid4()), datastore_id=self.datastore.id, name="5.5" + str(uuid.uuid4()), manager="mysql", image_id="image_id",
packages="", active=True)) self.volume_size = 1 self.az = "az" self.nics = None self.configuration = None self.tenant_id = "UUID" self.datastore_version_id = str(uuid.uuid4()) self.db_info = DBInstance.create( name=self.name, flavor_id=self.flavor_id, tenant_id=self.tenant_id, volume_size=self.volume_size, datastore_version_id=self.datastore_version.id, task_status=InstanceTasks.BUILDING, configuration_id=self.configuration ) self.backup_name = "name" self.descr = None self.backup_state = backup_models.BackupState.COMPLETED self.instance_id = self.db_info.id self.parent_id = None self.deleted = False self.backup = backup_models.DBBackup.create( name=self.backup_name, description=self.descr, tenant_id=self.tenant_id, state=self.backup_state, instance_id=self.instance_id, parent_id=self.parent_id, datastore_version_id=self.datastore_version.id, deleted=False ) self.backup.size = 1.1 self.backup.save() self.backup_id = self.backup.id self.orig_client = models.create_nova_client models.create_nova_client = nova.fake_create_nova_client self.orig_api = task_api.API(self.context).create_instance task_api.API(self.context).create_instance = Mock() self.run_with_quotas = models.run_with_quotas models.run_with_quotas = Mock() self.check = backup_models.DBBackup.check_swift_object_exist backup_models.DBBackup.check_swift_object_exist = Mock( return_value=True) super(CreateInstanceTest, self).setUp() @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def tearDown(self): self.db_info.delete() self.backup.delete() self.datastore.delete() self.datastore_version.delete() models.create_nova_client = self.orig_client task_api.API(self.context).create_instance = self.orig_api models.run_with_quotas = self.run_with_quotas backup_models.DBBackup.check_swift_object_exist = self.check self.backup.delete() self.db_info.delete() super(CreateInstanceTest, self).tearDown() def test_exception_on_invalid_backup_size(self): self.assertEqual(self.backup.id, self.backup_id) exc = self.assertRaises( exception.BackupTooLarge, models.Instance.create, self.context, self.name, self.flavor_id, self.image_id, self.databases, self.users, self.datastore, self.datastore_version, self.volume_size, self.backup_id, self.az, self.nics, self.configuration ) self.assertIn("Backup is too large for " "given flavor or volume.", str(exc)) def test_can_restore_from_backup_with_almost_equal_size(self): # target size equals to "1Gb" self.backup.size = 0.99 self.backup.save() instance = models.Instance.create( self.context, self.name, self.flavor_id, self.image_id, self.databases, self.users, self.datastore, self.datastore_version, self.volume_size, self.backup_id, self.az, self.nics, self.configuration) self.assertIsNotNone(instance) class TestReplication(trove_testtools.TestCase): def setUp(self): util.init_db() self.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='name' + str(uuid.uuid4()), default_version_id=str(uuid.uuid4())) self.datastore_version = datastore_models.DBDatastoreVersion.create( id=self.datastore.default_version_id, name='name' + str(uuid.uuid4()), image_id=str(uuid.uuid4()), packages=str(uuid.uuid4()), datastore_id=self.datastore.id, manager='mysql', active=1) self.master = DBInstance( InstanceTasks.NONE, id=str(uuid.uuid4()), name="TestMasterInstance", datastore_version_id=self.datastore_version.id) self.master.set_task_status(InstanceTasks.NONE) self.master.save() self.master_status = InstanceServiceStatus( ServiceStatuses.RUNNING, id=str(uuid.uuid4()), 
instance_id=self.master.id) self.master_status.save() self.safe_nova_client = models.create_nova_client models.create_nova_client = nova.fake_create_nova_client super(TestReplication, self).setUp() def tearDown(self): self.master.delete() self.master_status.delete() self.datastore.delete() self.datastore_version.delete() models.create_nova_client = self.safe_nova_client super(TestReplication, self).tearDown() @patch('trove.instance.models.LOG') def test_replica_of_not_active_master(self, mock_logging): self.master.set_task_status(InstanceTasks.BUILDING) self.master.save() self.master_status.set_status(ServiceStatuses.BUILDING) self.master_status.save() self.assertRaises(exception.UnprocessableEntity, Instance.create, None, 'name', 1, "UUID", [], [], None, self.datastore_version, 1, None, slave_of_id=self.master.id) @patch('trove.instance.models.LOG') def test_replica_with_invalid_slave_of_id(self, mock_logging): self.assertRaises(exception.NotFound, Instance.create, None, 'name', 1, "UUID", [], [], None, self.datastore_version, 1, None, slave_of_id=str(uuid.uuid4())) def test_create_replica_from_replica(self): self.replica_datastore_version = Mock( spec=datastore_models.DBDatastoreVersion) self.replica_datastore_version.id = "UUID" self.replica_datastore_version.manager = 'mysql' self.replica_info = DBInstance( InstanceTasks.NONE, id="UUID", name="TestInstance", datastore_version_id=self.replica_datastore_version.id, slave_of_id=self.master.id) self.replica_info.save() self.assertRaises(exception.Forbidden, Instance.create, None, 'name', 2, "UUID", [], [], None, self.datastore_version, 1, None, slave_of_id=self.replica_info.id) trove-5.0.0/trove/tests/unittests/instance/test_instance_views.py0000664000567000056710000000774012701410316026620 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
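# The detail-view tests below pin down the wire shape of ``view.data()``.
# Reduced to the keys they actually assert (values are the test fixtures;
# 'hostname' and 'ip' are mutually exclusive in the payload):
#
#     {'instance': {'created': 'Yesterday',
#                   'updated': 'Now',
#                   'datastore': {'version': 'mysql_test_version'},
#                   'hostname': 'test.trove.com'}}   # or 'ip': ['1.2.3.4']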
# from mock import Mock from trove.common import cfg from trove.instance.views import InstanceDetailView from trove.instance.views import InstanceView from trove.tests.unittests import trove_testtools CONF = cfg.CONF class InstanceViewsTest(trove_testtools.TestCase): def setUp(self): super(InstanceViewsTest, self).setUp() self.addresses = {"private": [{"addr": "123.123.123.123"}], "internal": [{"addr": "10.123.123.123"}], "public": [{"addr": "15.123.123.123"}]} self.orig_label_regex = CONF.network_label_regex self.orig_ip_regex = CONF.ip_regex def tearDown(self): super(InstanceViewsTest, self).tearDown() CONF.network_label_regex = self.orig_label_regex CONF.ip_regex = self.orig_ip_regex class InstanceDetailViewTest(trove_testtools.TestCase): def setUp(self): super(InstanceDetailViewTest, self).setUp() self.build_links_method = InstanceView._build_links self.build_flavor_links_method = InstanceView._build_flavor_links self.build_config_method = InstanceDetailView._build_configuration_info InstanceView._build_links = Mock() InstanceView._build_flavor_links = Mock() InstanceDetailView._build_configuration_info = Mock() self.instance = Mock() self.instance.created = 'Yesterday' self.instance.updated = 'Now' self.instance.datastore_version = Mock() self.instance.datastore_version.name = 'mysql_test_version' self.instance.datastore_version.manager = 'mysql' self.instance.hostname = 'test.trove.com' self.ip = "1.2.3.4" self.instance.addresses = {"private": [{"addr": self.ip}]} self.instance.volume_used = '3' self.instance.root_password = 'iloveyou' self.instance.get_visible_ip_addresses = lambda: ["1.2.3.4"] self.instance.slave_of_id = None self.instance.slaves = [] def tearDown(self): super(InstanceDetailViewTest, self).tearDown() InstanceView._build_links = self.build_links_method InstanceView._build_flavor_links = self.build_flavor_links_method InstanceDetailView._build_configuration_info = self.build_config_method def test_data_hostname(self): view = InstanceDetailView(self.instance, Mock()) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertEqual(self.instance.hostname, result['instance']['hostname']) self.assertNotIn('ip', result['instance']) def test_data_ip(self): self.instance.hostname = None view = InstanceDetailView(self.instance, Mock()) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertNotIn('hostname', result['instance']) self.assertEqual([self.ip], result['instance']['ip']) trove-5.0.0/trove/tests/unittests/instance/test_instance_status.py0000664000567000056710000001304012701410316026774 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from trove.common.instance import ServiceStatuses from trove.datastore import models from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceStatus from trove.instance.models import SimpleInstance from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util import uuid class FakeInstanceTask(object): def __init__(self): self.is_error = False self.action = None class FakeDBInstance(object): def __init__(self): self.id = str(uuid.uuid4()) self.deleted = False self.datastore_version_id = str(uuid.uuid4()) self.server_status = "ACTIVE" self.task_status = FakeInstanceTask() class BaseInstanceStatusTestCase(trove_testtools.TestCase): def setUp(self): util.init_db() self.db_info = FakeDBInstance() self.status = InstanceServiceStatus( ServiceStatuses.RUNNING) self.datastore = models.DBDatastore.create( id=str(uuid.uuid4()), name='mysql' + str(uuid.uuid4()), default_version_id=self.db_info.datastore_version_id ) self.version = models.DBDatastoreVersion.create( id=self.db_info.datastore_version_id, datastore_id=self.datastore.id, name='5.5' + str(uuid.uuid4()), manager='mysql', image_id=str(uuid.uuid4()), active=1, packages="mysql-server-5.5" ) super(BaseInstanceStatusTestCase, self).setUp() def tearDown(self): self.datastore.delete() self.version.delete() super(BaseInstanceStatusTestCase, self).tearDown() class InstanceStatusTest(BaseInstanceStatusTestCase): def test_task_status_error_reports_error(self): self.db_info.task_status.is_error = True instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ERROR, instance.status) def test_task_status_action_building_reports_build(self): self.db_info.task_status.action = "BUILDING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.BUILD, instance.status) def test_task_status_action_rebooting_reports_reboot(self): self.db_info.task_status.action = "REBOOTING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.REBOOT, instance.status) def test_task_status_action_resizing_reports_resize(self): self.db_info.task_status.action = "RESIZING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.RESIZE, instance.status) def test_task_status_action_deleting_reports_shutdown(self): self.db_info.task_status.action = "DELETING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.SHUTDOWN, instance.status) def test_nova_server_build_reports_build(self): self.db_info.server_status = "BUILD" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.BUILD, instance.status) def test_nova_server_error_reports_error(self): self.db_info.server_status = "ERROR" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ERROR, instance.status) def test_nova_server_reboot_reports_reboot(self): self.db_info.server_status = "REBOOT" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.REBOOT, instance.status) def test_nova_server_resize_reports_resize(self): self.db_info.server_status = "RESIZE" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.RESIZE, instance.status) def 
test_nova_server_verify_resize_reports_resize(self): self.db_info.server_status = "VERIFY_RESIZE" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.RESIZE, instance.status) def test_service_status_paused_reports_reboot(self): self.status.set_status(ServiceStatuses.PAUSED) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.REBOOT, instance.status) def test_service_status_new_reports_build(self): self.status.set_status(ServiceStatuses.NEW) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.BUILD, instance.status) def test_service_status_running_reports_active(self): self.status.set_status(ServiceStatuses.RUNNING) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ACTIVE, instance.status) trove-5.0.0/trove/tests/unittests/instance/test_instance_controller.py0000664000567000056710000003257612701410316027653 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import jsonschema from mock import Mock from testtools.matchers import Is, Equals from testtools.testcase import skip from trove.common import apischema from trove.instance.service import InstanceController from trove.tests.unittests import trove_testtools class TestInstanceController(trove_testtools.TestCase): def setUp(self): super(TestInstanceController, self).setUp() self.controller = InstanceController() self.instance = { "instance": { "volume": {"size": "1"}, "users": [ {"name": "user1", "password": "litepass", "databases": [{"name": "firstdb"}]} ], "flavorRef": "https://localhost:8779/v1.0/2500/1", "name": "TEST-XYS2d2fe2kl;zx;jkl2l;sjdcma239(E)@(D", "databases": [ { "name": "firstdb", "collate": "latin2_general_ci", "character_set": "latin2" }, { "name": "db2" } ] } } self.context = trove_testtools.TroveTestContext(self) self.req = Mock(remote_addr='ip:port', host='myhost') def verify_errors(self, errors, msg=None, properties=None, path=None): msg = msg or [] properties = properties or [] self.assertThat(len(errors), Is(len(msg))) i = 0 while i < len(msg): self.assertIn(errors[i].message, msg) if path: self.assertThat(path, Equals(properties[i])) else: self.assertThat(errors[i].path.pop(), Equals(properties[i])) i += 1 def test_get_schema_create(self): schema = self.controller.get_schema('create', {'instance': {}}) self.assertIsNotNone(schema) self.assertTrue('instance' in schema['properties']) def test_get_schema_action_restart(self): schema = self.controller.get_schema('action', {'restart': {}}) self.assertIsNotNone(schema) self.assertTrue('restart' in schema['properties']) def test_get_schema_action_resize_volume(self): schema = self.controller.get_schema( 'action', {'resize': {'volume': {}}}) self.assertIsNotNone(schema) self.assertTrue('resize' in schema['properties']) self.assertTrue( 'volume' in 
trove-5.0.0/trove/tests/unittests/instance/test_instance_controller.py0000664000567000056710000003257612701410316027653 0ustar jenkinsjenkins00000000000000
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import jsonschema
from mock import Mock
from testtools.matchers import Is, Equals
from testtools.testcase import skip

from trove.common import apischema
from trove.instance.service import InstanceController
from trove.tests.unittests import trove_testtools


class TestInstanceController(trove_testtools.TestCase):

    def setUp(self):
        super(TestInstanceController, self).setUp()
        self.controller = InstanceController()
        self.instance = {
            "instance": {
                "volume": {"size": "1"},
                "users": [
                    {"name": "user1", "password": "litepass",
                     "databases": [{"name": "firstdb"}]}
                ],
                "flavorRef": "https://localhost:8779/v1.0/2500/1",
                "name": "TEST-XYS2d2fe2kl;zx;jkl2l;sjdcma239(E)@(D",
                "databases": [
                    {
                        "name": "firstdb",
                        "collate": "latin2_general_ci",
                        "character_set": "latin2"
                    },
                    {
                        "name": "db2"
                    }
                ]
            }
        }
        self.context = trove_testtools.TroveTestContext(self)
        self.req = Mock(remote_addr='ip:port', host='myhost')

    def verify_errors(self, errors, msg=None, properties=None, path=None):
        msg = msg or []
        properties = properties or []
        self.assertThat(len(errors), Is(len(msg)))
        i = 0
        while i < len(msg):
            self.assertIn(errors[i].message, msg)
            if path:
                self.assertThat(path, Equals(properties[i]))
            else:
                self.assertThat(errors[i].path.pop(), Equals(properties[i]))
            i += 1

    def test_get_schema_create(self):
        schema = self.controller.get_schema('create', {'instance': {}})
        self.assertIsNotNone(schema)
        self.assertTrue('instance' in schema['properties'])

    def test_get_schema_action_restart(self):
        schema = self.controller.get_schema('action', {'restart': {}})
        self.assertIsNotNone(schema)
        self.assertTrue('restart' in schema['properties'])

    def test_get_schema_action_resize_volume(self):
        schema = self.controller.get_schema(
            'action', {'resize': {'volume': {}}})
        self.assertIsNotNone(schema)
        self.assertTrue('resize' in schema['properties'])
        self.assertTrue(
            'volume' in schema['properties']['resize']['properties'])

    def test_get_schema_action_resize_flavorRef(self):
        schema = self.controller.get_schema(
            'action', {'resize': {'flavorRef': {}}})
        self.assertIsNotNone(schema)
        self.assertTrue('resize' in schema['properties'])
        self.assertTrue(
            'flavorRef' in schema['properties']['resize']['properties'])

    def test_get_schema_action_other(self):
        schema = self.controller.get_schema(
            'action', {'supersized': {'flavorRef': {}}})
        self.assertIsNotNone(schema)
        self.assertThat(len(schema.keys()), Is(0))

    def test_validate_create_complete(self):
        body = self.instance
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_complete_with_restore(self):
        body = self.instance
        body['instance']['restorePoint'] = {
            "backupRef": "d761edd8-0771-46ff-9743-688b9e297a3b"
        }
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_complete_with_restore_error(self):
        body = self.instance
        backup_id_ref = "invalid-backup-id-ref"
        body['instance']['restorePoint'] = {
            "backupRef": backup_id_ref
        }
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("'%s' does not match '%s'" %
                               (backup_id_ref, apischema.uuid['pattern'])))

    def test_validate_create_blankname(self):
        body = self.instance
        body['instance']['name'] = " "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_invalid_name(self):
        body = self.instance
        body['instance']['name'] = "$#$%^^"
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertEqual(1, len(errors))
        self.assertIn("'$#$%^^' does not match '^.*[0-9a-zA-Z]+.*$'",
                      errors[0].message)

    def test_validate_restart(self):
        body = {"restart": {}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_invalid_action(self):
        # TODO(juice) perhaps we should validate the schema not recognized
        body = {"restarted": {}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_resize_volume(self):
        body = {"resize": {"volume": {"size": 4}}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_resize_volume_string(self):
        body = {"resize": {"volume": {"size": "4"}}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_resize_volume_string_invalid_number(self):
        body = {"resize": {"volume": {"size": '-44.0'}}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(errors[0].context[1].message,
                        Equals("'-44.0' does not match '^[0-9]+$'"))
        self.assertThat(errors[0].path.pop(), Equals('size'))

    def test_validate_resize_volume_invalid_characters(self):
        body = {"resize": {"volume": {"size": 'x'}}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(errors[0].context[0].message,
                        Equals("'x' is not of type 'integer'"))
        self.assertThat(errors[0].context[1].message,
                        Equals("'x' does not match '^[0-9]+$'"))
        self.assertThat(errors[0].path.pop(), Equals('size'))

    def test_validate_resize_instance(self):
        body = {"resize": {"flavorRef": "https://endpoint/v1.0/123/flavors/2"}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_resize_instance_int(self):
        body = {"resize": {"flavorRef": 2}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_resize_instance_string(self):
        body = {"resize": {"flavorRef": 'foo'}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_resize_instance_empty_url(self):
        body = {"resize": {"flavorRef": ""}}
        schema = self.controller.get_schema('action', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.verify_errors(errors[0].context,
                           ["'' is too short",
                            "'' does not match '^.*[0-9a-zA-Z]+.*$'",
                            "'' is not of type 'integer'"],
                           ["flavorRef", "flavorRef", "flavorRef",
                            "flavorRef"],
                           errors[0].path.pop())

    @skip("This URI validator allows just about anything you give it")
    def test_validate_resize_instance_invalid_url(self):
        body = {"resize": {"flavorRef": "xyz-re1f2-daze329d-f23901"}}
        schema = self.controller.get_schema('action', body)
        self.assertIsNotNone(schema)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.verify_errors(errors, ["'' is too short"], ["flavorRef"])

    def _setup_modify_instance_mocks(self):
        instance = Mock()
        instance.detach_replica = Mock()
        instance.assign_configuration = Mock()
        instance.unassign_configuration = Mock()
        instance.update_db = Mock()
        return instance

    def test_modify_instance_with_empty_args(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        self.assertEqual(0, instance.detach_replica.call_count)
        self.assertEqual(0, instance.unassign_configuration.call_count)
        self.assertEqual(0, instance.assign_configuration.call_count)
        self.assertEqual(0, instance.update_db.call_count)

    def test_modify_instance_with_nonempty_args_calls_update_db(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        args['any'] = 'anything'
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        instance.update_db.assert_called_once_with(**args)

    def test_modify_instance_with_False_detach_replica_arg(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        args['detach_replica'] = False
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        self.assertEqual(0, instance.detach_replica.call_count)

    def test_modify_instance_with_True_detach_replica_arg(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        args['detach_replica'] = True
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        self.assertEqual(1, instance.detach_replica.call_count)

    def test_modify_instance_with_configuration_id_arg(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        args['configuration_id'] = 'some_id'
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        self.assertEqual(1, instance.assign_configuration.call_count)

    def test_modify_instance_with_None_configuration_id_arg(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        args['configuration_id'] = None
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        self.assertEqual(1, instance.unassign_configuration.call_count)

    def test_modify_instance_with_all_args(self):
        instance = self._setup_modify_instance_mocks()
        args = {}
        args['detach_replica'] = True
        args['configuration_id'] = 'some_id'
        self.controller._modify_instance(self.context, self.req,
                                         instance, **args)
        self.assertEqual(1, instance.detach_replica.call_count)
        self.assertEqual(1, instance.assign_configuration.call_count)
        instance.update_db.assert_called_once_with(**args)
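[Editor's note] The validation pattern exercised throughout this file is reusable outside the test suite: pull the request-type schema from the controller, wrap it in a Draft4Validator, and collect every violation instead of letting the first failure raise. A minimal sketch follows; the request body is an illustrative payload, not a fixture from this repository.

    import jsonschema

    from trove.instance.service import InstanceController

    controller = InstanceController()
    body = {"resize": {"volume": {"size": "not-a-number"}}}

    # get_schema() selects the sub-schema matching the action keys
    # present in the body.
    schema = controller.get_schema('action', body)
    validator = jsonschema.Draft4Validator(schema)

    if not validator.is_valid(body):
        # iter_errors() yields every violation; sorting by path keeps
        # the report deterministic, which the assertions above rely on.
        for error in sorted(validator.iter_errors(body),
                            key=lambda e: e.path):
            print(list(error.path), error.message)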
trove-5.0.0/trove/tests/unittests/cluster/0000775000567000056710000000000012701410521022031 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/tests/unittests/cluster/test_mongodb_cluster.py0000664000567000056710000002457312701410316026645 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import uuid

from trove.cluster import models
from trove.cluster import tasks
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.experimental.mongodb import api
from trove.instance import models as inst_models
from trove.instance import tasks as inst_tasks
from trove.tests.unittests import trove_testtools

CONF = cfg.CONF


class MongoDBClusterTest(trove_testtools.TestCase):

    def setUp(self):
        super(MongoDBClusterTest, self).setUp()
        self.cluster_id = str(uuid.uuid4())
        self.cluster_name = "Cluster" + self.cluster_id
        self.tenant_id = "23423432"
        self.dv_id = "1"
        self.db_info = models.DBCluster(models.ClusterTasks.NONE,
                                        id=self.cluster_id,
                                        name=self.cluster_name,
                                        tenant_id=self.tenant_id,
                                        datastore_version_id=self.dv_id,
                                        task_id=models.ClusterTasks.NONE._code)
        self.context = mock.Mock()
        self.datastore = mock.Mock()
        self.dv = mock.Mock()
        self.datastore_version = self.dv
        self.cluster = api.MongoDbCluster(self.context, self.db_info,
                                          self.datastore,
                                          self.datastore_version)
        self.manager = mock.Mock()
        self.cluster.manager = self.manager
        self.volume_support = CONF.get('mongodb').volume_support
        self.remote_nova = remote.create_nova_client

    def tearDown(self):
        super(MongoDBClusterTest, self).tearDown()

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_quotas')
    @mock.patch.object(api.MongoDbCluster, '_check_instances')
    @mock.patch.object(api.MongoDbCluster, '_create_shard_instances',
                       return_value=['id1', 'id2', 'id3'])
    @mock.patch.object(api.MongoDbCluster, '_create_query_router_instances',
                       return_value=['id4'])
    @mock.patch.object(api.MongoDbCluster, 'update_db')
    def test_grow(self, mock_update_db,
                  mock_create_query_router_instances,
                  mock_create_shard_instances,
                  mock_check_instances, mock_check_quotas, mock_prep_resize):
        instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaC'}
        instance2 = {'name': 'replicaB', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaA'}
        instance3 = {'name': 'replicaC', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaA'}
        instance4 = {'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'query_router'}
        self.cluster.grow([instance1, instance2, instance3, instance4])
        self.assertEqual(mock_prep_resize.called, True)
        mock_create_shard_instances.assert_called_with([instance1, instance2,
                                                        instance3])
        mock_create_query_router_instances.assert_called_with([instance4])
        mock_update_db.assert_called_with(
            task_status=tasks.ClusterTasks.GROWING_CLUSTER
        )
        self.manager.grow_cluster.assert_called_with(
            self.cluster_id, ['id1', 'id2', 'id3', 'id4']
        )

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_quotas')
    def test_grow_invalid_type(self, mock_check_quotas, mock_prep_resize):
        instance1 = {'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'config_server'}
        self.assertRaises(exception.TroveError,
                          self.cluster.grow, [instance1])

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_quotas')
    def test_grow_invalid_shard_size(self, mock_check_quotas,
                                     mock_prep_resize):
        instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaB'}
        instance2 = {'name': 'replicaB', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaA'}
        self.assertRaises(exception.TroveError,
                          self.cluster.grow, [instance1, instance2])

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_quotas')
    def test_grow_no_name(self, mock_check_quotas, mock_prep_resize):
        instance1 = {'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaC'}
        self.assertRaises(exception.TroveError,
                          self.cluster.grow, [instance1])

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_quotas')
    def test_grow_repeated_name(self, mock_check_quotas, mock_prep_resize):
        # Two replicas deliberately share the name 'replicaA'; grow()
        # must reject the request.
        instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaC'}
        instance2 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaC'}
        instance3 = {'name': 'replicaC', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaC'}
        self.assertRaises(exception.TroveError,
                          self.cluster.grow,
                          [instance1, instance2, instance3])

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_quotas')
    def test_grow_bad_relations(self, mock_check_quotas, mock_prep_resize):
        instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaB'}
        instance2 = {'name': 'replicaB', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaC'}
        instance3 = {'name': 'replicaC', 'flavor_id': 1, 'volume_size': 5,
                     'instance_type': 'replica', 'related_to': 'replicaD'}
        self.assertRaises(exception.TroveError,
                          self.cluster.grow,
                          [instance1, instance2, instance3])

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    @mock.patch.object(api.MongoDbCluster, '_check_shard_status')
    @mock.patch.object(api.MongoDbCluster, 'update_db')
    @mock.patch.object(inst_models, 'load_any_instance')
    def test_shrink(self, mock_load_any_instance, mock_update_db,
                    mock_check_shard_status, mock_prep_resize):
        self._mock_db_instances()
        self.cluster.query_routers.append(
            inst_models.DBInstance(inst_tasks.InstanceTasks.NONE,
                                   id='id6',
                                   cluster_id=self.cluster_id,
                                   type='query_router')
        )
        self.cluster.shrink(['id1', 'id2', 'id3', 'id4'])
        self.assertEqual(mock_prep_resize.called, True)
        mock_check_shard_status.assert_called_with('id1')
        mock_update_db.assert_called_with(
            task_status=tasks.ClusterTasks.SHRINKING_CLUSTER
        )
        self.assertEqual(4, mock_load_any_instance().delete.call_count)
        self.manager.shrink_cluster.assert_called_with(
            self.cluster_id, ['id1', 'id2', 'id3', 'id4']
        )

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    def test_shrink_invalid_type(self, mock_prep_resize):
        self._mock_db_instances()
        self.assertRaises(exception.TroveError,
                          self.cluster.shrink, ['id5'])

    @mock.patch.object(api.MongoDbCluster, '_prep_resize')
    def test_shrink_incomplete_shard(self, mock_prep_resize):
        self._mock_db_instances()
        self.assertRaises(exception.TroveError,
                          self.cluster.shrink, ['id1', 'id2'])

    def _mock_db_instances(self):
        self.shard_id = uuid.uuid4()
        self.cluster.members = [
            inst_models.DBInstance(inst_tasks.InstanceTasks.NONE,
                                   id='id1',
                                   cluster_id=self.cluster_id,
                                   shard_id=self.shard_id,
                                   type='member'),
            inst_models.DBInstance(inst_tasks.InstanceTasks.NONE,
                                   id='id2',
                                   cluster_id=self.cluster_id,
                                   shard_id=self.shard_id,
                                   type='member'),
            inst_models.DBInstance(inst_tasks.InstanceTasks.NONE,
                                   id='id3',
                                   cluster_id=self.cluster_id,
                                   shard_id=self.shard_id,
                                   type='member'),
        ]
        self.cluster.query_routers = [
            inst_models.DBInstance(inst_tasks.InstanceTasks.NONE,
                                   id='id4',
                                   cluster_id=self.cluster_id,
                                   type='query_router')
        ]
        self.cluster.config_svrs = [
            inst_models.DBInstance(inst_tasks.InstanceTasks.NONE,
                                   id='id5',
                                   cluster_id=self.cluster_id,
                                   type='config_server')
        ]
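[Editor's note] Read together, the grow tests above encode the request contract for MongoDbCluster.grow(): replicas must be named, every replica must be related to a member of the same new shard, names must be unique, and only 'replica' and 'query_router' instance types are accepted. A payload that would satisfy those checks is sketched below; the names and sizes are illustrative values, not fixtures from this repository.

    # A grow request the checks above would accept: one complete
    # three-member shard plus an (unnamed) query router.
    grow_request = [
        {'name': 'replica1', 'flavor_id': 1, 'volume_size': 5,
         'instance_type': 'replica', 'related_to': 'replica3'},
        {'name': 'replica2', 'flavor_id': 1, 'volume_size': 5,
         'instance_type': 'replica', 'related_to': 'replica1'},
        {'name': 'replica3', 'flavor_id': 1, 'volume_size': 5,
         'instance_type': 'replica', 'related_to': 'replica1'},
        {'flavor_id': 1, 'volume_size': 5,
         'instance_type': 'query_router'},
    ]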
trove-5.0.0/trove/tests/unittests/cluster/test_cluster_pxc_controller.py0000664000567000056710000002573512701410316030256 0ustar jenkinsjenkins00000000000000
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools.matchers import Is, Equals

from trove.cluster import models
from trove.cluster.models import Cluster
from trove.cluster.service import ClusterController
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools


class TestClusterController(trove_testtools.TestCase):

    def setUp(self):
        super(TestClusterController, self).setUp()
        self.controller = ClusterController()
        instances = [
            {
                "flavorRef": "7",
                "volume": {
                    "size": 1
                },
                "availability_zone": "az",
                "nics": [
                    {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                ]
            }
        ] * 3
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "pxc",
                    "version": "5.5"
                },
                "instances": instances
            }
        }

    def test_get_schema_create(self):
        schema = self.controller.get_schema('create', self.cluster)
        self.assertIsNotNone(schema)
        self.assertTrue('cluster' in schema['properties'])
        self.assertTrue('cluster')

    def test_validate_create(self):
        body = self.cluster
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_blankname(self):
        body = self.cluster
        body['cluster']['name'] = " "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_blank_datastore(self):
        body = self.cluster
        body['cluster']['datastore']['type'] = ""
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(2))
        self.assertIn("'' is too short", error_messages)
        self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'",
                      error_messages)
        self.assertIn("type", error_paths)

    @patch.object(Cluster, 'create')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters_disabled(self,
                                      mock_get_datastore_version,
                                      mock_cluster_create):
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mysql'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.ClusterDatastoreNotSupported,
                          self.controller.create,
                          req,
                          body,
                          tenant_id)

    @patch.object(Cluster, 'create')
    @patch.object(utils, 'get_id_from_href')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters(self,
                             mock_get_datastore_version,
                             mock_id_from_href,
                             mock_cluster_create):
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'pxc'
        datastore = Mock()
        mock_get_datastore_version.return_value = (datastore,
                                                   datastore_version)
        instances = [
            {
                'volume_size': 1,
                'volume_type': None,
                'flavor_id': '1234',
                'availability_zone': 'az',
                'nics': [
                    {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                ]
            }
        ] * 3
        mock_id_from_href.return_value = '1234'
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'pxc'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
                                               datastore, datastore_version,
                                               instances, {})

    @patch.object(Cluster, 'load')
    def test_show_cluster(self, mock_cluster_load):
        tenant_id = Mock()
        id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'pxc'
        mock_cluster_load.return_value = mock_cluster
        self.controller.show(req, tenant_id, id)
        mock_cluster_load.assert_called_with(context, id)

    @patch.object(Cluster, 'load')
    @patch.object(Cluster, 'load_instance')
    def test_show_cluster_instance(self, mock_cluster_load_instance,
                                   mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()
        instance_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        cluster = Mock()
        mock_cluster_load.return_value = cluster
        cluster.id = cluster_id
        self.controller.show_instance(req, tenant_id, cluster_id,
                                      instance_id)
        mock_cluster_load_instance.assert_called_with(context, cluster.id,
                                                      instance_id)

    @patch.object(Cluster, 'load')
    def test_delete_cluster(self, mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()
        req = MagicMock()
        cluster = Mock()
        trove_testtools.patch_notifier(self)
        mock_cluster_load.return_value = cluster
        self.controller.delete(req, tenant_id, cluster_id)
        cluster.delete.assert_called_with()


class TestClusterControllerWithStrategy(trove_testtools.TestCase):

    def setUp(self):
        super(TestClusterControllerWithStrategy, self).setUp()
        self.controller = ClusterController()
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "pxc",
                    "version": "5.5"
                },
                "instances": [
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    },
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    },
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    },
                ]
            }
        }

    def tearDown(self):
        super(TestClusterControllerWithStrategy, self).tearDown()
        cfg.CONF.clear_override('cluster_support', group='pxc')
        cfg.CONF.clear_override('api_strategy', group='pxc')

    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_disabled(self,
                                      mock_cluster_create,
                                      mock_get_datastore_version):
        cfg.CONF.set_override('cluster_support', False, group='pxc',
                              enforce_type=True)
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'pxc'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.TroveError,
                          self.controller.create,
                          req,
                          body,
                          tenant_id)

    @patch.object(views.ClusterView, 'data', return_value={})
    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_enabled(self,
                                     mock_cluster_create,
                                     mock_get_datastore_version,
                                     mock_cluster_view_data):
        cfg.CONF.set_override('cluster_support', True, group='pxc',
                              enforce_type=True)
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'pxc'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        mock_cluster = Mock()
        mock_cluster.datastore_version.manager = 'pxc'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)
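[Editor's note] The strategy tests above (and their Vertica twin below) flip per-datastore feature flags through oslo.config, and every set_override is paired with a clear_override in tearDown so state never leaks between tests. A minimal sketch of the pattern, assuming the 'pxc' option group is already registered as it is in trove:

    import trove.common.cfg as cfg

    CONF = cfg.CONF

    def enable_pxc_clustering():
        # enforce_type=True makes oslo.config validate the override
        # against the option's declared type instead of storing it
        # blindly.
        CONF.set_override('cluster_support', True, group='pxc',
                          enforce_type=True)

    def reset_pxc_clustering():
        # Restores the option to its default; mirrors the tearDown
        # above.
        CONF.clear_override('cluster_support', group='pxc')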
trove-5.0.0/trove/tests/unittests/cluster/__init__.py0000664000567000056710000000000012701410316024132 0ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/tests/unittests/cluster/test_cluster_vertica_controller.py0000664000567000056710000002601512701410316031111 0ustar jenkinsjenkins00000000000000
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools.matchers import Is, Equals

from trove.cluster import models
from trove.cluster.models import Cluster
from trove.cluster.service import ClusterController
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools


class TestClusterController(trove_testtools.TestCase):

    def setUp(self):
        super(TestClusterController, self).setUp()
        self.controller = ClusterController()
        instances = [
            {
                "flavorRef": "7",
                "volume": {
                    "size": 1
                },
                "availability_zone": "az",
                "nics": [
                    {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                ]
            }
        ] * 3
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "vertica",
                    "version": "7.1"
                },
                "instances": instances
            }
        }

    def test_get_schema_create(self):
        schema = self.controller.get_schema('create', self.cluster)
        self.assertIsNotNone(schema)
        self.assertTrue('cluster' in schema['properties'])
        self.assertTrue('cluster')

    def test_validate_create(self):
        body = self.cluster
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_blankname(self):
        body = self.cluster
        body['cluster']['name'] = " "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_blank_datastore(self):
        body = self.cluster
        body['cluster']['datastore']['type'] = ""
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(2))
        self.assertIn("'' is too short", error_messages)
        self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'",
                      error_messages)
        self.assertIn("type", error_paths)

    @patch.object(Cluster, 'create')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters_disabled(self,
                                      mock_get_datastore_version,
                                      mock_cluster_create):
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mysql'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.ClusterDatastoreNotSupported,
                          self.controller.create,
                          req,
                          body,
                          tenant_id)

    @patch.object(Cluster, 'create')
    @patch.object(utils, 'get_id_from_href')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters(self,
                             mock_get_datastore_version,
                             mock_id_from_href,
                             mock_cluster_create):
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'vertica'
        datastore = Mock()
        mock_get_datastore_version.return_value = (datastore,
                                                   datastore_version)
        instances = [
            {
                'volume_size': 1,
                'volume_type': None,
                'flavor_id': '1234',
                'availability_zone': 'az',
                'nics': [
                    {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                ]
            }
        ] * 3
        mock_id_from_href.return_value = '1234'
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'vertica'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
                                               datastore, datastore_version,
                                               instances, {})

    @patch.object(Cluster, 'load')
    def test_show_cluster(self, mock_cluster_load):
        tenant_id = Mock()
        id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        mock_cluster = Mock()
        mock_cluster.instances = []
        mock_cluster.instances_without_server = []
        mock_cluster.datastore_version.manager = 'vertica'
        mock_cluster_load.return_value = mock_cluster
        self.controller.show(req, tenant_id, id)
        mock_cluster_load.assert_called_with(context, id)

    @patch.object(Cluster, 'load')
    @patch.object(Cluster, 'load_instance')
    def test_show_cluster_instance(self, mock_cluster_load_instance,
                                   mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()
        instance_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        cluster = Mock()
        mock_cluster_load.return_value = cluster
        cluster.id = cluster_id
        self.controller.show_instance(req, tenant_id, cluster_id,
                                      instance_id)
        mock_cluster_load_instance.assert_called_with(context, cluster.id,
                                                      instance_id)

    @patch.object(Cluster, 'load')
    def test_delete_cluster(self, mock_cluster_load):
        tenant_id = Mock()
        cluster_id = Mock()
        req = MagicMock()
        cluster = Mock()
        trove_testtools.patch_notifier(self)
        mock_cluster_load.return_value = cluster
        self.controller.delete(req, tenant_id, cluster_id)
        cluster.delete.assert_called_with()


class TestClusterControllerWithStrategy(trove_testtools.TestCase):

    def setUp(self):
        super(TestClusterControllerWithStrategy, self).setUp()
        self.controller = ClusterController()
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "vertica",
                    "version": "7.1"
                },
                "instances": [
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    },
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    },
                    {
                        "flavorRef": "7",
                        "volume": {
                            "size": 1
                        },
                    },
                ]
            }
        }

    def tearDown(self):
        super(TestClusterControllerWithStrategy, self).tearDown()
        cfg.CONF.clear_override('cluster_support', group='vertica')
        cfg.CONF.clear_override('api_strategy', group='vertica')

    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_disabled(self,
                                      mock_cluster_create,
                                      mock_get_datastore_version):
        cfg.CONF.set_override('cluster_support', False, group='vertica',
                              enforce_type=True)
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'vertica'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.TroveError,
                          self.controller.create,
                          req,
                          body,
                          tenant_id)

    @patch.object(views.ClusterView, 'data', return_value={})
    @patch.object(datastore_models, 'get_datastore_version')
    @patch.object(models.Cluster, 'create')
    def test_create_clusters_enabled(self,
                                     mock_cluster_create,
                                     mock_get_datastore_version,
                                     mock_cluster_view_data):
        cfg.CONF.set_override('cluster_support', True, group='vertica',
                              enforce_type=True)
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'vertica'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        mock_cluster = Mock()
        mock_cluster.datastore_version.manager = 'vertica'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)
trove-5.0.0/trove/tests/unittests/cluster/test_cluster.py0000664000567000056710000002266212701410316025135 0ustar jenkinsjenkins00000000000000
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from mock import Mock
from mock import patch

from trove.cluster.models import Cluster
from trove.cluster.models import ClusterTasks
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.experimental.mongodb import (
    api as mongodb_api)
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.instance import models as inst_models
from trove.instance.models import DBInstance
from trove.instance.tasks import InstanceTasks
from trove.quota.quota import QUOTAS
from trove.taskmanager import api as task_api
from trove.tests.unittests import trove_testtools

CONF = cfg.CONF


class ClusterTest(trove_testtools.TestCase):

    def setUp(self):
        super(ClusterTest, self).setUp()
        self.get_client_patch = patch.object(task_api.API, 'get_client')
        self.get_client_mock = self.get_client_patch.start()
        self.addCleanup(self.get_client_patch.stop)
        self.cluster_id = str(uuid.uuid4())
        self.cluster_name = "Cluster" + self.cluster_id
        self.tenant_id = "23423432"
        self.dv_id = "1"
        self.db_info = DBCluster(ClusterTasks.NONE,
                                 id=self.cluster_id,
                                 name=self.cluster_name,
                                 tenant_id=self.tenant_id,
                                 datastore_version_id=self.dv_id,
                                 task_id=ClusterTasks.NONE._code)
        self.context = trove_testtools.TroveTestContext(self)
        self.datastore = Mock()
        self.dv = Mock()
        self.dv.manager = "mongodb"
        self.datastore_version = self.dv
        self.cluster = mongodb_api.MongoDbCluster(self.context, self.db_info,
                                                  self.datastore,
                                                  self.datastore_version)
        self.instances = [{'volume_size': 1, 'flavor_id': '1234'},
                          {'volume_size': 1, 'flavor_id': '1234'},
                          {'volume_size': 1, 'flavor_id': '1234'}]
        self.volume_support = CONF.get(self.dv.manager).volume_support
        self.remote_nova = remote.create_nova_client

    def tearDown(self):
        super(ClusterTest, self).tearDown()
        CONF.get(self.dv.manager).volume_support = self.volume_support
        remote.create_nova_client = self.remote_nova

    def test_create_empty_instances(self):
        self.assertRaises(exception.ClusterNumInstancesNotSupported,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          [],
                          None
                          )

    def test_create_unequal_flavors(self):
        instances = self.instances
        instances[0]['flavor_id'] = '4567'
        self.assertRaises(exception.ClusterFlavorsNotEqual,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          None
                          )

    @patch.object(remote, 'create_nova_client')
    def test_create_unequal_volumes(self, mock_client):
        instances = self.instances
        instances[0]['volume_size'] = 2
        flavors = Mock()
        mock_client.return_value.flavors = flavors
        self.assertRaises(exception.ClusterVolumeSizesNotEqual,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          None
                          )

    @patch.object(remote, 'create_nova_client')
    def test_create_storage_not_specified(self, mock_client):

        class FakeFlavor:
            def __init__(self, flavor_id):
                self.flavor_id = flavor_id

            @property
            def id(self):
                return self.flavor.id

            @property
            def ephemeral(self):
                return 0

        instances = [{'flavor_id': '1234'},
                     {'flavor_id': '1234'},
                     {'flavor_id': '1234'}]
        CONF.get(self.dv.manager).volume_support = False
        (mock_client.return_value.
         flavors.get.return_value) = FakeFlavor('1234')
        self.assertRaises(exception.LocalStorageNotSpecified,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          None
                          )

    @patch('trove.cluster.models.LOG')
    def test_delete_bad_task_status(self, mock_logging):
        self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL
        self.assertRaises(exception.UnprocessableEntity,
                          self.cluster.delete)

    @patch.object(task_api.API, 'delete_cluster')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_delete_task_status_none(self,
                                     mock_find_all,
                                     mock_update_db,
                                     mock_delete_cluster):
        self.cluster.db_info.task_status = ClusterTasks.NONE
        self.cluster.delete()
        mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)

    @patch.object(task_api.API, 'delete_cluster')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_delete_task_status_deleting(self,
                                         mock_find_all,
                                         mock_update_db,
                                         mock_delete_cluster):
        self.cluster.db_info.task_status = ClusterTasks.DELETING
        self.cluster.delete()
        mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)

    @patch('trove.common.strategies.cluster.experimental.mongodb.api.LOG')
    def test_add_shard_bad_task_status(self, mock_logging):
        task_status = ClusterTasks.BUILDING_INITIAL
        self.cluster.db_info.task_status = task_status
        self.assertRaises(exception.UnprocessableEntity,
                          self.cluster.add_shard)

    @patch.object(utils, 'generate_uuid', Mock(return_value='new-shard-id'))
    @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
    @patch.object(task_api, 'load')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.Instance, 'create')
    @patch.object(QUOTAS, 'check_quotas')
    @patch.object(inst_models, 'load_any_instance')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_add_shard(self,
                       mock_find_all,
                       mock_load_any_instance,
                       mock_check_quotas,
                       mock_instance_create,
                       mock_update_db,
                       mock_task_api_load,
                       mock_load_by_uuid):
        self.cluster.db_info.task_status = ClusterTasks.NONE
        (mock_find_all.return_value
         .all.return_value) = [DBInstance(InstanceTasks.NONE,
                                          name="TestInstance1",
                                          shard_id="1", id='1',
                                          datastore_version_id='1'),
                               DBInstance(InstanceTasks.NONE,
                                          name="TestInstance2",
                                          shard_id="1", id='2',
                                          datastore_version_id='1'),
                               DBInstance(InstanceTasks.NONE,
                                          name="TestInstance3",
                                          shard_id="1", id='3',
                                          datastore_version_id='1')]
        mock_datastore_version = Mock()
        mock_datastore_version.manager = 'mongodb'
        mock_load_by_uuid.return_value = mock_datastore_version
        mock_task_api = Mock()
        mock_task_api.mongodb_add_shard_cluster.return_value = None
        mock_task_api_load.return_value = mock_task_api
        self.cluster.add_shard()
        mock_update_db.assert_called_with(
            task_status=ClusterTasks.ADDING_SHARD)
        mock_task_api.mongodb_add_shard_cluster.assert_called_with(
            self.cluster.id,
            'new-shard-id',
            'rs2')
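[Editor's note] test_add_shard above stacks eight @patch.object decorators, and the mapping between decorators and test arguments is easy to misread: decorators apply bottom-up, so the patch nearest the function produces the first mock argument. A self-contained sketch of that rule; the Service class and method names are invented for illustration.

    import mock


    class Service(object):
        def ping(self):
            return 'pong'

        def reset(self):
            return 'ok'


    # The bottom decorator (ping) is applied first, so its mock is the
    # first positional argument -- exactly how test_add_shard receives
    # mock_find_all first and mock_load_by_uuid last.
    @mock.patch.object(Service, 'reset')
    @mock.patch.object(Service, 'ping')
    def demo(mock_ping, mock_reset):
        Service().ping()
        mock_ping.assert_called_once_with()
        assert not mock_reset.called


    demo()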
" "One from each rack and data center.") def _build_mock_nodes(self, num_nodes): nodes = [] for _ in range(num_nodes): mock_instance = MagicMock() nodes.append({'instance': mock_instance, 'guest': MagicMock(), 'id': mock_instance.id, 'ip': '%s_IP' % mock_instance.id, 'dc': 'dc1', 'rack': 'rack1' }) return nodes trove-5.0.0/trove/tests/unittests/cluster/test_galera_cluster.py0000664000567000056710000003604312701410316026446 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import cfg from trove.common import exception from trove.common import remote from trove.common.strategies.cluster.experimental.galera_common import ( api as galera_api) from trove.instance import models as inst_models from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools CONF = cfg.CONF class FakeOptGroup(object): def __init__(self, min_cluster_member_count=3, volume_support=True, device_path='/dev/vdb'): self.min_cluster_member_count = min_cluster_member_count self.volume_support = volume_support self.device_path = device_path class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "pxc" self.datastore_version = self.dv self.cluster = galera_api.GaleraCommonCluster( self.context, self.db_info, self.datastore, self.datastore_version) self.instances = [{'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}] def tearDown(self): super(ClusterTest, self).tearDown() def test_create_empty_instances(self): self.assertRaises(exception.ClusterNumInstancesNotLargeEnough, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, [], {}, ) def test_create_flavor_not_specified(self): instances = self.instances instances[0]['flavor_id'] = None self.assertRaises(exception.ClusterFlavorsNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {} ) @patch.object(remote, 'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client): instances = [{'flavor_id': 
trove-5.0.0/trove/tests/unittests/cluster/test_galera_cluster.py0000664000567000056710000003604312701410316026446 0ustar jenkinsjenkins00000000000000
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid

from mock import Mock
from mock import patch
from novaclient import exceptions as nova_exceptions

from trove.cluster.models import Cluster
from trove.cluster.models import ClusterTasks
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.experimental.galera_common import (
    api as galera_api)
from trove.instance import models as inst_models
from trove.quota.quota import QUOTAS
from trove.taskmanager import api as task_api
from trove.tests.unittests import trove_testtools

CONF = cfg.CONF


class FakeOptGroup(object):
    def __init__(self, min_cluster_member_count=3,
                 volume_support=True, device_path='/dev/vdb'):
        self.min_cluster_member_count = min_cluster_member_count
        self.volume_support = volume_support
        self.device_path = device_path


class ClusterTest(trove_testtools.TestCase):

    def setUp(self):
        super(ClusterTest, self).setUp()
        self.get_client_patch = patch.object(task_api.API, 'get_client')
        self.get_client_mock = self.get_client_patch.start()
        self.addCleanup(self.get_client_patch.stop)
        self.cluster_id = str(uuid.uuid4())
        self.cluster_name = "Cluster" + self.cluster_id
        self.tenant_id = "23423432"
        self.dv_id = "1"
        self.db_info = DBCluster(ClusterTasks.NONE,
                                 id=self.cluster_id,
                                 name=self.cluster_name,
                                 tenant_id=self.tenant_id,
                                 datastore_version_id=self.dv_id,
                                 task_id=ClusterTasks.NONE._code)
        self.context = trove_testtools.TroveTestContext(self)
        self.datastore = Mock()
        self.dv = Mock()
        self.dv.manager = "pxc"
        self.datastore_version = self.dv
        self.cluster = galera_api.GaleraCommonCluster(
            self.context, self.db_info, self.datastore,
            self.datastore_version)
        self.instances = [{'volume_size': 1, 'flavor_id': '1234'},
                          {'volume_size': 1, 'flavor_id': '1234'},
                          {'volume_size': 1, 'flavor_id': '1234'}]

    def tearDown(self):
        super(ClusterTest, self).tearDown()

    def test_create_empty_instances(self):
        self.assertRaises(exception.ClusterNumInstancesNotLargeEnough,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          [], {},
                          )

    def test_create_flavor_not_specified(self):
        instances = self.instances
        instances[0]['flavor_id'] = None
        self.assertRaises(exception.ClusterFlavorsNotEqual,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          {}
                          )

    @patch.object(remote, 'create_nova_client')
    def test_create_invalid_flavor_specified(self, mock_client):
        instances = [{'flavor_id': '1234'},
                     {'flavor_id': '1234'},
                     {'flavor_id': '1234'}]
        (mock_client.return_value.flavors.get) = Mock(
            side_effect=nova_exceptions.NotFound(
                404, "Flavor id not found %s" % id))
        self.assertRaises(exception.FlavorNotFound,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          {}
                          )

    @patch.object(remote, 'create_nova_client')
    def test_create_volume_no_specified(self, mock_client):
        instances = self.instances
        instances[0]['volume_size'] = None
        flavors = Mock()
        mock_client.return_value.flavors = flavors
        self.assertRaises(exception.ClusterVolumeSizeRequired,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          {}
                          )

    @patch.object(remote, 'create_nova_client')
    @patch.object(galera_api, 'CONF')
    def test_create_storage_specified_with_no_volume_support(self,
                                                             mock_conf,
                                                             mock_client):
        mock_conf.get = Mock(
            return_value=FakeOptGroup(volume_support=False))
        instances = self.instances
        instances[0]['volume_size'] = None
        flavors = Mock()
        mock_client.return_value.flavors = flavors
        self.assertRaises(exception.VolumeNotSupported,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          {}
                          )

    @patch.object(remote, 'create_nova_client')
    @patch.object(galera_api, 'CONF')
    def test_create_storage_not_specified_and_no_ephemeral_flavor(
            self, mock_conf, mock_client):

        class FakeFlavor:
            def __init__(self, flavor_id):
                self.flavor_id = flavor_id

            @property
            def id(self):
                return self.flavor.id

            @property
            def ephemeral(self):
                return 0

        instances = [{'flavor_id': '1234'},
                     {'flavor_id': '1234'},
                     {'flavor_id': '1234'}]
        mock_conf.get = Mock(
            return_value=FakeOptGroup(volume_support=False))
        (mock_client.return_value.
         flavors.get.return_value) = FakeFlavor('1234')
        self.assertRaises(exception.LocalStorageNotSpecified,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          {}
                          )

    @patch.object(remote, 'create_nova_client')
    def test_create_volume_not_equal(self, mock_client):
        instances = self.instances
        instances[0]['volume_size'] = 2
        flavors = Mock()
        mock_client.return_value.flavors = flavors
        self.assertRaises(exception.ClusterVolumeSizesNotEqual,
                          Cluster.create,
                          Mock(),
                          self.cluster_name,
                          self.datastore,
                          self.datastore_version,
                          instances,
                          {}
                          )

    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(inst_models.Instance, 'create')
    @patch.object(DBCluster, 'create')
    @patch.object(task_api, 'load')
    @patch.object(QUOTAS, 'check_quotas')
    @patch.object(remote, 'create_nova_client')
    def test_create(self, mock_client, mock_check_quotas, mock_task_api,
                    mock_db_create, mock_ins_create, mock_find_all):
        instances = self.instances
        flavors = Mock()
        networks = Mock()
        mock_client.return_value.flavors = flavors
        mock_client.return_value.networks = networks
        self.cluster.create(Mock(),
                            self.cluster_name,
                            self.datastore,
                            self.datastore_version,
                            instances, {})
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)

    @patch.object(inst_models.Instance, 'create')
    @patch.object(DBCluster, 'create')
    @patch.object(task_api, 'load')
    @patch.object(QUOTAS, 'check_quotas')
    @patch.object(remote, 'create_nova_client')
    def test_create_over_limit(self, mock_client, mock_check_quotas,
                               mock_task_api, mock_db_create,
                               mock_ins_create):
        instances = [{'volume_size': 1, 'flavor_id': '1234'},
                     {'volume_size': 1, 'flavor_id': '1234'},
                     {'volume_size': 1, 'flavor_id': '1234'},
                     {'volume_size': 1, 'flavor_id': '1234'}]
        flavors = Mock()
        mock_client.return_value.flavors = flavors
        self.cluster.create(Mock(),
                            self.cluster_name,
                            self.datastore,
                            self.datastore_version,
                            instances, {})
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(4, mock_ins_create.call_count)

    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(galera_api, 'CONF')
    @patch.object(inst_models.Instance, 'create')
    @patch.object(DBCluster, 'create')
    @patch.object(task_api, 'load')
    @patch.object(QUOTAS, 'check_quotas')
    @patch.object(remote, 'create_nova_client')
    def test_create_with_ephemeral_flavor(self, mock_client,
                                          mock_check_quotas, mock_task_api,
                                          mock_db_create, mock_ins_create,
                                          mock_conf, mock_find_all):

        class FakeFlavor:
            def __init__(self, flavor_id):
                self.flavor_id = flavor_id

            @property
            def id(self):
                return self.flavor.id

            @property
            def ephemeral(self):
                return 1

        instances = [{'flavor_id': '1234'},
                     {'flavor_id': '1234'},
                     {'flavor_id': '1234'}]
        mock_conf.get = Mock(
            return_value=FakeOptGroup(volume_support=False))
        (mock_client.return_value.
         flavors.get.return_value) = FakeFlavor('1234')
        self.cluster.create(Mock(),
                            self.cluster_name,
                            self.datastore,
                            self.datastore_version,
                            instances, {})
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)

    @patch('trove.cluster.models.LOG')
    def test_delete_bad_task_status(self, mock_logging):
        self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL
        self.assertRaises(exception.UnprocessableEntity,
                          self.cluster.delete)

    @patch.object(task_api.API, 'delete_cluster')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_delete_task_status_none(self,
                                     mock_find_all,
                                     mock_update_db,
                                     mock_delete_cluster):
        self.cluster.db_info.task_status = ClusterTasks.NONE
        self.cluster.delete()
        mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)

    @patch.object(task_api.API, 'delete_cluster')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_delete_task_status_deleting(self,
                                         mock_find_all,
                                         mock_update_db,
                                         mock_delete_cluster):
        self.cluster.db_info.task_status = ClusterTasks.DELETING
        self.cluster.delete()
        mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)

    @patch.object(galera_api.GaleraCommonCluster,
                  '_get_cluster_network_interfaces')
    @patch.object(DBCluster, 'update')
    @patch.object(galera_api, 'CONF')
    @patch.object(inst_models.Instance, 'create')
    @patch.object(task_api, 'load')
    @patch.object(QUOTAS, 'check_quotas')
    @patch.object(remote, 'create_nova_client')
    def test_grow(self, mock_client, mock_check_quotas, mock_task_api,
                  mock_inst_create, mock_conf, mock_update, mock_interfaces):
        mock_client.return_value.flavors = Mock()
        mock_interfaces.return_value = [Mock()]
        self.cluster.grow(self.instances)
        mock_update.assert_called_with(
            task_status=ClusterTasks.GROWING_CLUSTER)
        mock_task_api.return_value.grow_cluster.assert_called_with(
            self.db_info.id, [mock_inst_create.return_value.id] * 3)
        self.assertEqual(3, mock_inst_create.call_count)
        self.assertEqual(1, mock_interfaces.call_count)

    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(inst_models.Instance, 'load')
    @patch.object(Cluster, 'validate_cluster_available')
    def test_shrink_empty(self, mock_validate, mock_load, mock_find_all):
        instance = Mock()
        self.assertRaises(
            exception.ClusterShrinkMustNotLeaveClusterEmpty,
            self.cluster.shrink, [instance])

    @patch.object(galera_api.GaleraCommonCluster, '__init__')
    @patch.object(task_api, 'load')
    @patch.object(DBCluster, 'update')
    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(inst_models.Instance, 'load')
    @patch.object(Cluster, 'validate_cluster_available')
    def test_shrink(self, mock_validate, mock_load, mock_find_all,
                    mock_update, mock_task_api, mock_init):
        mock_init.return_value = None
        existing_instances = [Mock(), Mock()]
        mock_find_all.return_value.all.return_value = existing_instances
        instance = Mock()
        self.cluster.shrink([instance])
        mock_validate.assert_called_with()
        mock_update.assert_called_with(
            task_status=ClusterTasks.SHRINKING_CLUSTER)
        mock_task_api.return_value.shrink_cluster.assert_called_with(
            self.db_info.id, [mock_load.return_value.id])
        mock_init.assert_called_with(self.context, self.db_info,
                                     self.datastore, self.datastore_version)
'mongodb' view = load_view(cluster, Mock()) self.assertIsInstance(view, MongoDbClusterView) def test__build_instances(self, *args): cluster = Mock() cluster.instances = [] cluster.instances.append(Mock()) cluster.instances.append(Mock()) cluster.instances.append(Mock()) cluster.instances[0].type = 'configsvr' cluster.instances[0].get_visible_ip_addresses = lambda: ['1.2.3.4'] cluster.instances[0].datastore_version.manager = 'mongodb' cluster.instances[1].type = 'query_router' cluster.instances[1].get_visible_ip_addresses = lambda: ['1.2.3.4'] cluster.instances[1].datastore_version.manager = 'mongodb' cluster.instances[2].type = 'member' cluster.instances[2].get_visible_ip_addresses = lambda: ['1.2.3.4'] cluster.instances[2].datastore_version.manager = 'mongodb' def test_case(ip_to_be_published_for, instance_dict_to_be_published_for, number_of_ip_published, number_of_instance_dict_published): view = ClusterView(cluster, MagicMock()) instances, ip_list = view._build_instances( ip_to_be_published_for, instance_dict_to_be_published_for) self.assertEqual(number_of_ip_published, len(ip_list)) self.assertEqual(number_of_instance_dict_published, len(instances)) test_case([], [], 0, 0) test_case(['abc'], ['def'], 0, 0) test_case(['query_router'], ['member'], 1, 1) test_case(['query_router'], ['query_router', 'configsvr', 'member'], 1, 3) test_case(['query_router', 'member'], ['member'], 2, 1) class ClusterInstanceDetailViewTest(trove_testtools.TestCase): def setUp(self): super(ClusterInstanceDetailViewTest, self).setUp() self.instance = Mock() self.instance.created = 'Yesterday' self.instance.updated = 'Now' self.instance.datastore_version = Mock() self.instance.datastore_version.name = 'mysql_test_version' self.instance.hostname = 'test.trove.com' self.ip = "1.2.3.4" self.instance.addresses = {"private": [{"addr": self.ip}]} self.instance.volume_used = '3' self.instance.root_password = 'iloveyou' self.instance.get_visible_ip_addresses = lambda: ["1.2.3.4"] self.instance.slave_of_id = None self.instance.slaves = None def tearDown(self): super(ClusterInstanceDetailViewTest, self).tearDown() @patch.object(ClusterInstanceDetailView, '_build_links') @patch.object(ClusterInstanceDetailView, '_build_flavor_links') @patch.object(ClusterInstanceDetailView, '_build_configuration_info') def test_data(self, *args): view = ClusterInstanceDetailView(self.instance, Mock()) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertEqual(self.instance.hostname, result['instance']['hostname']) self.assertNotIn('ip', result['instance']) @patch.object(ClusterInstanceDetailView, '_build_links') @patch.object(ClusterInstanceDetailView, '_build_flavor_links') @patch.object(ClusterInstanceDetailView, '_build_configuration_info') def test_data_ip(self, *args): self.instance.hostname = None view = ClusterInstanceDetailView(self.instance, Mock()) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertNotIn('hostname', result['instance']) self.assertEqual([self.ip], result['instance']['ip']) 
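The test_case helper above drives ClusterView._build_instances with different publish filters. As a standalone illustration of the same filtering idea — hypothetical names throughout; each instance is only assumed to expose type and get_visible_ip_addresses(), as the mocks above do:

# Hedged sketch of the publish-filtering exercised by test__build_instances;
# this is not Trove's implementation, just the same idea in miniature.
class FakeInstance(object):
    def __init__(self, type_, ips):
        self.type = type_
        self._ips = ips

    def get_visible_ip_addresses(self):
        return self._ips


def build_instances(instances, ip_types, dict_types):
    """Publish IPs only for ip_types, instance dicts only for dict_types."""
    ip_list = []
    instance_dicts = []
    for inst in instances:
        if inst.type in ip_types:
            ip_list.extend(inst.get_visible_ip_addresses())
        if inst.type in dict_types:
            instance_dicts.append({'type': inst.type})
    return instance_dicts, ip_list


instances = [FakeInstance('configsvr', ['1.2.3.4']),
             FakeInstance('query_router', ['1.2.3.4']),
             FakeInstance('member', ['1.2.3.4'])]
dicts, ips = build_instances(instances, ['query_router'], ['member'])
# Matches test_case(['query_router'], ['member'], 1, 1) above.
assert len(ips) == 1 and len(dicts) == 1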
trove-5.0.0/trove/tests/unittests/cluster/test_cluster_models.py0000664000567000056710000000271412701410316026474 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import Mock, patch

from trove.cluster import models
from trove.common.strategies.cluster.experimental.mongodb.api import (
    MongoDbCluster)
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools


class TestClusterModel(trove_testtools.TestCase):

    @patch.object(datastore_models.Datastore, 'load')
    @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
    @patch.object(models.DBCluster, 'find_by')
    def test_load(self, mock_find_by, mock_load_dsv_by_uuid, mock_ds_load):
        context = trove_testtools.TroveTestContext(self)
        id = Mock()

        dsv = Mock()
        dsv.manager = 'mongodb'
        mock_load_dsv_by_uuid.return_value = dsv

        cluster = models.Cluster.load(context, id)
        self.assertIsInstance(cluster, MongoDbCluster)
trove-5.0.0/trove/tests/unittests/cluster/test_cluster_controller.py0000664000567000056710000003513412701410316027376 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools import TestCase
from testtools.matchers import Is, Equals

from trove.cluster import models
from trove.cluster.models import Cluster, DBCluster
from trove.cluster.service import ClusterController
from trove.cluster.tasks import ClusterTasks
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools


class TestClusterController(TestCase):

    def setUp(self):
        super(TestClusterController, self).setUp()
        self.controller = ClusterController()
        instances = [
            {
                "flavorRef": "7",
                "volume": {
                    "size": 1
                },
                "availability_zone": "az",
                "nics": [
                    {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                ]
            }
        ] * 5
        self.cluster = {
            "cluster": {
                "name": "products",
                "datastore": {
                    "type": "mongodb",
                    "version": "2.4.10"
                },
                "instances": instances
            }
        }
        self.add_shard = {
            "add_shard": {}
        }

    def test_get_schema_create(self):
        schema = self.controller.get_schema('create', self.cluster)
        self.assertIsNotNone(schema)
        self.assertTrue('cluster' in schema['properties'])

    def test_get_schema_action_add_shard(self):
        schema = self.controller.get_schema('add_shard', self.add_shard)
        self.assertIsNotNone(schema)
        self.assertTrue('add_shard' in schema['properties'])

    def test_validate_create(self):
        body = self.cluster
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_add_shard(self):
        body = self.add_shard
        schema = self.controller.get_schema('add_shard', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_blankname(self):
        body = self.cluster
        body['cluster']['name'] = " "
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(len(errors), Is(1))
        self.assertThat(errors[0].message,
                        Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))

    def test_validate_create_blank_datastore(self):
        body = self.cluster
        body['cluster']['datastore']['type'] = ""
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        error_messages = [error.message for error in errors]
        error_paths = [error.path.pop() for error in errors]
        self.assertThat(len(errors), Is(2))
        self.assertIn("'' is too short", error_messages)
        self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'",
                      error_messages)
        self.assertIn("type", error_paths)

    @patch.object(Cluster, 'create')
    @patch.object(datastore_models, 'get_datastore_version')
    def test_create_clusters_disabled(self,
                                      mock_get_datastore_version,
                                      mock_cluster_create):
        body = self.cluster
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        datastore_version = Mock()
        datastore_version.manager = 'mysql'
        mock_get_datastore_version.return_value = (Mock(), datastore_version)
        self.assertRaises(exception.ClusterDatastoreNotSupported,
                          self.controller.create, req, body, tenant_id)

    @patch.object(Cluster, 'create')
@patch.object(utils, 'get_id_from_href') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters(self, mock_get_datastore_version, mock_id_from_href, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mongodb' datastore = Mock() mock_get_datastore_version.return_value = (datastore, datastore_version) instances = [ { 'volume_size': 1, 'volume_type': None, 'flavor_id': '1234', 'availability_zone': 'az', 'nics': [ {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'} ] } ] * 5 mock_id_from_href.return_value = '1234' mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'mongodb' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) mock_cluster_create.assert_called_with(context, 'products', datastore, datastore_version, instances, {}) @patch.object(Cluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'mongodb' mock_cluster_load.return_value = mock_cluster self.controller.show(req, tenant_id, id) mock_cluster_load.assert_called_with(context, id) @patch.object(Cluster, 'load') @patch.object(Cluster, 'load_instance') def test_show_cluster_instance(self, mock_cluster_load_instance, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() instance_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) cluster = Mock() mock_cluster_load.return_value = cluster cluster.id = cluster_id self.controller.show_instance(req, tenant_id, cluster_id, instance_id) mock_cluster_load_instance.assert_called_with(context, cluster.id, instance_id) @patch.object(Cluster, 'load') def test_delete_cluster(self, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() req = MagicMock() cluster = Mock() trove_testtools.patch_notifier(self) mock_cluster_load.return_value = cluster self.controller.delete(req, tenant_id, cluster_id) cluster.delete.assert_called_with() class TestClusterControllerWithStrategy(TestCase): def setUp(self): super(TestClusterControllerWithStrategy, self).setUp() self.controller = ClusterController() self.cluster = { "cluster": { "name": "products", "datastore": { "type": "mongodb", "version": "2.4.10" }, "instances": [ { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, } ] } } def tearDown(self): super(TestClusterControllerWithStrategy, self).tearDown() cfg.CONF.clear_override('cluster_support', group='mongodb') cfg.CONF.clear_override('api_strategy', group='mongodb') @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_disabled(self, mock_cluster_create, mock_get_datastore_version): cfg.CONF.set_override('cluster_support', False, group='mongodb', enforce_type=True) body = self.cluster tenant_id = Mock() 
context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mongodb' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.TroveError, self.controller.create, req, body, tenant_id) @patch.object(views.ClusterView, 'data', return_value={}) @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_enabled(self, mock_cluster_create, mock_get_datastore_version, mock_cluster_view_data): cfg.CONF.set_override('cluster_support', True, group='mongodb', enforce_type=True) body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mongodb' mock_get_datastore_version.return_value = (Mock(), datastore_version) mock_cluster = Mock() mock_cluster.datastore_version.manager = 'mongodb' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) @patch.object(models.Cluster, 'load') def test_controller_action_multi_action(self, mock_cluster_load): body = {'do_stuff': {}, 'do_stuff2': {}} tenant_id = Mock() context = trove_testtools.TroveTestContext(self) cluster_id = Mock() req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) cluster = Mock() cluster.instances_without_server = [Mock()] cluster.datastore_version.manager = 'test_dsv' mock_cluster_load.return_value = cluster self.assertRaisesRegexp(exception.TroveError, 'should have exactly one action specified', self.controller.action, req, body, tenant_id, cluster_id) @patch.object(models.Cluster, 'load') def test_controller_action_no_strategy(self, mock_cluster_load): body = {'do_stuff2': {}} tenant_id = Mock() context = trove_testtools.TroveTestContext(self) cluster_id = Mock() req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) db_info = DBCluster(ClusterTasks.NONE, id=cluster_id, tenant_id=tenant_id) cluster = Cluster(context, db_info, datastore='test_ds', datastore_version='test_dsv') mock_cluster_load.return_value = cluster self.assertRaisesRegexp(exception.TroveError, 'Action do_stuff2 not supported', self.controller.action, req, body, tenant_id, cluster_id) @patch.object(strategy, 'load_api_strategy') @patch.object(models.Cluster, 'load') def test_controller_action_found(self, mock_cluster_load, mock_cluster_api_strategy): body = {'grow': {}} tenant_id = Mock() context = trove_testtools.TroveTestContext(self) cluster_id = 'test_uuid' req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) cluster = Mock() cluster.instances_without_server = [Mock()] cluster.datastore_version.manager = 'test_dsv' mock_cluster_load.return_value = cluster self.controller.action(req, body, tenant_id, cluster_id) self.assertEqual(1, cluster.action.call_count) trove-5.0.0/trove/tests/unittests/cluster/test_redis_cluster.py0000664000567000056710000002674412701410316026330 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import cfg from trove.common import exception from trove.common import remote from trove.common.strategies.cluster.experimental.redis import api as redis_api from trove.instance import models as inst_models from trove.instance.models import DBInstance from trove.instance.models import InstanceTasks from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools CONF = cfg.CONF class FakeOptGroup(object): def __init__(self, cluster_member_count=3, volume_support=True, device_path='/dev/vdb'): self.cluster_member_count = cluster_member_count self.volume_support = volume_support self.device_path = device_path class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.dbcreate_patch = patch.object(DBCluster, 'create', return_value=self.db_info) self.dbcreate_mock = self.dbcreate_patch.start() self.addCleanup(self.dbcreate_patch.stop) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "redis" self.datastore_version = self.dv self.cluster = redis_api.RedisCluster(self.context, self.db_info, self.datastore, self.datastore_version) self.instances_w_volumes = [{'volume_size': 1, 'flavor_id': '1234'}] * 3 self.instances_no_volumes = [{'flavor_id': '1234'}] * 3 def tearDown(self): super(ClusterTest, self).tearDown() @patch.object(remote, 'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client): (mock_client.return_value.flavors.get) = Mock( side_effect=nova_exceptions.NotFound( 404, "Flavor id not found %s" % id)) self.assertRaises(exception.FlavorNotFound, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_w_volumes, {}) @patch.object(remote, 'create_nova_client') @patch.object(redis_api, 'CONF') def test_create_volume_no_specified(self, mock_conf, mock_client): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=True)) self.assertRaises(exception.VolumeSizeNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_no_volumes, {}) @patch.object(remote, 'create_nova_client') @patch.object(redis_api, 'CONF') def test_create_storage_specified_with_no_volume_support(self, mock_conf, mock_client): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) 
mock_client.return_value.flavors = Mock() self.assertRaises(exception.VolumeNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_w_volumes, {}) @patch.object(remote, 'create_nova_client') @patch.object(redis_api, 'CONF') def test_create_storage_not_specified_and_no_ephemeral_flavor(self, mock_conf, mock_client): class FakeFlavor: def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 0 mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. flavors.get.return_value) = FakeFlavor('1234') self.assertRaises(exception.LocalStorageNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_no_volumes, {}) @patch.object(redis_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(remote, 'create_nova_client') def test_create(self, mock_client, mock_check_quotas, mock_task_api, mock_ins_create, mock_conf): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=True)) mock_client.return_value.flavors = Mock() self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_w_volumes, {}) mock_task_api.return_value.create_cluster.assert_called_with( self.dbcreate_mock.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch.object(redis_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(remote, 'create_nova_client') def test_create_with_ephemeral_flavor(self, mock_client, mock_check_quotas, mock_task_api, mock_ins_create, mock_conf): class FakeFlavor: def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 1 mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. 
flavors.get.return_value) = FakeFlavor('1234') self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_no_volumes, {}) mock_task_api.return_value.create_cluster.assert_called_with( self.dbcreate_mock.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch.object(DBCluster, 'update') @patch.object(redis_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(remote, 'create_nova_client') def test_grow(self, mock_client, mock_check_quotas, mock_task_api, mock_ins_create, mock_conf, mock_update): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=True)) mock_client.return_value.flavors = Mock() self.cluster.grow(self.instances_w_volumes) mock_task_api.return_value.grow_cluster.assert_called_with( self.dbcreate_mock.return_value.id, [mock_ins_create.return_value.id] * 3) self.assertEqual(3, mock_ins_create.call_count) @patch.object(DBInstance, 'find_all') @patch.object(Cluster, 'get_guest') @patch.object(DBCluster, 'update') @patch.object(inst_models.Instance, 'load') @patch.object(inst_models.Instance, 'delete') def test_shrink(self, mock_ins_delete, mock_ins_load, mock_update, mock_guest, mock_find_all): mock_find_all.return_value.all.return_value = [ DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, type="member")] self.cluster.shrink(['id1']) self.assertEqual(1, mock_ins_delete.call_count) @patch('trove.cluster.models.LOG') def test_delete_bad_task_status(self, mock_logging): self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL self.assertRaises(exception.UnprocessableEntity, self.cluster.delete) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_none(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.NONE self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_deleting(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.DELETING self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) trove-5.0.0/trove/tests/unittests/cluster/test_cluster_redis_controller.py0000664000567000056710000003044612701410316030565 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import jsonschema from mock import MagicMock from mock import Mock from mock import patch from testtools.matchers import Is, Equals from trove.cluster import models from trove.cluster.models import Cluster from trove.cluster.service import ClusterController from trove.cluster import views import trove.common.cfg as cfg from trove.common import exception from trove.common import utils from trove.datastore import models as datastore_models from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.controller = ClusterController() instances = [ { "volume_size": None, "flavorRef": "7", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, "flavorRef": "8", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, "flavorRef": "7", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] self.cluster = { "cluster": { "name": "products", "datastore": { "type": "redis", "version": "3.0" }, "instances": instances } } def test_get_schema_create(self): schema = self.controller.get_schema('create', self.cluster) self.assertIsNotNone(schema) self.assertTrue('cluster' in schema['properties']) self.assertTrue('cluster') def test_validate_create(self): body = self.cluster schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.cluster body['cluster']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_blank_datastore(self): body = self.cluster body['cluster']['datastore']['type'] = "" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("type", error_paths) @patch.object(Cluster, 'create') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters_disabled(self, mock_get_datastore_version, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mysql' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaisesRegexp(exception.ClusterDatastoreNotSupported, "Clusters not supported for", self.controller.create, req, body, tenant_id) @patch.object(Cluster, 'create') @patch.object(utils, 'get_id_from_href') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters(self, mock_get_datastore_version, mock_id_from_href, mock_cluster_create): body = self.cluster tenant_id = Mock() context = 
trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'redis' datastore = Mock() mock_get_datastore_version.return_value = (datastore, datastore_version) instances = [ { "volume_size": None, 'volume_type': None, "flavor_id": "1234", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, 'volume_type': None, "flavor_id": "1234", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, 'volume_type': None, "flavor_id": "1234", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] mock_id_from_href.return_value = '1234' mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'redis' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) mock_cluster_create.assert_called_with(context, 'products', datastore, datastore_version, instances, {}) @patch.object(Cluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'redis' mock_cluster_load.return_value = mock_cluster self.controller.show(req, tenant_id, id) mock_cluster_load.assert_called_with(context, id) @patch.object(Cluster, 'load') @patch.object(Cluster, 'load_instance') def test_show_cluster_instance(self, mock_cluster_load_instance, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() instance_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) cluster = Mock() mock_cluster_load.return_value = cluster cluster.id = cluster_id self.controller.show_instance(req, tenant_id, cluster_id, instance_id) mock_cluster_load_instance.assert_called_with(context, cluster.id, instance_id) @patch.object(Cluster, 'load') def test_delete_cluster(self, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() req = MagicMock() cluster = Mock() trove_testtools.patch_notifier(self) mock_cluster_load.return_value = cluster self.controller.delete(req, tenant_id, cluster_id) cluster.delete.assert_called_with() class TestClusterControllerWithStrategy(trove_testtools.TestCase): def setUp(self): super(TestClusterControllerWithStrategy, self).setUp() self.controller = ClusterController() self.cluster = { "cluster": { "name": "products", "datastore": { "type": "redis", "version": "3.0" }, "instances": [ { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, ] } } def tearDown(self): super(TestClusterControllerWithStrategy, self).tearDown() cfg.CONF.clear_override('cluster_support', group='redis') cfg.CONF.clear_override('api_strategy', group='redis') @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_disabled(self, mock_cluster_create, mock_get_datastore_version): cfg.CONF.set_override('cluster_support', False, group='redis', enforce_type=True) body = self.cluster tenant_id = Mock() context = 
trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'redis' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaisesRegexp(exception.TroveError, "Clusters not supported for", self.controller.create, req, body, tenant_id) @patch.object(views.ClusterView, 'data', return_value={}) @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_enabled(self, mock_cluster_create, mock_get_datastore_version, mock_cluster_view_data): cfg.CONF.set_override('cluster_support', True, group='redis', enforce_type=True) body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'redis' mock_get_datastore_version.return_value = (Mock(), datastore_version) mock_cluster = Mock() mock_cluster.datastore_version.manager = 'redis' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) trove-5.0.0/trove/tests/unittests/cluster/test_vertica_cluster.py0000664000567000056710000003177512701410316026657 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
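The redis and vertica cluster tests on either side of this point stub configuration with a FakeOptGroup: the code under test reads options as attributes of the object returned by CONF.get(<manager>), so a plain object carrying matching attribute names is enough to steer each branch. A minimal sketch of the idiom, with illustrative names only:

# Hedged sketch of the FakeOptGroup idiom; creates_volume() is a stand-in
# for the branch the real cluster create() code takes, not Trove's API.
from mock import Mock


class FakeOptGroup(object):
    def __init__(self, volume_support=True, device_path='/dev/vdb'):
        self.volume_support = volume_support
        self.device_path = device_path


def creates_volume(conf, manager):
    # Mirrors how the code under test consults per-datastore options.
    return conf.get(manager).volume_support


fake_conf = Mock()
fake_conf.get = Mock(return_value=FakeOptGroup(volume_support=False))
assert creates_volume(fake_conf, 'vertica') is False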
import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import cfg from trove.common import exception from trove.common import remote from trove.common.strategies.cluster.experimental.vertica import ( api as vertica_api) from trove.instance import models as inst_models from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools CONF = cfg.CONF class FakeOptGroup(object): def __init__(self, cluster_member_count=3, volume_support=True, device_path='/dev/vdb'): self.cluster_member_count = cluster_member_count self.volume_support = volume_support self.device_path = device_path class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "vertica" self.datastore_version = self.dv self.cluster = vertica_api.VerticaCluster(self.context, self.db_info, self.datastore, self.datastore_version) self.instances = [{'volume_size': 1, 'flavor_id': '1234', 'instance_type': 'master'}, {'volume_size': 1, 'flavor_id': '1234', 'instance_type': 'member'}, {'volume_size': 1, 'flavor_id': '1234', 'instance_type': 'member'}] self.db_instances = [1, 2, 3] def tearDown(self): super(ClusterTest, self).tearDown() @patch.object(inst_models.DBInstance, 'find_all') def test_create_empty_instances(self, *args): self.assertRaises(exception.ClusterNumInstancesNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, [], None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') def test_create_flavor_not_specified(self, *args): instances = self.instances instances[0]['flavor_id'] = None self.assertRaises(exception.ClusterFlavorsNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None ) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(remote, 'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client, mock_find_all, mock_create): instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] (mock_client.return_value.flavors.get) = Mock( side_effect=nova_exceptions.NotFound( 404, "Flavor id not found %s" % id)) self.assertRaises(exception.FlavorNotFound, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None ) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(remote, 'create_nova_client') def test_create_volume_no_specified(self, mock_client, mock_find_all, mock_create): instances = self.instances instances[0]['volume_size'] = None flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizeRequired, 
Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None ) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(remote, 'create_nova_client') @patch.object(vertica_api, 'CONF') def test_create_storage_specified_with_no_volume_support(self, mock_conf, mock_client, mock_find_all, mock_create): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) instances = self.instances instances[0]['volume_size'] = None flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.VolumeNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None ) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(remote, 'create_nova_client') @patch.object(vertica_api, 'CONF') def test_create_storage_not_specified_and_no_ephemeral_flavor(self, mock_conf, mock_client, m_find_all, mock_create): class FakeFlavor: def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 0 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. flavors.get.return_value) = FakeFlavor('1234') self.assertRaises(exception.LocalStorageNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None ) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(remote, 'create_nova_client') def test_create_volume_not_equal(self, mock_client, mock_find_all, mock_create): instances = self.instances instances[0]['volume_size'] = 2 flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizesNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None ) @patch.object(inst_models.DBInstance, 'find_all') @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(remote, 'create_nova_client') def test_create(self, mock_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create, mock_find_all): instances = self.instances flavors = Mock() mock_client.return_value.flavors = flavors self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch.object(inst_models.DBInstance, 'find_all') @patch.object(vertica_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(remote, 'create_nova_client') def test_create_with_ephemeral_flavor(self, mock_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create, mock_conf, mock_find_all): class FakeFlavor: def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 1 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. 
         flavors.get.return_value) = FakeFlavor('1234')
        self.cluster.create(Mock(), self.cluster_name, self.datastore,
                            self.datastore_version, instances, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)

    @patch('trove.cluster.models.LOG')
    def test_delete_bad_task_status(self, mock_logging):
        self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL
        self.assertRaises(exception.UnprocessableEntity,
                          self.cluster.delete)

    @patch.object(task_api.API, 'delete_cluster')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_delete_task_status_none(self, mock_find_all, mock_update_db,
                                     mock_delete_cluster):
        self.cluster.db_info.task_status = ClusterTasks.NONE
        self.cluster.delete()
        mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)

    @patch.object(task_api.API, 'delete_cluster')
    @patch.object(Cluster, 'update_db')
    @patch.object(inst_models.DBInstance, 'find_all')
    def test_delete_task_status_deleting(self, mock_find_all, mock_update_db,
                                         mock_delete_cluster):
        self.cluster.db_info.task_status = ClusterTasks.DELETING
        self.cluster.delete()
        mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
trove-5.0.0/trove/tests/unittests/__init__.py0000664000567000056710000000000012701410316022451 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/conductor/0000775000567000056710000000000012701410521022350 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/conductor/__init__.py0000664000567000056710000000000012701410316024451 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/conductor/test_conf.py0000664000567000056710000000473412701410316024720 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
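test_conf.py below checks that the conductor resolves its manager class from a dotted path in configuration, and that a bogus path ('foo.bar.MissingMgr') surfaces as ImportError. A rough importlib-based sketch of dotted-path loading under those assumptions — not necessarily the loader Trove actually uses:

# Illustrative dotted-path loader for "pkg.module.ClassName" strings such as
# the conductor_manager option exercised below; an unresolvable module
# raises ImportError, matching what test_invalid_manager expects.
import importlib


def load_manager(dotted_path):
    module_name, _, class_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_name)  # raises ImportError
    try:
        return getattr(module, class_name)()
    except AttributeError:
        raise ImportError("No class %r in %s" % (class_name, module_name))


# Example with a stdlib path: returns an OrderedDict instance.
mgr = load_manager('collections.OrderedDict')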
from mock import MagicMock
from mock import patch
from oslo_service import service as os_service

from trove.cmd import common as common_cmd
from trove.cmd import conductor as conductor_cmd
import trove.common.cfg as cfg
import trove.tests.fakes.conf as fake_conf
from trove.tests.unittests import trove_testtools

CONF = cfg.CONF
TROVE_UT = 'trove.tests.unittests'


def mocked_conf(manager):
    return fake_conf.FakeConf({
        'conductor_queue': 'conductor',
        'conductor_manager': manager,
        'trove_conductor_workers': 1,
        'host': 'mockhost',
        'report_interval': 1})


class NoopManager(object):
    RPC_API_VERSION = 1.0


class ConductorConfTests(trove_testtools.TestCase):

    def setUp(self):
        super(ConductorConfTests, self).setUp()

    def tearDown(self):
        super(ConductorConfTests, self).tearDown()

    def _test_manager(self, conf, rt_mgr_name):
        def mock_launch(conf, server, workers):
            qualified_mgr = "%s.%s" % (server.manager_impl.__module__,
                                       server.manager_impl.__class__.__name__)
            self.assertEqual(rt_mgr_name, qualified_mgr, "Invalid manager")
            return MagicMock()

        os_service.launch = mock_launch
        with patch.object(common_cmd, 'initialize',
                          MagicMock(return_value=conf)):
            conductor_cmd.main()

    def test_user_defined_manager(self):
        qualified_mgr = TROVE_UT + ".conductor.test_conf.NoopManager"
        self._test_manager(mocked_conf(qualified_mgr), qualified_mgr)

    def test_default_manager(self):
        qualified_mgr = "trove.conductor.manager.Manager"
        self._test_manager(CONF, qualified_mgr)

    def test_invalid_manager(self):
        self.assertRaises(ImportError, self._test_manager,
                          mocked_conf('foo.bar.MissingMgr'),
                          'foo.bar.MissingMgr')
trove-5.0.0/trove/tests/unittests/conductor/test_methods.py0000664000567000056710000002003712701410316025430 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
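The *_older_timestamp_discarded tests in test_methods.py below verify that the conductor ignores messages whose 'sent' timestamp is older than the newest one already applied. A minimal sketch of such a guard — hypothetical class and method names, not the conductor's actual code:

# Hedged sketch of sent-timestamp guarding, the behavior exercised by the
# heartbeat/update_backup ordering tests below.
class StatusStore(object):
    def __init__(self):
        self.status = None
        self._last_sent = float('-inf')

    def apply(self, status, sent):
        if sent <= self._last_sent:
            return False  # stale message: discard silently
        self._last_sent = sent
        self.status = status
        return True


store = StatusStore()
assert store.apply('NEW', sent=100.0)
assert not store.apply('BUILDING', sent=40.0)  # older timestamp, discarded
assert store.status == 'NEW'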
from mock import patch from trove.backup import models as bkup_models from trove.backup import state from trove.common import exception as t_exception from trove.common.instance import ServiceStatuses from trove.common import utils from trove.conductor import manager as conductor_manager from trove.guestagent.common import timeutils from trove.instance import models as t_models from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util # See LP bug #1255178 OLD_DBB_SAVE = bkup_models.DBBackup.save class ConductorMethodTests(trove_testtools.TestCase): def setUp(self): # See LP bug #1255178 bkup_models.DBBackup.save = OLD_DBB_SAVE super(ConductorMethodTests, self).setUp() util.init_db() self.cond_mgr = conductor_manager.Manager() self.instance_id = utils.generate_uuid() def tearDown(self): super(ConductorMethodTests, self).tearDown() def _create_iss(self): new_id = utils.generate_uuid() iss = t_models.InstanceServiceStatus( id=new_id, instance_id=self.instance_id, status=ServiceStatuses.NEW) iss.save() return new_id def _get_iss(self, id): return t_models.InstanceServiceStatus.find_by(id=id) def _create_backup(self, name='fake backup'): new_id = utils.generate_uuid() backup = bkup_models.DBBackup.create( id=new_id, name=name, description='This is a fake backup object.', tenant_id=utils.generate_uuid(), state=state.BackupState.NEW, instance_id=self.instance_id) backup.save() return new_id def _get_backup(self, id): return bkup_models.DBBackup.find_by(id=id) # --- Tests for heartbeat --- def test_heartbeat_instance_not_found(self): new_id = utils.generate_uuid() self.assertRaises(t_exception.ModelNotFoundError, self.cond_mgr.heartbeat, None, new_id, {}) @patch('trove.conductor.manager.LOG') def test_heartbeat_instance_no_changes(self, mock_logging): iss_id = self._create_iss() old_iss = self._get_iss(iss_id) self.cond_mgr.heartbeat(None, self.instance_id, {}) new_iss = self._get_iss(iss_id) self.assertEqual(old_iss.status_id, new_iss.status_id) self.assertEqual(old_iss.status_description, new_iss.status_description) @patch('trove.conductor.manager.LOG') def test_heartbeat_instance_status_bogus_change(self, mock_logging): iss_id = self._create_iss() old_iss = self._get_iss(iss_id) new_status = 'potato salad' payload = { 'service_status': new_status, } self.assertRaises(ValueError, self.cond_mgr.heartbeat, None, self.instance_id, payload) new_iss = self._get_iss(iss_id) self.assertEqual(old_iss.status_id, new_iss.status_id) self.assertEqual(old_iss.status_description, new_iss.status_description) @patch('trove.conductor.manager.LOG') def test_heartbeat_instance_status_changed(self, mock_logging): iss_id = self._create_iss() payload = {'service_status': ServiceStatuses.BUILDING.description} self.cond_mgr.heartbeat(None, self.instance_id, payload) iss = self._get_iss(iss_id) self.assertEqual(ServiceStatuses.BUILDING, iss.status) # --- Tests for update_backup --- def test_backup_not_found(self): new_bkup_id = utils.generate_uuid() self.assertRaises(t_exception.ModelNotFoundError, self.cond_mgr.update_backup, None, self.instance_id, new_bkup_id) @patch('trove.conductor.manager.LOG') def test_backup_instance_id_nomatch(self, mock_logging): new_iid = utils.generate_uuid() bkup_id = self._create_backup('nomatch') old_name = self._get_backup(bkup_id).name self.cond_mgr.update_backup(None, new_iid, bkup_id, name="remains unchanged") bkup = self._get_backup(bkup_id) self.assertEqual(old_name, bkup.name) @patch('trove.conductor.manager.LOG') def 
test_backup_bogus_fields_not_changed(self, mock_logging): bkup_id = self._create_backup('bogus') self.cond_mgr.update_backup(None, self.instance_id, bkup_id, not_a_valid_field="INVALID") bkup = self._get_backup(bkup_id) self.assertFalse(hasattr(bkup, 'not_a_valid_field')) @patch('trove.conductor.manager.LOG') def test_backup_real_fields_changed(self, mock_logging): bkup_id = self._create_backup('realrenamed') new_name = "recently renamed" self.cond_mgr.update_backup(None, self.instance_id, bkup_id, name=new_name) bkup = self._get_backup(bkup_id) self.assertEqual(new_name, bkup.name) # --- Tests for discarding old messages --- @patch('trove.conductor.manager.LOG') def test_heartbeat_newer_timestamp_accepted(self, mock_logging): new_p = {'service_status': ServiceStatuses.NEW.description} build_p = {'service_status': ServiceStatuses.BUILDING.description} iss_id = self._create_iss() iss = self._get_iss(iss_id) now = timeutils.float_utcnow() future = now + 60 self.cond_mgr.heartbeat(None, self.instance_id, new_p, sent=now) self.cond_mgr.heartbeat(None, self.instance_id, build_p, sent=future) iss = self._get_iss(iss_id) self.assertEqual(ServiceStatuses.BUILDING, iss.status) @patch('trove.conductor.manager.LOG') def test_heartbeat_older_timestamp_discarded(self, mock_logging): new_p = {'service_status': ServiceStatuses.NEW.description} build_p = {'service_status': ServiceStatuses.BUILDING.description} iss_id = self._create_iss() iss = self._get_iss(iss_id) now = timeutils.float_utcnow() past = now - 60 self.cond_mgr.heartbeat(None, self.instance_id, new_p, sent=past) self.cond_mgr.heartbeat(None, self.instance_id, build_p, sent=past) iss = self._get_iss(iss_id) self.assertEqual(ServiceStatuses.NEW, iss.status) def test_backup_newer_timestamp_accepted(self): old_name = "oldname" new_name = "renamed" bkup_id = self._create_backup(old_name) bkup = self._get_backup(bkup_id) now = timeutils.float_utcnow() future = now + 60 self.cond_mgr.update_backup(None, self.instance_id, bkup_id, sent=now, name=old_name) self.cond_mgr.update_backup(None, self.instance_id, bkup_id, sent=future, name=new_name) bkup = self._get_backup(bkup_id) self.assertEqual(new_name, bkup.name) def test_backup_older_timestamp_discarded(self): old_name = "oldname" new_name = "renamed" bkup_id = self._create_backup(old_name) bkup = self._get_backup(bkup_id) now = timeutils.float_utcnow() past = now - 60 self.cond_mgr.update_backup(None, self.instance_id, bkup_id, sent=now, name=old_name) self.cond_mgr.update_backup(None, self.instance_id, bkup_id, sent=past, name=new_name) bkup = self._get_backup(bkup_id) self.assertEqual(old_name, bkup.name) trove-5.0.0/trove/tests/unittests/secgroups/0000775000567000056710000000000012701410521022362 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/secgroups/__init__.py0000664000567000056710000000000012701410316024463 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/secgroups/test_security_group.py0000664000567000056710000001442012701410316027061 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.common import exception import trove.common.remote from trove.extensions.security_group import models as sec_mod from trove.instance import models as inst_model from trove.tests.fakes import nova from trove.tests.unittests import trove_testtools """ Unit tests for testing the exceptions raised by Security Groups """ class Security_Group_Exceptions_Test(trove_testtools.TestCase): def setUp(self): super(Security_Group_Exceptions_Test, self).setUp() self.createNovaClient = trove.common.remote.create_nova_client self.context = trove_testtools.TroveTestContext(self) self.FakeClient = nova.fake_create_nova_client(self.context) fException = Mock( side_effect=lambda *args, **kwargs: self._raise( nova_exceptions.ClientException("Test"))) self.FakeClient.security_groups.create = fException self.FakeClient.security_groups.delete = fException self.FakeClient.security_group_rules.create = fException self.FakeClient.security_group_rules.delete = fException trove.common.remote.create_nova_client = ( lambda c: self._return_mocked_nova_client(c)) def tearDown(self): super(Security_Group_Exceptions_Test, self).tearDown() trove.common.remote.create_nova_client = self.createNovaClient def _return_mocked_nova_client(self, context): return self.FakeClient def _raise(self, ex): raise ex @patch('trove.network.nova.LOG') def test_failed_to_create_security_group(self, mock_logging): self.assertRaises(exception.SecurityGroupCreationError, sec_mod.RemoteSecurityGroup.create, "TestName", "TestDescription", self.context) @patch('trove.network.nova.LOG') def test_failed_to_delete_security_group(self, mock_logging): self.assertRaises(exception.SecurityGroupDeletionError, sec_mod.RemoteSecurityGroup.delete, 1, self.context) @patch('trove.network.nova.LOG') def test_failed_to_create_security_group_rule(self, mock_logging): self.assertRaises(exception.SecurityGroupRuleCreationError, sec_mod.RemoteSecurityGroup.add_rule, 1, "tcp", 3306, 3306, "0.0.0.0/0", self.context) @patch('trove.network.nova.LOG') def test_failed_to_delete_security_group_rule(self, mock_logging): self.assertRaises(exception.SecurityGroupRuleDeletionError, sec_mod.RemoteSecurityGroup.delete_rule, 1, self.context) class fake_RemoteSecGr(object): def data(self): self.id = uuid.uuid4() return {'id': self.id} def delete(self, context): pass class fake_SecGr_Association(object): def get_security_group(self): return fake_RemoteSecGr() def delete(self): pass class SecurityGroupDeleteTest(trove_testtools.TestCase): def setUp(self): super(SecurityGroupDeleteTest, self).setUp() self.inst_model_conf_patch = patch.object(inst_model, 'CONF') self.inst_model_conf_mock = self.inst_model_conf_patch.start() self.addCleanup(self.inst_model_conf_patch.stop) self.context = trove_testtools.TroveTestContext(self) self.original_find_by = ( sec_mod.SecurityGroupInstanceAssociation.find_by) self.original_delete = sec_mod.SecurityGroupInstanceAssociation.delete self.fException = Mock( side_effect=lambda *args, **kwargs: self._raise( exception.ModelNotFoundError())) def tearDown(self): super(SecurityGroupDeleteTest, self).tearDown() (sec_mod.SecurityGroupInstanceAssociation. find_by) = self.original_find_by (sec_mod.SecurityGroupInstanceAssociation. 
delete) = self.original_delete def _raise(self, ex): raise ex def test_failed_to_get_assoc_on_delete(self): sec_mod.SecurityGroupInstanceAssociation.find_by = self.fException self.assertIsNone( sec_mod.SecurityGroup.delete_for_instance( uuid.uuid4(), self.context)) def test_get_security_group_from_assoc_with_db_exception(self): fException = Mock( side_effect=lambda *args, **kwargs: self._raise( nova_exceptions.ClientException('TEST'))) i_id = uuid.uuid4() class new_fake_RemoteSecGrAssoc(object): def get_security_group(self): return None def delete(self): return fException sec_mod.SecurityGroupInstanceAssociation.find_by = Mock( return_value=new_fake_RemoteSecGrAssoc()) self.assertIsNone( sec_mod.SecurityGroup.delete_for_instance( i_id, self.context)) def test_delete_secgr_assoc_with_db_exception(self): i_id = uuid.uuid4() sec_mod.SecurityGroupInstanceAssociation.find_by = Mock( return_value=fake_SecGr_Association()) sec_mod.SecurityGroupInstanceAssociation.delete = self.fException self.assertNotEqual(sec_mod.SecurityGroupInstanceAssociation.find_by( i_id, deleted=False).get_security_group(), None) self.assertTrue(hasattr(sec_mod.SecurityGroupInstanceAssociation. find_by(i_id, deleted=False). get_security_group(), 'delete')) self.assertIsNone( sec_mod.SecurityGroup.delete_for_instance( i_id, self.context)) trove-5.0.0/trove/tests/unittests/network/0000775000567000056710000000000012701410521022041 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/network/__init__.py0000664000567000056710000000000012701410316024142 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/network/test_neutron_driver.py0000664000567000056710000001256212701410316026527 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
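test_neutron_driver.py below, like the security-group tests above, swaps class attributes by saving originals in setUp and restoring them in tearDown. The same effect can be written with mock.patch plus addCleanup so restoration happens even if a later setUp step fails; a small self-contained sketch with placeholder names:

# Sketch of the patch/start/addCleanup idiom (placeholder Driver class,
# not the actual Trove attributes being stubbed in the surrounding tests).
from mock import Mock, patch
import unittest


class Driver(object):
    def create_security_group(self, name):
        raise RuntimeError('talks to a real service')


class ExampleTest(unittest.TestCase):
    def setUp(self):
        super(ExampleTest, self).setUp()
        patcher = patch.object(Driver, 'create_security_group',
                               Mock(return_value='sg-1'))
        self.create_sg_mock = patcher.start()
        self.addCleanup(patcher.stop)  # restores even on later setUp failure

    def test_create(self):
        self.assertEqual('sg-1', Driver().create_security_group('name'))


if __name__ == '__main__':
    unittest.main()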
# from mock import MagicMock from mock import Mock, patch from neutronclient.common import exceptions as neutron_exceptions from neutronclient.v2_0 import client as NeutronClient from trove.common import exception from trove.common.models import NetworkRemoteModelBase from trove.common import remote from trove.extensions.security_group.models import RemoteSecurityGroup from trove.network import neutron from trove.network.neutron import NeutronDriver as driver from trove.tests.unittests import trove_testtools class NeutronDriverTest(trove_testtools.TestCase): def setUp(self): super(NeutronDriverTest, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.orig_neutron_driver = NetworkRemoteModelBase.get_driver self.orig_create_sg = driver.create_security_group self.orig_add_sg_rule = driver.add_security_group_rule self.orig_del_sg_rule = driver.delete_security_group_rule self.orig_del_sg = driver.delete_security_group NetworkRemoteModelBase.get_driver = Mock(return_value=driver) def tearDown(self): super(NeutronDriverTest, self).tearDown() NetworkRemoteModelBase.get_driver = self.orig_neutron_driver driver.create_security_group = self.orig_create_sg driver.add_security_group_rule = self.orig_add_sg_rule driver.delete_security_group_rule = self.orig_del_sg_rule driver.delete_security_group = self.orig_del_sg def test_create_security_group(self): driver.create_security_group = Mock() RemoteSecurityGroup.create(name=Mock(), description=Mock(), context=self.context) self.assertEqual(1, driver.create_security_group.call_count) def test_add_security_group_rule(self): driver.add_security_group_rule = Mock() RemoteSecurityGroup.add_rule(sec_group_id=Mock(), protocol=Mock(), from_port=Mock(), to_port=Mock(), cidr=Mock(), context=self.context) self.assertEqual(1, driver.add_security_group_rule.call_count) def test_delete_security_group_rule(self): driver.delete_security_group_rule = Mock() RemoteSecurityGroup.delete_rule(sec_group_rule_id=Mock(), context=self.context) self.assertEqual(1, driver.delete_security_group_rule.call_count) def test_delete_security_group(self): driver.delete_security_group = Mock() RemoteSecurityGroup.delete(sec_group_id=Mock(), context=self.context) self.assertEqual(1, driver.delete_security_group.call_count) class NeutronDriverExceptionTest(trove_testtools.TestCase): def setUp(self): super(NeutronDriverExceptionTest, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.orig_neutron_driver = NetworkRemoteModelBase.get_driver self.orig_NeutronClient = NeutronClient.Client self.orig_get_endpoint = remote.get_endpoint remote.get_endpoint = MagicMock(return_value="neutron_url") mock_driver = neutron.NeutronDriver(self.context) NetworkRemoteModelBase.get_driver = MagicMock( return_value=mock_driver) NeutronClient.Client = Mock( side_effect=neutron_exceptions.NeutronClientException()) def tearDown(self): super(NeutronDriverExceptionTest, self).tearDown() NetworkRemoteModelBase.get_driver = self.orig_neutron_driver NeutronClient.Client = self.orig_NeutronClient remote.get_endpoint = self.orig_get_endpoint @patch('trove.network.neutron.LOG') def test_create_sg_with_exception(self, mock_logging): self.assertRaises(exception.SecurityGroupCreationError, RemoteSecurityGroup.create, "sg_name", "sg_desc", self.context) @patch('trove.network.neutron.LOG') def test_add_sg_rule_with_exception(self, mock_logging): self.assertRaises(exception.SecurityGroupRuleCreationError, RemoteSecurityGroup.add_rule, "12234", "tcp", "22", "22", "0.0.0.0/8", 
self.context) @patch('trove.network.neutron.LOG') def test_delete_sg_rule_with_exception(self, mock_logging): self.assertRaises(exception.SecurityGroupRuleDeletionError, RemoteSecurityGroup.delete_rule, "12234", self.context) @patch('trove.network.neutron.LOG') def test_delete_sg_with_exception(self, mock_logging): self.assertRaises(exception.SecurityGroupDeletionError, RemoteSecurityGroup.delete, "123445", self.context) trove-5.0.0/trove/tests/unittests/module/0000775000567000056710000000000012701410521021635 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/module/test_module_views.py0000664000567000056710000000541012701410316025752 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import Mock, patch from trove.datastore import models from trove.module.views import DetailedModuleView from trove.tests.unittests import trove_testtools class ModuleViewsTest(trove_testtools.TestCase): def setUp(self): super(ModuleViewsTest, self).setUp() def tearDown(self): super(ModuleViewsTest, self).tearDown() class DetailedModuleViewTest(trove_testtools.TestCase): def setUp(self): super(DetailedModuleViewTest, self).setUp() self.module = Mock() self.module.name = 'test_module' self.module.type = 'test' self.module.md5 = 'md5-hash' self.module.created = 'Yesterday' self.module.updated = 'Now' self.module.datastore = 'mysql' self.module.datastore_version = '5.6' self.module.auto_apply = False self.module.tenant_id = 'my_tenant' def tearDown(self): super(DetailedModuleViewTest, self).tearDown() def test_data(self): datastore = Mock() datastore.name = self.module.datastore ds_version = Mock() ds_version.name = self.module.datastore_version with patch.object(models, 'get_datastore_version', Mock(return_value=(datastore, ds_version))): view = DetailedModuleView(self.module) result = view.data() self.assertEqual(self.module.name, result['module']['name']) self.assertEqual(self.module.type, result['module']['type']) self.assertEqual(self.module.md5, result['module']['md5']) self.assertEqual(self.module.created, result['module']['created']) self.assertEqual(self.module.updated, result['module']['updated']) self.assertEqual(self.module.datastore_version, result['module']['datastore_version']) self.assertEqual(self.module.datastore, result['module']['datastore']) self.assertEqual(self.module.auto_apply, result['module']['auto_apply']) self.assertEqual(self.module.tenant_id, result['module']['tenant_id']) trove-5.0.0/trove/tests/unittests/module/__init__.py0000664000567000056710000000000012701410316023736 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/module/test_module_controller.py0000664000567000056710000000604712701410316027007 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import jsonschema from testtools.matchers import Is, Equals from trove.module.service import ModuleController from trove.tests.unittests import trove_testtools class TestModuleController(trove_testtools.TestCase): def setUp(self): super(TestModuleController, self).setUp() self.controller = ModuleController() self.module = { "module": { "name": 'test_module', "module_type": 'test', "contents": 'my_contents\n', } } def verify_errors(self, errors, msg=None, properties=None, path=None): msg = msg or [] properties = properties or [] self.assertThat(len(errors), Is(len(msg))) i = 0 while i < len(msg): self.assertIn(errors[i].message, msg) if path: self.assertThat(path, Equals(properties[i])) else: self.assertThat(errors[i].path.pop(), Equals(properties[i])) i += 1 def test_get_schema_create(self): schema = self.controller.get_schema('create', {'module': {}}) self.assertIsNotNone(schema) self.assertTrue('module' in schema['properties']) def test_validate_create_complete(self): body = self.module schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.module body['module']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_invalid_name(self): body = self.module body['module']['name'] = "$#$%^^" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertEqual(1, len(errors)) self.assertIn("'$#$%^^' does not match '^.*[0-9a-zA-Z]+.*$'", errors[0].message) trove-5.0.0/trove/tests/unittests/module/test_module_models.py0000664000567000056710000000317612701410316026107 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
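# Illustrative sketch (an addition): the schema checks exercised by
# TestModuleController above, reduced to a standalone jsonschema example.
# The pattern is the one the tests assert against; the minimal schema dict
# is an assumption, not the controller's real schema.
import jsonschema

_NAME_SCHEMA_SKETCH = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string', 'pattern': '^.*[0-9a-zA-Z]+.*$'},
    },
}


def _name_is_valid_sketch(name):
    # A blank or purely symbolic name fails the pattern, mirroring
    # test_validate_create_blankname and test_validate_create_invalid_name.
    return jsonschema.Draft4Validator(_NAME_SCHEMA_SKETCH).is_valid(
        {'name': name})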
# from mock import Mock, patch from trove.common import cfg from trove.module import models from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util CONF = cfg.CONF class CreateModuleTest(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def setUp(self): util.init_db() self.context = Mock() self.name = "name" self.module_type = 'ping' self.contents = 'my_contents\n' super(CreateModuleTest, self).setUp() @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def tearDown(self): super(CreateModuleTest, self).tearDown() def test_can_create_module(self): module = models.Module.create( self.context, self.name, self.module_type, self.contents, 'my desc', 'my_tenant', None, None, False, True, False) self.assertIsNotNone(module) module.delete() trove-5.0.0/trove/tests/unittests/dns/0000775000567000056710000000000012701410521021134 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/dns/test_designate_driver.py0000664000567000056710000002257012701410316026073 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import hashlib from designateclient.v1.domains import Domain from designateclient.v1.records import Record from mock import MagicMock from mock import patch from trove.dns.designate import driver from trove.tests.unittests import trove_testtools class DesignateObjectConverterTest(trove_testtools.TestCase): def setUp(self): super(DesignateObjectConverterTest, self).setUp() def tearDown(self): super(DesignateObjectConverterTest, self).tearDown() def test_convert_domain_to_zone(self): name = 'www.example.com' id = '39413651-3b9e-41f1-a4df-e47d5e9f67be' email = 'john.smith@openstack.com' domain = Domain(name=name, id=id, email=email) converter = driver.DesignateObjectConverter() converted_domain = converter.domain_to_zone(domain) self.assertEqual(name, converted_domain.name) self.assertEqual(id, converted_domain.id) def test_convert_record_to_entry(self): name = 'test.example.com' id = '4f3439ef-fc8b-4098-a1aa-a66ed01102b9' domain_id = '39413651-3b9e-41f1-a4df-e47d5e9f67be' domain_name = 'example.com' type = 'CNAME' data = '127.0.0.1' ttl = 3600 priority = 1 zone = driver.DesignateDnsZone(domain_id, domain_name) record = Record(name=name, id=id, domain_id=domain_id, type=type, data=data, priority=priority, ttl=ttl) converter = driver.DesignateObjectConverter() converted_record = converter.record_to_entry(record, zone) self.assertEqual(name, converted_record.name) self.assertEqual(data, converted_record.content) self.assertEqual(type, converted_record.type) self.assertEqual(priority, converted_record.priority) self.assertEqual(ttl, converted_record.ttl) self.assertEqual(zone, converted_record.dns_zone) class DesignateDriverTest(trove_testtools.TestCase): def setUp(self): super(DesignateDriverTest, self).setUp() self.domains = [Domain(name='www.example.com', 
id='11111111-1111-1111-1111-111111111111', email='test@example.com'), Domain(name='www.trove.com', id='22222222-2222-2222-2222-222222222222', email='test@trove.com'), Domain(name='www.openstack.com', id='33333333-3333-3333-3333-333333333333', email='test@openstack.com')] self.records = [Record(name='record1', type='A', data='10.0.0.1', ttl=3600, priority=1), Record(name='record2', type='CNAME', data='10.0.0.2', ttl=1800, priority=2), Record(name='record3', type='A', data='10.0.0.3', ttl=3600, priority=1)] self.create_des_client_patch = patch.object( driver, 'create_designate_client', MagicMock(return_value=None)) self.create_des_client_mock = self.create_des_client_patch.start() self.addCleanup(self.create_des_client_patch.stop) def tearDown(self): super(DesignateDriverTest, self).tearDown() def test_get_entries_by_name(self): zone = driver.DesignateDnsZone('123', 'www.example.com') with patch.object(driver.DesignateDriver, '_get_records', MagicMock(return_value=self.records)): dns_driver = driver.DesignateDriver() entries = dns_driver.get_entries_by_name('record2', zone) self.assertEqual(1, len(entries), 'More than one record found') entry = entries[0] self.assertEqual('record2', entry.name) self.assertEqual('CNAME', entry.type) self.assertEqual('10.0.0.2', entry.content) self.assertEqual(1800, entry.ttl) self.assertEqual(2, entry.priority) zone = entry.dns_zone self.assertEqual('123', zone.id) self.assertEqual('www.example.com', zone.name) def test_get_entries_by_name_not_found(self): zone = driver.DesignateDnsZone('123', 'www.example.com') with patch.object(driver.DesignateDriver, '_get_records', MagicMock(return_value=self.records)): dns_driver = driver.DesignateDriver() entries = dns_driver.get_entries_by_name('record_not_found', zone) self.assertEqual(0, len(entries), 'Some records were returned') def test_get_entries_by_content(self): zone = driver.DesignateDnsZone('123', 'www.example.com') with patch.object(driver.DesignateDriver, '_get_records', MagicMock(return_value=self.records)): dns_driver = driver.DesignateDriver() entries = dns_driver.get_entries_by_content('10.0.0.1', zone) self.assertEqual(1, len(entries), 'More than one record found') entry = entries[0] self.assertEqual('record1', entry.name) self.assertEqual('A', entry.type) self.assertEqual('10.0.0.1', entry.content) self.assertEqual(3600, entry.ttl) self.assertEqual(1, entry.priority) zone = entry.dns_zone self.assertEqual('123', zone.id) self.assertEqual('www.example.com', zone.name) def test_get_entries_by_content_not_found(self): zone = driver.DesignateDnsZone('123', 'www.example.com') with patch.object(driver.DesignateDriver, '_get_records', MagicMock(return_value=self.records)): dns_driver = driver.DesignateDriver() entries = dns_driver.get_entries_by_content('127.0.0.1', zone) self.assertEqual(0, len(entries), 'Some records were returned') def test_get_dnz_zones(self): client = MagicMock() self.create_des_client_mock.return_value = client client.domains.list = MagicMock(return_value=self.domains) dns_driver = driver.DesignateDriver() zones = dns_driver.get_dns_zones() self.assertEqual(3, len(zones)) for x in range(0, 3): self.assertDomainsAreEqual(self.domains[x], zones[x]) def test_get_dnz_zones_by_name(self): client = MagicMock() self.create_des_client_mock.return_value = client client.domains.list = MagicMock(return_value=self.domains) dns_driver = driver.DesignateDriver() zones = dns_driver.get_dns_zones('www.trove.com') self.assertEqual(1, len(zones)) self.assertDomainsAreEqual(self.domains[1], zones[0]) 
def test_get_dnz_zones_not_found(self): client = MagicMock() self.create_des_client_mock.return_value = client client.domains.list = MagicMock(return_value=self.domains) dns_driver = driver.DesignateDriver() zones = dns_driver.get_dns_zones('www.notfound.com') self.assertEqual(0, len(zones)) def assertDomainsAreEqual(self, expected, actual): self.assertEqual(expected.name, actual.name) self.assertEqual(expected.id, actual.id) class DesignateInstanceEntryFactoryTest(trove_testtools.TestCase): def setUp(self): super(DesignateInstanceEntryFactoryTest, self).setUp() def tearDown(self): super(DesignateInstanceEntryFactoryTest, self).tearDown() def test_create_entry(self): instance_id = '11111111-2222-3333-4444-555555555555' driver.DNS_DOMAIN_ID = '00000000-0000-0000-0000-000000000000' driver.DNS_DOMAIN_NAME = 'trove.com' driver.DNS_TTL = 3600 hashed_id = base64.b32encode(hashlib.md5(instance_id).digest()) hashed_id_concat = hashed_id[:11].lower() exp_hostname = ("%s.%s" % (hashed_id_concat, driver.DNS_DOMAIN_NAME)) factory = driver.DesignateInstanceEntryFactory() entry = factory.create_entry(instance_id) self.assertEqual(exp_hostname, entry.name) self.assertEqual('A', entry.type) self.assertEqual(3600, entry.ttl) zone = entry.dns_zone self.assertEqual(driver.DNS_DOMAIN_NAME, zone.name) self.assertEqual(driver.DNS_DOMAIN_ID, zone.id) def test_create_entry_ends_with_dot(self): instance_id = '11111111-2222-3333-4444-555555555555' driver.DNS_DOMAIN_ID = '00000000-0000-0000-0000-000000000000' driver.DNS_DOMAIN_NAME = 'trove.com.' driver.DNS_TTL = 3600 hashed_id = base64.b32encode(hashlib.md5(instance_id).digest()) hashed_id_concat = hashed_id[:11].lower() exp_hostname = ("%s.%s" % (hashed_id_concat, driver.DNS_DOMAIN_NAME))[:-1] factory = driver.DesignateInstanceEntryFactory() entry = factory.create_entry(instance_id) self.assertEqual(exp_hostname, entry.name) trove-5.0.0/trove/tests/unittests/dns/__init__.py0000664000567000056710000000000012701410316023235 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/trove_testtools.py0000664000567000056710000001322112701410316024202 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
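# Illustrative sketch (an addition): the hostname derivation exercised by
# DesignateInstanceEntryFactoryTest above, isolated for clarity. Python 2
# string semantics (md5 over a str) are assumed, matching this source tree.
import base64
import hashlib


def _dns_hostname_sketch(instance_id, domain_name):
    # md5 the instance id, base32-encode the digest, keep the first 11
    # characters lowercased, and append the zone name. A trailing '.' on
    # the zone is kept here; the factory under test strips it.
    hashed = base64.b32encode(hashlib.md5(instance_id).digest())
    return '%s.%s' % (hashed[:11].lower(), domain_name)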
import abc import inspect import mock import os import sys import testtools from trove.common.context import TroveContext from trove.common.notification import DBaaSAPINotification from trove.tests import root_logger def patch_notifier(test_case): notification_notify = mock.patch.object( DBaaSAPINotification, "_notify") notification_notify.start() test_case.addCleanup(notification_notify.stop) class TroveTestNotification(DBaaSAPINotification): @abc.abstractmethod def event_type(self): return 'test_notification' @abc.abstractmethod def required_start_traits(self): return [] class TroveTestContext(TroveContext): def __init__(self, test_case, **kwargs): super(TroveTestContext, self).__init__(**kwargs) self.notification = TroveTestNotification( self, request_id='req_id', flavor_id='7') self.notification.server_type = 'api' patch_notifier(test_case) class TestCase(testtools.TestCase): """Base class of Trove unit tests. Integrates automatic dangling mock detection. """ _NEWLINE = '\n' @classmethod def setUpClass(cls): # Number of nested levels to examine when searching for mocks. # Higher setting will potentially uncover more dangling objects, # at the cost of increased scanning time. cls._max_recursion_depth = int(os.getenv( 'TROVE_TESTS_UNMOCK_RECURSION_DEPTH', 2)) # Should we skip the remaining tests after the first failure. cls._fail_fast = cls.is_bool(os.getenv( 'TROVE_TESTS_UNMOCK_FAIL_FAST', False)) # Should we report only unique dangling mock references. cls._only_unique = cls.is_bool(os.getenv( 'TROVE_TESTS_UNMOCK_ONLY_UNIQUE', True)) cls._dangling_mocks = set() root_logger.DefaultRootLogger(enable_backtrace=False) @classmethod def is_bool(cls, val): return str(val).lower() in ['true', '1', 't', 'y', 'yes', 'on', 'set'] def setUp(self): if self.__class__._fail_fast and self.__class__._dangling_mocks: self.skipTest("This test suite already has dangling mock " "references from a previous test case.") super(TestCase, self).setUp() self.addCleanup(self._assert_modules_unmocked) self._mocks_before = self._find_mock_refs() root_logger.DefaultRootHandler.set_info(self.id()) def tearDown(self): # yes, this is gross and not thread aware. # but the only way to make it thread aware would require that # we single thread all testing root_logger.DefaultRootHandler.set_info(info=None) super(TestCase, self).tearDown() def _assert_modules_unmocked(self): """Check that all members of loaded modules are currently unmocked. Consider only new mocks created since the last setUp() call. """ mocks_after = self._find_mock_refs() new_mocks = mocks_after.difference(self._mocks_before) if self.__class__._only_unique: # Remove mock references that have already been reported once in # this test suite (probably defined in setUp()). new_mocks.difference_update(self.__class__._dangling_mocks) self.__class__._dangling_mocks.update(new_mocks) if new_mocks: messages = ["Member '%s' needs to be unmocked." % item[0] for item in new_mocks] self.fail(self._NEWLINE + self._NEWLINE.join(messages)) def _find_mock_refs(self): discovered_mocks = set() for module_name, module in self._get_loaded_modules().items(): self._find_mocks(module_name, module, discovered_mocks, 1) return discovered_mocks def _find_mocks(self, parent_name, parent, container, depth): """Search for mock members in the parent object. Descend into class types. """ if depth <= self.__class__._max_recursion_depth: try: if isinstance(parent, mock.Mock): # Add just the parent if it's a mock itself. 
                    container.add((parent_name, parent))
                else:
                    # Add all mocked members of the parent.
                    for member_name, member in inspect.getmembers(parent):
                        full_name = '%s.%s' % (parent_name, member_name)
                        if isinstance(member, mock.Mock):
                            container.add((full_name, member))
                        elif inspect.isclass(member):
                            self._find_mocks(
                                full_name, member, container, depth + 1)
            except ImportError:
                pass  # Module cannot be imported - ignore it.
            except RuntimeError:
                # Something else went wrong when probing the class member.
                # See: https://bugs.launchpad.net/trove/+bug/1524918
                pass

    def _get_loaded_modules(self):
        return {name: obj for name, obj in sys.modules.items() if obj}
trove-5.0.0/trove/tests/unittests/router/0000775000567000056710000000000012701410521021670 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/router/test_router.py0000664000567000056710000000302112701410316024620 0ustar jenkinsjenkins00000000000000
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from routes import Mapper

from trove.common.wsgi import Router, Fault
from trove.tests.unittests import trove_testtools


class FakeRequest(object):
    """A fake webob request object designed to cause 404.

    The dispatcher actually checks if the given request is a dict and
    throws an error if it is. This object wrapper tricks the dispatcher
    into handling the request like a regular request.
    """

    environ = {
        "wsgiorg.routing_args": [
            False,
            False
        ]
    }


class TestRouter(trove_testtools.TestCase):
    """Test case for trove `Router` extensions."""

    def setUp(self):
        super(TestRouter, self).setUp()
        self.mapper = Mapper()

    def test_404_is_fault(self):
        """Test that the dispatcher wraps 404s in a `Fault`."""
        fake_request = FakeRequest()
        response = Router._dispatch(fake_request)
        self.assertIsInstance(response, Fault)
trove-5.0.0/trove/tests/unittests/router/__init__.py0000664000567000056710000000000012701410316023771 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/common/0000775000567000056710000000000012701410521021640 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/common/test_pagination.py0000664000567000056710000000652312701410316025412 0ustar jenkinsjenkins00000000000000
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
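# Illustrative sketch (an addition): a flat, single-level version of the
# dangling-mock scan that trove_testtools.TestCase performs above. The real
# implementation also descends into classes up to a configurable depth; the
# broad except mirrors its lenient handling of modules that fail
# introspection.
import inspect
import sys

import mock


def _find_top_level_mocks_sketch():
    found = set()
    for module_name, module in list(sys.modules.items()):
        if not module:
            continue
        try:
            for member_name, member in inspect.getmembers(module):
                if isinstance(member, mock.Mock):
                    found.add('%s.%s' % (module_name, member_name))
        except Exception:
            continue  # Skip modules that cannot be probed.
    return found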
# from trove.common import pagination from trove.tests.unittests import trove_testtools class TestPaginatedDataView(trove_testtools.TestCase): def test_creation_with_string_marker(self): view = pagination.PaginatedDataView("TestType", [], "http://current_page", next_page_marker="marker") self.assertEqual("marker", view.next_page_marker) def test_creation_with_none_marker(self): view = pagination.PaginatedDataView("TestType", [], "http://current_page", next_page_marker=None) self.assertIsNone(view.next_page_marker) def test_creation_with_none_string_marker(self): view = pagination.PaginatedDataView("TestType", [], "http://current_page", next_page_marker=52) self.assertEqual("52", view.next_page_marker) def _do_paginate_list(self, limit=None, marker=None, include_marker=False): li = ['a', 'b', 'c', 'd', 'e'] return pagination.paginate_list(li, limit, marker, include_marker) def test_paginate_list(self): # start list li_1, marker_1 = self._do_paginate_list(limit=2) self.assertEqual(['a', 'b'], li_1) self.assertEqual('b', marker_1) # continue list, do not include marker in result li_2, marker_2 = self._do_paginate_list(limit=2, marker=marker_1) self.assertEqual(['c', 'd'], li_2) self.assertEqual('d', marker_2) li_3, marker_3 = self._do_paginate_list(limit=2, marker=marker_2) self.assertEqual(['e'], li_3) self.assertIsNone(marker_3) # alternate continue list, include marker in result li_4, marker_4 = self._do_paginate_list(limit=2, marker=marker_1, include_marker=True) self.assertEqual(['b', 'c'], li_4) self.assertEqual('c', marker_4) li_5, marker_5 = self._do_paginate_list(limit=2, marker=marker_4, include_marker=True) self.assertEqual(['c', 'd'], li_5) self.assertEqual('d', marker_5) li_6, marker_6 = self._do_paginate_list(limit=2, marker=marker_5, include_marker=True) self.assertEqual(['d', 'e'], li_6) self.assertIsNone(marker_6) # bad marker li_4, marker_4 = self._do_paginate_list(marker='f') self.assertEqual([], li_4) self.assertIsNone(marker_4) trove-5.0.0/trove/tests/unittests/common/__init__.py0000664000567000056710000000000012701410316023741 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/common/test_utils.py0000664000567000056710000000660612701410316024423 0ustar jenkinsjenkins00000000000000# Copyright 2014 SUSE Linux GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
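# Illustrative sketch (an assumption, not trove's real implementation): the
# contract that TestPaginatedDataView checks for paginate_list above.
# Return at most `limit` items starting at `marker`, together with the
# marker of the next page, or None when the list is exhausted.
def _paginate_list_sketch(items, limit=None, marker=None,
                          include_marker=False):
    if marker is not None:
        try:
            idx = items.index(marker)
        except ValueError:
            return [], None  # An unknown marker yields an empty page.
        if not include_marker:
            idx += 1
        items = items[idx:]
    if limit is not None and len(items) > limit:
        return items[:limit], items[limit - 1]
    return items, None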
# from mock import Mock from testtools import ExpectedException from trove.common import exception from trove.common import utils from trove.tests.unittests import trove_testtools class TestTroveExecuteWithTimeout(trove_testtools.TestCase): def setUp(self): super(TestTroveExecuteWithTimeout, self).setUp() self.orig_utils_execute = utils.execute self.orig_utils_log_error = utils.LOG.error def tearDown(self): super(TestTroveExecuteWithTimeout, self).tearDown() utils.execute = self.orig_utils_execute utils.LOG.error = self.orig_utils_log_error def test_throws_process_execution_error(self): utils.execute = Mock( side_effect=exception.ProcessExecutionError( description='test-desc', exit_code=42, stderr='err', stdout='out', cmd='test')) with ExpectedException( exception.ProcessExecutionError, "test-desc\nCommand: test\nExit code: 42\n" "Stdout: 'out'\nStderr: 'err'"): utils.execute_with_timeout('/usr/bin/foo') def test_log_error_when_log_output_on_error_is_true(self): utils.execute = Mock( side_effect=exception.ProcessExecutionError( description='test-desc', exit_code=42, stderr='err', stdout='out', cmd='test')) utils.LOG.error = Mock() with ExpectedException( exception.ProcessExecutionError, "test-desc\nCommand: test\nExit code: 42\n" "Stdout: 'out'\nStderr: 'err'"): utils.execute_with_timeout( '/usr/bin/foo', log_output_on_error=True) utils.LOG.error.assert_called_with( u"Command 'test' failed. test-desc Exit code: 42\n" "stderr: err\nstdout: out") def test_unpack_singleton(self): self.assertEqual([1, 2, 3], utils.unpack_singleton([1, 2, 3])) self.assertEqual(0, utils.unpack_singleton([0])) self.assertEqual('test', utils.unpack_singleton('test')) self.assertEqual('test', utils.unpack_singleton(['test'])) self.assertEqual([], utils.unpack_singleton([])) self.assertIsNone(utils.unpack_singleton(None)) self.assertEqual([None, None], utils.unpack_singleton([None, None])) self.assertEqual('test', utils.unpack_singleton([['test']])) self.assertEqual([1, 2, 3], utils.unpack_singleton([[1, 2, 3]])) self.assertEqual(1, utils.unpack_singleton([[[1]]])) self.assertEqual([[1], [2]], utils.unpack_singleton([[1], [2]])) self.assertEqual(['a', 'b'], utils.unpack_singleton(['a', 'b'])) def test_pagination_limit(self): self.assertEqual(5, utils.pagination_limit(5, 9)) self.assertEqual(5, utils.pagination_limit(9, 5)) trove-5.0.0/trove/tests/unittests/common/test_template.py0000664000567000056710000001750612701410316025077 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
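# Illustrative sketch (an assumption, not the real trove.common.utils code):
# the unpack_singleton behaviour asserted in test_unpack_singleton above.
# Single-element lists collapse recursively; everything else is unchanged,
# so [[[1]]] becomes 1 while [[1], [2]] is returned as-is.
def _unpack_singleton_sketch(value):
    if isinstance(value, list) and len(value) == 1:
        return _unpack_singleton_sketch(value[0])
    return value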
import re

from mock import Mock

from trove.common import exception
from trove.common import template
from trove.common import utils
from trove.datastore.models import DatastoreVersion
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util


class TemplateTest(trove_testtools.TestCase):
    def setUp(self):
        super(TemplateTest, self).setUp()
        util.init_db()
        self.env = template.ENV
        self.template = self.env.get_template("mysql/config.template")
        self.flavor_dict = {'ram': 1024, 'name': 'small', 'id': '55'}
        self.server_id = "180b5ed1-3e57-4459-b7a3-2aeee4ac012a"

    def tearDown(self):
        super(TemplateTest, self).tearDown()

    def _find_in_template(self, contents, teststr):
        found_group = None
        for line in contents.split('\n'):
            m = re.search('^%s.*' % teststr, line)
            if m:
                found_group = m.group(0)
        return found_group

    def validate_template(self, contents, teststr, test_flavor, server_id):
        # expected query_cache_size = {{ 8 * flavor_multiplier }}M
        flavor_multiplier = test_flavor['ram'] / 512
        found_group = self._find_in_template(contents, teststr)
        if not found_group:
            self.fail("Could not find text in template")
        # Check that the last group has been rendered
        memsize = found_group.split(" ")[2]
        self.assertEqual("%sM" % (8 * flavor_multiplier), memsize)
        self.assertIsNotNone(server_id)
        self.assertGreater(len(server_id), 1)

    def test_rendering(self):
        rendered = self.template.render(flavor=self.flavor_dict,
                                        server_id=self.server_id)
        self.validate_template(rendered,
                               "query_cache_size",
                               self.flavor_dict,
                               self.server_id)

    def test_single_instance_config_rendering(self):
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'MySql'
        datastore.name = 'mysql-5.6'
        datastore.manager = 'mysql'
        config = template.SingleInstanceConfigTemplate(datastore,
                                                       self.flavor_dict,
                                                       self.server_id)
        self.validate_template(config.render(), "query_cache_size",
                               self.flavor_dict, self.server_id)

    def test_renderer_discovers_special_config(self):
        """Finds our special config file for the version 'mysql-test'."""
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'mysql'
        datastore.name = 'mysql-test'
        datastore.manager = 'mysql'
        config = template.SingleInstanceConfigTemplate(datastore,
                                                       self.flavor_dict,
                                                       self.server_id)
        self.validate_template(config.render(), "hyper",
                               {'ram': 0}, self.server_id)

    def test_replica_source_config_rendering(self):
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'MySql'
        datastore.name = 'mysql-5.6'
        datastore.manager = 'mysql'
        config = template.ReplicaSourceConfigTemplate(datastore,
                                                      self.flavor_dict,
                                                      self.server_id)
        self.assertTrue(self._find_in_template(config.render(), "log_bin"))

    def test_replica_config_rendering(self):
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'MySql'
        datastore.name = 'mysql-5.6'
        datastore.manager = 'mysql'
        config = template.ReplicaConfigTemplate(datastore,
                                                self.flavor_dict,
                                                self.server_id)
        self.assertTrue(self._find_in_template(config.render(), "relay_log"))


class HeatTemplateLoadTest(trove_testtools.TestCase):

    class FakeTemplate(object):
        def __init__(self):
            self.name = 'mysql/heat.template'

    def setUp(self):
        self.default = 'default.heat.template'
        self.orig_1 = utils.ENV.list_templates
        self.orig_2 = utils.ENV.get_template
        super(HeatTemplateLoadTest, self).setUp()

    def tearDown(self):
        utils.ENV.list_templates = self.orig_1
        utils.ENV.get_template = self.orig_2
        super(HeatTemplateLoadTest, self).tearDown()

    def test_heat_template_load_with_invalid_datastore(self):
        invalid_datastore = 'mysql-blah'
self.assertRaises(exception.InvalidDatastoreManager, template.load_heat_template, invalid_datastore) def test_heat_template_load_non_default(self): orig = utils.ENV._load_template utils.ENV._load_template = Mock(return_value=self.FakeTemplate()) mysql_tmpl = template.load_heat_template('mysql') self.assertNotEqual(mysql_tmpl.name, self.default) utils.ENV._load_template = orig def test_heat_template_load_success(self): mysql_tmpl = template.load_heat_template('mysql') redis_tmpl = template.load_heat_template('redis') cassandra_tmpl = template.load_heat_template('cassandra') mongo_tmpl = template.load_heat_template('mongodb') percona_tmpl = template.load_heat_template('percona') couchbase_tmpl = template.load_heat_template('couchbase') self.assertIsNotNone(mysql_tmpl) self.assertIsNotNone(redis_tmpl) self.assertIsNotNone(cassandra_tmpl) self.assertIsNotNone(mongo_tmpl) self.assertIsNotNone(percona_tmpl) self.assertIsNotNone(couchbase_tmpl) self.assertEqual(self.default, mysql_tmpl.name) self.assertEqual(self.default, redis_tmpl.name) self.assertEqual(self.default, cassandra_tmpl.name) self.assertEqual(self.default, mongo_tmpl.name) self.assertEqual(self.default, percona_tmpl.name) self.assertEqual(self.default, couchbase_tmpl.name) def test_render_templates_with_ports_from_config(self): mysql_tmpl = template.load_heat_template('mysql') tcp_rules = [{'cidr': "0.0.0.0/0", 'from_': 3306, 'to_': 3309}, {'cidr': "0.0.0.0/0", 'from_': 3320, 'to_': 33022}] output = mysql_tmpl.render( volume_support=True, ifaces=[], ports=[], tcp_rules=tcp_rules, udp_rules=[], files={}) self.assertIsNotNone(output) self.assertIn('FromPort: "3306"', output) self.assertIn('ToPort: "3309"', output) self.assertIn('CidrIp: "0.0.0.0/0"', output) self.assertIn('FromPort: "3320"', output) self.assertIn('ToPort: "33022"', output) def test_no_rules_if_no_ports(self): mysql_tmpl = template.load_heat_template('mysql') output = mysql_tmpl.render( volume_support=True, ifaces=[], ports=[], tcp_rules=[], udp_rules=[], files={}) self.assertIsNotNone(output) self.assertNotIn('- IpProtocol: "tcp"', output) self.assertNotIn('- IpProtocol: "udp"', output) trove-5.0.0/trove/tests/unittests/common/test_notification.py0000664000567000056710000003575312701410316025756 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
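# Illustrative sketch (an addition): the flavor-driven sizing rule that
# TemplateTest.validate_template above expects, 8M of query cache per 512MB
# of flavor RAM. The inline template is a hypothetical stand-in for
# mysql/config.template.
import jinja2

_CONFIG_SKETCH = jinja2.Template(
    'query_cache_size = {{ (8 * flavor["ram"] // 512) }}M')

# _CONFIG_SKETCH.render(flavor={'ram': 1024}) -> 'query_cache_size = 16M'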
# from mock import Mock, patch from oslo_utils import timeutils from trove.common import cfg from trove.common.context import TroveContext from trove.common import exception from trove.common import notification from trove.common.notification import EndNotification, StartNotification from trove.conductor import api as conductor_api from trove import rpc from trove.tests.unittests import trove_testtools class TestEndNotification(trove_testtools.TestCase): def setUp(self): super(TestEndNotification, self).setUp() self.context = trove_testtools.TroveTestContext(self) def _server_call(self, server_type): with patch.object(self.context, "notification", server_type=server_type) as notification: with EndNotification(self.context): pass self.assertTrue(notification.notify_end.called) def _server_exception(self, server_type): with patch.object(self.context, "notification", server_type=server_type) as notification: try: with EndNotification(self.context): raise exception.TroveError() except Exception: self.assertTrue(notification.notify_exc_info.called) def test_api_server_call(self): self._server_call('api') def test_api_server_exception(self): self._server_exception('api') def test_taskmanager_server_call(self): self._server_call('taskmanager') def test_taskmanager_server_exception(self): self._server_exception('taskmanager') def test_conductor_server_call(self): with patch.object(conductor_api, 'API') as api: with patch.object(self.context, "notification", server_type='conductor'): with EndNotification(self.context): pass self.assertTrue(api(self.context).notify_end.called) def test_conductor_server_exception(self): with patch.object(conductor_api, 'API') as api: with patch.object(self.context, "notification", server_type='conductor'): try: with EndNotification(self.context): raise exception.TroveError() except Exception: self.assertTrue(api(self.context).notify_exc_info.called) class TestStartNotification(trove_testtools.TestCase): def setUp(self): super(TestStartNotification, self).setUp() self.context = trove_testtools.TroveTestContext(self) def test_api_call(self): with patch.object(self.context, "notification", server_type='api') as notification: with StartNotification(self.context): pass self.assertTrue(notification.notify_start.called) def test_taskmanager_call(self): with patch.object(self.context, "notification", server_type='taskmanager') as notification: with StartNotification(self.context): pass self.assertTrue(notification.notify_start.called) def test_conductor_call(self): with patch.object(conductor_api, 'API'): with patch.object(self.context, "notification", server_type='conductor') as notification: with StartNotification(self.context): pass self.assertTrue(notification.notify_start.called) class TestNotificationCastWrapper(trove_testtools.TestCase): def test_no_notification(self): with notification.NotificationCastWrapper(TroveContext(), "foo"): pass def test_with_notification(self): context = trove_testtools.TroveTestContext(self) self.assertEqual(True, context.notification.needs_end_notification) with notification.NotificationCastWrapper(context, "foo"): self.assertEqual('foo', context.notification.server_type) self.assertEqual('api', context.notification.server_type) self.assertEqual(False, context.notification.needs_end_notification) class TestTroveBaseTraits(trove_testtools.TestCase): def setUp(self): super(TestTroveBaseTraits, self).setUp() self.instance = Mock(db_info=Mock(created=timeutils.utcnow())) @patch.object(rpc, 'get_notifier') def test_n(self, notifier): 
notification.TroveBaseTraits( instance=self.instance).notify('event_type', 'publisher') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args payload = a[2] required_payload_keys = [ 'created_at', 'name', 'instance_id', 'instance_name', 'instance_type_id', 'launched_at', 'nova_instance_id', 'region', 'state_description', 'state', 'tenant_id', 'user_id' ] self.assertTrue(set(required_payload_keys).issubset(set(payload))) @patch.object(rpc, 'get_notifier') def test_notification_after_serialization(self, notifier): orig_notify = notification.TroveBaseTraits(instance=self.instance) serialized = orig_notify.serialize(None) new_notify = notification.TroveBaseTraits().deserialize(None, serialized) new_notify.notify('event_type', 'publisher') self.assertTrue(notifier().info.called) class TestTroveCommonTraits(trove_testtools.TestCase): def setUp(self): super(TestTroveCommonTraits, self).setUp() self.instance = Mock(db_info=Mock(created=timeutils.utcnow())) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification(self, notifier): notification.TroveCommonTraits( instance=self.instance).notify('event_type', 'publisher') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args payload = a[2] self.assertTrue('availability_zone' in payload) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification_after_serialization(self, notifier): orig_notify = notification.TroveCommonTraits(instance=self.instance) serialized = orig_notify.serialize(None) new_notify = notification.TroveCommonTraits().deserialize(None, serialized) new_notify.notify('event_type', 'publisher') self.assertTrue(notifier().info.called) class TestTroveInstanceCreate(trove_testtools.TestCase): def setUp(self): super(TestTroveInstanceCreate, self).setUp() self.instance = Mock(db_info=Mock(created=timeutils.utcnow())) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification(self, notifier): notification.TroveInstanceCreate(instance=self.instance).notify() self.assertTrue(notifier().info.called) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification_after_serialization(self, notifier): orig_notify = notification.TroveInstanceCreate(instance=self.instance) serialized = orig_notify.serialize(None) new_notify = notification.TroveInstanceCreate().deserialize(None, serialized) new_notify.notify() self.assertTrue(notifier().info.called) class TestTroveInstanceDelete(trove_testtools.TestCase): def setUp(self): super(TestTroveInstanceDelete, self).setUp() self.instance = Mock(db_info=Mock(created=timeutils.utcnow())) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification(self, notifier): notification.TroveInstanceDelete(instance=self.instance).notify() self.assertTrue(notifier().info.called) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification_after_serialization(self, notifier): orig_notify = notification.TroveInstanceDelete(instance=self.instance) serialized = orig_notify.serialize(None) new_notify = notification.TroveInstanceDelete().deserialize(None, serialized) new_notify.notify() self.assertTrue(notifier().info.called) class TestTroveInstanceModifyVolume(trove_testtools.TestCase): def setUp(self): super(TestTroveInstanceModifyVolume, self).setUp() self.instance = Mock(db_info=Mock(created=timeutils.utcnow())) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 
'get_notifier') def test_notification(self, notifier): notification.TroveInstanceModifyVolume(instance=self.instance).notify() self.assertTrue(notifier().info.called) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification_after_serialization(self, notifier): orig_notify = notification.TroveInstanceModifyVolume( instance=self.instance) serialized = orig_notify.serialize(None) new_notify = notification.TroveInstanceModifyVolume().deserialize( None, serialized) new_notify.notify() self.assertTrue(notifier().info.called) class TestTroveInstanceModifyFlavor(trove_testtools.TestCase): def setUp(self): super(TestTroveInstanceModifyFlavor, self).setUp() self.instance = Mock(db_info=Mock(created=timeutils.utcnow())) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification(self, notifier): notification.TroveInstanceModifyFlavor(instance=self.instance).notify() self.assertTrue(notifier().info.called) @patch.object(cfg.CONF, 'get', Mock()) @patch.object(rpc, 'get_notifier') def test_notification_after_serialization(self, notifier): orig_notify = notification.TroveInstanceModifyFlavor( instance=self.instance) serialized = orig_notify.serialize(None) new_notify = notification.TroveInstanceModifyFlavor().deserialize( None, serialized) new_notify.notify() self.assertTrue(notifier().info.called) class TestDBaaSQuota(trove_testtools.TestCase): @patch.object(rpc, 'get_notifier') def test_notification(self, notifier): notification.DBaaSQuotas(None, Mock(), Mock()).notify() self.assertTrue(notifier().info.called) class DBaaSTestNotification(notification.DBaaSAPINotification): def event_type(self): return 'instance_test' def required_start_traits(self): return ['name', 'flavor_id', 'datastore'] def optional_start_traits(self): return ['databases', 'users'] def required_end_traits(self): return ['instance_id'] class TestDBaaSNotification(trove_testtools.TestCase): def setUp(self): super(TestDBaaSNotification, self).setUp() self.test_n = DBaaSTestNotification(Mock(), request=Mock()) def test_missing_required_start_traits(self): self.assertRaisesRegexp(exception.TroveError, self.test_n.required_start_traits()[0], self.test_n.notify_start) def test_invalid_start_traits(self): self.assertRaisesRegexp(exception.TroveError, "The following required keys", self.test_n.notify_start, foo='bar') def test_missing_required_end_traits(self): self.assertRaisesRegexp(exception.TroveError, self.test_n.required_end_traits()[0], self.test_n.notify_end) def test_invalid_end_traits(self): self.assertRaisesRegexp(exception.TroveError, "The following required keys", self.test_n.notify_end, foo='bar') def test_missing_required_error_traits(self): self.assertRaisesRegexp(exception.TroveError, self.test_n.required_error_traits()[0], self.test_n._notify, 'error', self.test_n.required_error_traits(), []) @patch.object(rpc, 'get_notifier') def test_start_event(self, notifier): self.test_n.notify_start(name='foo', flavor_id=7, datastore='db') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args self.assertEqual('dbaas.instance_test.start', a[1]) @patch.object(rpc, 'get_notifier') def test_end_event(self, notifier): self.test_n.notify_end(instance_id='foo') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args self.assertEqual('dbaas.instance_test.end', a[1]) @patch.object(rpc, 'get_notifier') def test_verify_base_values(self, notifier): self.test_n.notify_start(name='foo', flavor_id=7, datastore='db') 
self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args payload = a[2] self.assertTrue('client_ip' in payload) self.assertTrue('request_id' in payload) self.assertTrue('server_type' in payload) self.assertTrue('server_ip' in payload) self.assertTrue('tenant_id' in payload) @patch.object(rpc, 'get_notifier') def test_verify_required_start_args(self, notifier): self.test_n.notify_start(name='foo', flavor_id=7, datastore='db') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args payload = a[2] self.assertTrue('name' in payload) self.assertTrue('flavor_id' in payload) self.assertTrue('datastore' in payload) self.assertTrue('users' not in payload) @patch.object(rpc, 'get_notifier') def test_verify_optional_start_args(self, notifier): self.test_n.notify_start(name='foo', flavor_id=7, datastore='db', users='the users') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args payload = a[2] self.assertTrue('users' in payload) @patch.object(rpc, 'get_notifier') def test_verify_required_end_args(self, notifier): self.test_n.notify_end(instance_id='foo') self.assertTrue(notifier().info.called) a, _ = notifier().info.call_args payload = a[2] self.assertTrue('instance_id' in payload) trove-5.0.0/trove/tests/unittests/common/test_common_extensions.py0000664000567000056710000003056412701410320027025 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
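# Illustrative sketch (an addition): the call_args unpacking pattern the
# notification tests above rely on. Patch trove.rpc.get_notifier, fire a
# notification, then read the payload out of notifier().info(ctx, event,
# payload). `send` stands for any callable that emits a notification and is
# a hypothetical parameter.
import mock


def _captured_payload_sketch(send):
    with mock.patch('trove.rpc.get_notifier') as get_notifier:
        send()
        args, _ = get_notifier().info.call_args
        return args[2]  # The payload dict checked throughout these tests.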
# from mock import Mock from mock import patch from oslo_config.cfg import NoSuchOptError from trove.common import exception from trove.common import utils from trove.extensions.common import models from trove.extensions.common.service import ClusterRootController from trove.extensions.common.service import DefaultRootController from trove.extensions.common.service import RootController from trove.instance.models import DBInstance from trove.tests.unittests import trove_testtools class TestDefaultRootController(trove_testtools.TestCase): def setUp(self): super(TestDefaultRootController, self).setUp() self.controller = DefaultRootController() @patch.object(models.Root, "load") def test_root_index(self, root_load): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False self.controller.root_index(req, tenant_id, uuid, is_cluster) root_load.assert_called_with(context, uuid) def test_root_index_with_cluster(self): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_index, req, tenant_id, uuid, is_cluster) @patch.object(models.Root, "create") def test_root_create(self, root_create): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False password = Mock() body = {'password': password} self.controller.root_create(req, body, tenant_id, uuid, is_cluster) root_create.assert_called_with(context, uuid, context.user, password) def test_root_create_with_cluster(self): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True password = Mock() body = {'password': password} self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_create, req, body, tenant_id, uuid, is_cluster) class TestRootController(trove_testtools.TestCase): def setUp(self): super(TestRootController, self).setUp() self.controller = RootController() @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_index(self, service_get_datastore, service_load_root_controller): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = Mock() service_get_datastore.return_value = (ds_manager, is_cluster) root_controller = Mock() ret = Mock() root_controller.root_index = Mock(return_value=ret) service_load_root_controller.return_value = root_controller self.assertTrue(ret, self.controller.index(req, tenant_id, uuid)) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) root_controller.root_index.assert_called_with( req, tenant_id, uuid, is_cluster) @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_create(self, service_get_datastore, service_load_root_controller): req = Mock() body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = Mock() service_get_datastore.return_value = (ds_manager, is_cluster) root_controller = Mock() ret = Mock() root_controller.root_create = Mock(return_value=ret) service_load_root_controller.return_value = root_controller self.assertTrue( ret, self.controller.create(req, tenant_id, uuid, body=body)) 
service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) root_controller.root_create.assert_called_with( req, body, tenant_id, uuid, is_cluster) @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_create_with_no_root_controller(self, service_get_datastore, service_load_root_controller): req = Mock() body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = Mock() service_get_datastore.return_value = (ds_manager, is_cluster) service_load_root_controller.return_value = None self.assertRaises( NoSuchOptError, self.controller.create, req, tenant_id, uuid, body=body) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) class TestClusterRootController(trove_testtools.TestCase): def setUp(self): super(TestClusterRootController, self).setUp() self.controller = ClusterRootController() @patch.object(ClusterRootController, "cluster_root_index") def test_root_index_cluster(self, mock_cluster_root_index): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True self.controller.root_index(req, tenant_id, uuid, is_cluster) mock_cluster_root_index.assert_called_with(req, tenant_id, uuid) @patch.object(ClusterRootController, "instance_root_index") def test_root_index_instance(self, mock_instance_root_index): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False self.controller.root_index(req, tenant_id, uuid, is_cluster) mock_instance_root_index.assert_called_with(req, tenant_id, uuid) @patch.object(ClusterRootController, "cluster_root_create") def test_root_create_cluster(self, mock_cluster_root_create): req = Mock() body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True self.controller.root_create(req, body, tenant_id, uuid, is_cluster) mock_cluster_root_create.assert_called_with(req, body, tenant_id, uuid) @patch.object(ClusterRootController, "instance_root_create") def test_root_create_instance(self, mock_instance_root_create): req = Mock() body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False self.controller.root_create(req, body, tenant_id, uuid, is_cluster) mock_instance_root_create.assert_called_with(req, body, uuid) @patch.object(models.ClusterRoot, "load") def test_instance_root_index(self, mock_cluster_root_load): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() instance_id = utils.generate_uuid() self.controller.instance_root_index(req, tenant_id, instance_id) mock_cluster_root_load.assert_called_with(context, instance_id) @patch.object(models.ClusterRoot, "load", side_effect=exception.UnprocessableEntity()) def test_instance_root_index_exception(self, mock_cluster_root_load): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() instance_id = utils.generate_uuid() self.assertRaises( exception.UnprocessableEntity, self.controller.instance_root_index, req, tenant_id, instance_id ) mock_cluster_root_load.assert_called_with(context, instance_id) @patch.object(ClusterRootController, "instance_root_index") @patch.object(ClusterRootController, "_get_cluster_instance_id") def test_cluster_root_index(self, mock_get_cluster_instance, mock_instance_root_index): req = Mock() tenant_id = Mock() cluster_id = utils.generate_uuid() single_instance_id = 
Mock() mock_get_cluster_instance.return_value = (single_instance_id, Mock()) self.controller.cluster_root_index(req, tenant_id, cluster_id) mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id) mock_instance_root_index.assert_called_with(req, tenant_id, single_instance_id) @patch.object(ClusterRootController, "instance_root_create") @patch.object(ClusterRootController, "_get_cluster_instance_id") def test_cluster_root_create(self, mock_get_cluster_instance, mock_instance_root_create): req = Mock() body = Mock() tenant_id = Mock() cluster_id = utils.generate_uuid() single_instance_id = Mock() cluster_instances = Mock() mock_get_cluster_instance.return_value = (single_instance_id, cluster_instances) self.controller.cluster_root_create(req, body, tenant_id, cluster_id) mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id) mock_instance_root_create.assert_called_with(req, body, single_instance_id, cluster_instances) @patch.object(DBInstance, "find_all") def test_get_cluster_instance_id(self, mock_find_all): tenant_id = Mock() cluster_id = Mock() db_inst_1 = Mock() db_inst_1.id = utils.generate_uuid() db_inst_2 = Mock() db_inst_2.id = utils.generate_uuid() cluster_instances = [db_inst_1, db_inst_2] mock_find_all.return_value.all.return_value = cluster_instances ret = self.controller._get_cluster_instance_id(tenant_id, cluster_id) self.assertEqual(db_inst_1.id, ret[0]) self.assertEqual([db_inst_1.id, db_inst_2.id], ret[1]) @patch.object(models.ClusterRoot, "create") def test_instance_root_create(self, mock_cluster_root_create): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) password = Mock() body = {'password': password} instance_id = utils.generate_uuid() cluster_instances = Mock() self.controller.instance_root_create( req, body, instance_id, cluster_instances) mock_cluster_root_create.assert_called_with( context, instance_id, context.user, password, cluster_instances) @patch.object(models.ClusterRoot, "create") def test_instance_root_create_no_body(self, mock_cluster_root_create): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) password = None body = None instance_id = utils.generate_uuid() cluster_instances = Mock() self.controller.instance_root_create( req, body, instance_id, cluster_instances) mock_cluster_root_create.assert_called_with( context, instance_id, context.user, password, cluster_instances) trove-5.0.0/trove/tests/unittests/common/test_wsgi.py0000664000567000056710000000306012701410316024223 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
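#
# NOTE: ContextMiddleware.process_request is expected to build a
# TroveContext out of the keystone auth headers and stash it in the WSGI
# environ under wsgi.CONTEXT_KEY; judging from the assertions below, the
# mapping is roughly:
#
#     X-User-ID                 -> ctx.user
#     X-Auth-Token              -> ctx.auth_token
#     X-Service-Catalog (JSON)  -> ctx.service_catalog
#
# (X-User is sent in the test but should be ignored as deprecated.)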
# from testtools.matchers import Equals, Is, Not from trove.common import wsgi from trove.tests.unittests import trove_testtools import webob class TestWsgi(trove_testtools.TestCase): def test_process_request(self): middleware = wsgi.ContextMiddleware("test_trove") req = webob.BaseRequest({}) token = 'MI23fdf2defg123' user_id = 'test_user_id' req.headers = { 'X-User': 'do not use - deprecated', 'X-User-ID': user_id, 'X-Auth-Token': token, 'X-Service-Catalog': '[]' } req.environ = {} # invocation middleware.process_request(req) # assertions ctx = req.environ[wsgi.CONTEXT_KEY] self.assertThat(ctx, Not(Is(None))) self.assertThat(ctx.user, Equals(user_id)) self.assertThat(ctx.auth_token, Equals(token)) self.assertEqual(0, len(ctx.service_catalog)) trove-5.0.0/trove/tests/unittests/common/test_context.py0000664000567000056710000000570512701410316024746 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import Mock from testtools.matchers import Equals, Is from trove.common import context from trove.common.notification import DBaaSInstanceCreate from trove.tests.unittests import trove_testtools class TestTroveContext(trove_testtools.TestCase): def test_create_with_extended_args(self): expected_service_catalog = {'key': 'value'} ctx = context.TroveContext(user="test_user_id", request_id="test_req_id", limit="500", marker="x", service_catalog=expected_service_catalog) self.assertThat(ctx.limit, Equals("500")) self.assertThat(ctx.marker, Equals("x")) self.assertThat(ctx.service_catalog, Equals(expected_service_catalog)) def test_create(self): ctx = context.TroveContext(user='test_user_id', request_id='test_req_id') self.assertThat(ctx.user, Equals('test_user_id')) self.assertThat(ctx.request_id, Equals('test_req_id')) self.assertThat(ctx.limit, Is(None)) self.assertThat(ctx.marker, Is(None)) self.assertThat(ctx.service_catalog, Is(None)) def test_to_dict(self): ctx = context.TroveContext(user='test_user_id', request_id='test_req_id') ctx_dict = ctx.to_dict() self.assertThat(ctx_dict.get('user'), Equals('test_user_id')) self.assertThat(ctx_dict.get('request_id'), Equals('test_req_id')) def test_to_dict_with_notification(self): ctx = context.TroveContext(user='test_user_id', tenant='the_tenant', request_id='test_req_id') ctx.notification = DBaaSInstanceCreate(ctx, request=Mock()) ctx_dict = ctx.to_dict() self.assertThat(ctx_dict.get('user'), Equals('test_user_id')) self.assertThat(ctx_dict.get('request_id'), Equals('test_req_id')) self.assertTrue('trove_notification' in ctx_dict) n_dict = ctx_dict['trove_notification'] self.assertThat(n_dict.get('notification_classname'), Equals('trove.common.notification.' 
'DBaaSInstanceCreate')) trove-5.0.0/trove/tests/unittests/common/test_remote.py0000664000567000056710000007003612701410316024554 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import uuid from mock import patch, MagicMock import swiftclient.client from testtools import ExpectedException, matchers from trove.common import cfg from trove.common.context import TroveContext from trove.common import exception from trove.common import remote from trove.tests.fakes.swift import SwiftClientStub from trove.tests.unittests import trove_testtools class TestRemote(trove_testtools.TestCase): def setUp(self): super(TestRemote, self).setUp() def tearDown(self): super(TestRemote, self).tearDown() @patch.object(swiftclient.client.Connection, 'get_auth') def test_creation(self, get_auth_mock): self.assertIsNotNone(swiftclient.client.Connection()) def test_create_swift_client(self): mock_resp = MagicMock() with patch.object(swiftclient.client.Connection, 'get_container', MagicMock(return_value=["text", mock_resp])): service_catalog = [{'endpoints': [{'region': 'RegionOne', 'publicURL': 'example.com'}], 'type': 'object-store'}] client = remote.create_swift_client(TroveContext( tenant=uuid.uuid4().hex, service_catalog=service_catalog)) headers, container = client.get_container('bob') self.assertIs(headers, "text") self.assertIs(container, mock_resp) def test_empty_account(self): """ this is an account with no containers and no objects """ # setup expectation with SwiftClientStub() as swift_stub: swift_stub.with_account('123223') # interact conn = swiftclient.client.Connection() account_info = conn.get_account() self.assertThat(account_info, matchers.Not(matchers.Is(None))) self.assertThat(len(account_info), matchers.Is(2)) self.assertThat(account_info, matchers.IsInstance(tuple)) self.assertThat(account_info[0], matchers.IsInstance(dict)) self.assertThat( account_info[0], matchers.KeysEqual('content-length', 'accept-ranges', 'x-timestamp', 'x-trans-id', 'date', 'x-account-bytes-used', 'x-account-container-count', 'content-type', 'x-account-object-count')) self.assertThat(account_info[1], matchers.IsInstance(list)) self.assertThat(len(account_info[1]), matchers.Is(0)) def test_one_container(self): """ tests to ensure behavior is normal with one container """ # setup expectation with SwiftClientStub() as swift_stub: swift_stub.with_account('123223') cont_name = 'a-container-name' swift_stub.with_container(cont_name) # interact conn = swiftclient.client.Connection() conn.get_auth() conn.put_container(cont_name) # get headers plus container metadata self.assertThat(len(conn.get_account()), matchers.Is(2)) # verify container details account_containers = conn.get_account()[1] self.assertThat(len(account_containers), matchers.Is(1)) self.assertThat(account_containers[0], matchers.KeysEqual('count', 'bytes', 'name')) 
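# NOTE: swiftclient's get_account() returns a (headers, containers)
# tuple; each entry in the containers list is a dict with 'name',
# 'count' (object count) and 'bytes' (space used), which is exactly
# what the KeysEqual matcher above pins down.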
self.assertThat(account_containers[0]['name'], matchers.Is(cont_name)) # get container details cont_info = conn.get_container(cont_name) self.assertIsNotNone(cont_info) self.assertThat( cont_info[0], matchers.KeysEqual('content-length', 'x-container-object-count', 'accept-ranges', 'x-container-bytes-used', 'x-timestamp', 'x-trans-id', 'date', 'content-type')) self.assertThat(len(cont_info[1]), matchers.Equals(0)) # remove container swift_stub.without_container(cont_name) with ExpectedException(swiftclient.ClientException): conn.get_container(cont_name) # ensure there are no more containers in account self.assertThat(len(conn.get_account()[1]), matchers.Is(0)) def test_one_object(self): with SwiftClientStub() as swift_stub: swift_stub.with_account('123223') swift_stub.with_container('bob') swift_stub.with_object('bob', 'test', 'test_contents') # create connection conn = swiftclient.client.Connection() # test container lightly cont_info = conn.get_container('bob') self.assertIsNotNone(cont_info) self.assertThat(cont_info[0], matchers.KeysEqual('content-length', 'x-container-object-count', 'accept-ranges', 'x-container-bytes-used', 'x-timestamp', 'x-trans-id', 'date', 'content-type')) cont_objects = cont_info[1] self.assertThat(len(cont_objects), matchers.Equals(1)) obj_1 = cont_objects[0] self.assertThat(obj_1, matchers.Equals( {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950', 'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test', 'content_type': 'application/octet-stream', 'contents': 'test_contents'})) # test object api - not much to do here self.assertThat(conn.get_object('bob', 'test')[1], matchers.Is('test_contents')) # test remove object swift_stub.without_object('bob', 'test') # interact with ExpectedException(swiftclient.ClientException): conn.delete_object('bob', 'test') self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(0)) def test_two_objects(self): with SwiftClientStub() as swift_stub: swift_stub.with_account('123223') swift_stub.with_container('bob') swift_stub.with_container('bob2') swift_stub.with_object('bob', 'test', 'test_contents') swift_stub.with_object('bob', 'test2', 'test_contents2') conn = swiftclient.client.Connection() self.assertIs(len(conn.get_account()), 2) cont_info = conn.get_container('bob') self.assertIsNotNone(cont_info) self.assertThat(cont_info[0], matchers.KeysEqual('content-length', 'x-container-object-count', 'accept-ranges', 'x-container-bytes-used', 'x-timestamp', 'x-trans-id', 'date', 'content-type')) self.assertThat(len(cont_info[1]), matchers.Equals(2)) self.assertThat(cont_info[1][0], matchers.Equals( {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950', 'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test', 'content_type': 'application/octet-stream', 'contents': 'test_contents'})) self.assertThat(conn.get_object('bob', 'test')[1], matchers.Is('test_contents')) self.assertThat(conn.get_object('bob', 'test2')[1], matchers.Is('test_contents2')) swift_stub.without_object('bob', 'test') with ExpectedException(swiftclient.ClientException): conn.delete_object('bob', 'test') self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(1)) swift_stub.without_container('bob') with ExpectedException(swiftclient.ClientException): conn.get_container('bob') self.assertThat(len(conn.get_account()), matchers.Is(2)) def test_nonexisting_container(self): """ when a container does not exist and is accessed then a 404 is returned """ with SwiftClientStub() as swift_stub: swift_stub.with_account('123223') 
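# NOTE: the stub will know only about the 'existing' container, so the
# lookup of 'nonexisting' below should surface a
# swiftclient.ClientException, the stub's stand-in for the 404 a real
# Swift endpoint would return.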
swift_stub.with_container('existing') conn = swiftclient.client.Connection() with ExpectedException(swiftclient.ClientException): conn.get_container('nonexisting') def test_replace_object(self): """ Test to ensure that if an object is updated the container object count is the same and the contents of the object are updated """ with SwiftClientStub() as swift_stub: swift_stub.with_account('1223df2') swift_stub.with_container('new-container') swift_stub.with_object('new-container', 'new-object', 'new-object-contents') conn = swiftclient.client.Connection() conn.put_object('new-container', 'new-object', 'new-object-contents') obj_resp = conn.get_object('new-container', 'new-object') self.assertThat(obj_resp, matchers.Not(matchers.Is(None))) self.assertThat(len(obj_resp), matchers.Is(2)) self.assertThat(obj_resp[1], matchers.Is('new-object-contents')) # set expected behavior - trivial here since it is the intended # behavior however keep in mind this is just to support testing of # trove components swift_stub.with_object('new-container', 'new-object', 'updated-object-contents') conn.put_object('new-container', 'new-object', 'updated-object-contents') obj_resp = conn.get_object('new-container', 'new-object') self.assertThat(obj_resp, matchers.Not(matchers.Is(None))) self.assertThat(len(obj_resp), matchers.Is(2)) self.assertThat(obj_resp[1], matchers.Is( 'updated-object-contents')) # ensure object count has not increased self.assertThat(len(conn.get_container('new-container')[1]), matchers.Is(1)) class TestCreateCinderClient(trove_testtools.TestCase): def setUp(self): super(TestCreateCinderClient, self).setUp() self.volumev2_public_url = 'http://publicURL/v2' self.volume_public_url_region_two = 'http://publicURL-r2/v1' self.service_catalog = [ { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': self.volumev2_public_url, } ], 'type': 'volumev2' }, { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': 'http://publicURL-r1/v1', }, { 'region': 'RegionTwo', 'publicURL': self.volume_public_url_region_two, } ], 'type': 'volume' } ] def tearDown(self): super(TestCreateCinderClient, self).tearDown() cfg.CONF.clear_override('cinder_url') cfg.CONF.clear_override('cinder_service_type') cfg.CONF.clear_override('os_region_name') def test_create_with_no_conf_no_catalog(self): self.assertRaises(exception.EmptyCatalog, remote.create_cinder_client, TroveContext()) def test_create_with_conf_override(self): cinder_url_from_conf = 'http://example.com' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('cinder_url', cinder_url_from_conf, enforce_type=True) client = remote.create_cinder_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s/%s' % (cinder_url_from_conf, tenant_from_ctx), client.client.management_url) def test_create_with_conf_override_trailing_slash(self): cinder_url_from_conf = 'http://example.com/' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('cinder_url', cinder_url_from_conf, enforce_type=True) client = remote.create_cinder_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s%s' % (cinder_url_from_conf, tenant_from_ctx), client.client.management_url) def test_create_with_catalog_and_default_service_type(self): client = remote.create_cinder_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.volumev2_public_url, client.client.management_url) def test_create_with_catalog_all_opts(self): cfg.CONF.set_override('cinder_service_type', 'volume', enforce_type=True) cfg.CONF.set_override('os_region_name', 'RegionTwo', 
enforce_type=True) client = remote.create_cinder_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.volume_public_url_region_two, client.client.management_url) class TestCreateNovaClient(trove_testtools.TestCase): def setUp(self): super(TestCreateNovaClient, self).setUp() self.compute_public_url = 'http://publicURL/v2' self.computev3_public_url_region_two = 'http://publicURL-r2/v3' self.service_catalog = [ { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': self.compute_public_url, } ], 'type': 'compute' }, { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': 'http://publicURL-r1/v1', }, { 'region': 'RegionTwo', 'publicURL': self.computev3_public_url_region_two, } ], 'type': 'computev3' } ] def tearDown(self): super(TestCreateNovaClient, self).tearDown() cfg.CONF.clear_override('nova_compute_url') cfg.CONF.clear_override('nova_compute_service_type') cfg.CONF.clear_override('os_region_name') def test_create_with_no_conf_no_catalog(self): self.assertRaises(exception.EmptyCatalog, remote.create_nova_client, TroveContext()) def test_create_with_conf_override(self): nova_url_from_conf = 'http://example.com' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('nova_compute_url', nova_url_from_conf, enforce_type=True) client = remote.create_nova_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s/%s' % (nova_url_from_conf, tenant_from_ctx), client.client.management_url) def test_create_with_conf_override_trailing_slash(self): nova_url_from_conf = 'http://example.com/' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('nova_compute_url', nova_url_from_conf, enforce_type=True) client = remote.create_nova_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s%s' % (nova_url_from_conf, tenant_from_ctx), client.client.management_url) def test_create_with_catalog_and_default_service_type(self): client = remote.create_nova_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.compute_public_url, client.client.management_url) def test_create_with_catalog_all_opts(self): cfg.CONF.set_override('nova_compute_service_type', 'computev3', enforce_type=True) cfg.CONF.set_override('os_region_name', 'RegionTwo', enforce_type=True) client = remote.create_nova_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.computev3_public_url_region_two, client.client.management_url) def test_create_admin_client(self): nova_url_from_conf = 'http://adminexample.com/' cfg.CONF.set_override('nova_compute_url', nova_url_from_conf, enforce_type=True) admin_user = 'admin1' admin_pass = 'adminpwd' admin_tenant_id = uuid.uuid4().hex admin_client = remote.create_admin_nova_client( TroveContext(user=admin_user, auth_token=admin_pass, tenant=admin_tenant_id)) self.assertEqual(admin_user, admin_client.client.user) self.assertEqual(admin_pass, admin_client.client.password) self.assertEqual('%s%s' % (nova_url_from_conf, admin_tenant_id), admin_client.client.management_url) class TestCreateHeatClient(trove_testtools.TestCase): def setUp(self): super(TestCreateHeatClient, self).setUp() self.heat_public_url = 'http://publicURL/v2' self.heatv3_public_url_region_two = 'http://publicURL-r2/v3' self.service_catalog = [ { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': self.heat_public_url, } ], 'type': 'orchestration' }, { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': 'http://publicURL-r1/v1', }, { 'region': 'RegionTwo', 'publicURL': self.heatv3_public_url_region_two, } ], 'type': 'orchestrationv3' } ] 
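# NOTE: as with the cinder and nova clients above, endpoint resolution
# for heat is expected to prefer an explicit heat_url override and only
# fall back to a catalog lookup keyed by heat_service_type and
# os_region_name; that is why tearDown() below clears all three
# overrides between tests.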
def tearDown(self): super(TestCreateHeatClient, self).tearDown() cfg.CONF.clear_override('heat_url') cfg.CONF.clear_override('heat_service_type') cfg.CONF.clear_override('os_region_name') def test_create_with_no_conf_no_catalog(self): self.assertRaises(exception.EmptyCatalog, remote.create_heat_client, TroveContext()) def test_create_with_conf_override(self): heat_url_from_conf = 'http://example.com' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('heat_url', heat_url_from_conf, enforce_type=True) client = remote.create_heat_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s/%s' % (heat_url_from_conf, tenant_from_ctx), client.http_client.endpoint) def test_create_with_conf_override_trailing_slash(self): heat_url_from_conf = 'http://example.com/' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('heat_url', heat_url_from_conf, enforce_type=True) client = remote.create_heat_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s%s' % (heat_url_from_conf, tenant_from_ctx), client.http_client.endpoint) def test_create_with_catalog_and_default_service_type(self): client = remote.create_heat_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.heat_public_url, client.http_client.endpoint) def test_create_with_catalog_all_opts(self): cfg.CONF.set_override('heat_service_type', 'orchestrationv3', enforce_type=True) cfg.CONF.set_override('os_region_name', 'RegionTwo', enforce_type=True) client = remote.create_heat_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.heatv3_public_url_region_two, client.http_client.endpoint) class TestCreateSwiftClient(trove_testtools.TestCase): def setUp(self): super(TestCreateSwiftClient, self).setUp() self.swift_public_url = 'http://publicURL/v2' self.swiftv3_public_url_region_two = 'http://publicURL-r2/v3' self.service_catalog = [ { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': self.swift_public_url, } ], 'type': 'object-store' }, { 'endpoints': [ { 'region': 'RegionOne', 'publicURL': 'http://publicURL-r1/v1', }, { 'region': 'RegionTwo', 'publicURL': self.swiftv3_public_url_region_two, } ], 'type': 'object-storev3' } ] def tearDown(self): super(TestCreateSwiftClient, self).tearDown() cfg.CONF.clear_override('swift_url') cfg.CONF.clear_override('swift_service_type') cfg.CONF.clear_override('os_region_name') def test_create_with_no_conf_no_catalog(self): self.assertRaises(exception.EmptyCatalog, remote.create_swift_client, TroveContext()) def test_create_with_conf_override(self): swift_url_from_conf = 'http://example.com/AUTH_' tenant_from_ctx = uuid.uuid4().hex cfg.CONF.set_override('swift_url', swift_url_from_conf, enforce_type=True) client = remote.create_swift_client( TroveContext(tenant=tenant_from_ctx)) self.assertEqual('%s%s' % (swift_url_from_conf, tenant_from_ctx), client.url) def test_create_with_catalog_and_default_service_type(self): client = remote.create_swift_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.swift_public_url, client.url) def test_create_with_catalog_all_opts(self): cfg.CONF.set_override('swift_service_type', 'object-storev3', enforce_type=True) cfg.CONF.set_override('os_region_name', 'RegionTwo', enforce_type=True) client = remote.create_swift_client( TroveContext(service_catalog=self.service_catalog)) self.assertEqual(self.swiftv3_public_url_region_two, client.url) class TestEndpoints(trove_testtools.TestCase): """ Copied from glance/tests/unit/test_auth.py. 
""" def setUp(self): super(TestEndpoints, self).setUp() self.service_catalog = [ { 'endpoint_links': [], 'endpoints': [ { 'adminURL': 'http://localhost:8080/', 'region': 'RegionOne', 'internalURL': 'http://internalURL/', 'publicURL': 'http://publicURL/', }, { 'adminURL': 'http://localhost:8081/', 'region': 'RegionTwo', 'internalURL': 'http://internalURL2/', 'publicURL': 'http://publicURL2/', }, ], 'type': 'object-store', 'name': 'Object Storage Service', } ] def test_get_endpoint_empty_catalog(self): self.assertRaises(exception.EmptyCatalog, remote.get_endpoint, None) def test_get_endpoint_with_custom_server_type(self): endpoint = remote.get_endpoint(self.service_catalog, service_type='object-store', endpoint_region='RegionOne') self.assertEqual('http://publicURL/', endpoint) def test_get_endpoint_with_custom_endpoint_type(self): endpoint = remote.get_endpoint(self.service_catalog, service_type='object-store', endpoint_type='internalURL', endpoint_region='RegionOne') self.assertEqual('http://internalURL/', endpoint) def test_get_endpoint_raises_with_invalid_service_type(self): self.assertRaises(exception.NoServiceEndpoint, remote.get_endpoint, self.service_catalog, service_type='foo') def test_get_endpoint_raises_with_invalid_endpoint_type(self): self.assertRaises(exception.NoServiceEndpoint, remote.get_endpoint, self.service_catalog, service_type='object-store', endpoint_type='foo', endpoint_region='RegionOne') def test_get_endpoint_raises_with_invalid_endpoint_region(self): self.assertRaises(exception.NoServiceEndpoint, remote.get_endpoint, self.service_catalog, service_type='object-store', endpoint_region='foo', endpoint_type='internalURL') def test_get_endpoint_ignores_missing_type(self): service_catalog = [ { 'name': 'Other Service', }, { 'endpoint_links': [], 'endpoints': [ { 'adminURL': 'http://localhost:8080/', 'region': 'RegionOne', 'internalURL': 'http://internalURL/', 'publicURL': 'http://publicURL/', }, { 'adminURL': 'http://localhost:8081/', 'region': 'RegionTwo', 'internalURL': 'http://internalURL2/', 'publicURL': 'http://publicURL2/', }, ], 'type': 'object-store', 'name': 'Object Storage Service', } ] endpoint = remote.get_endpoint(service_catalog, service_type='object-store', endpoint_region='RegionOne') self.assertEqual('http://publicURL/', endpoint) trove-5.0.0/trove/tests/unittests/common/test_crypto_utils.py0000664000567000056710000000477512701410316026030 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from Crypto import Random from trove.common import crypto_utils from trove.tests.unittests import trove_testtools class TestEncryptUtils(trove_testtools.TestCase): def setUp(self): super(TestEncryptUtils, self).setUp() def tearDown(self): super(TestEncryptUtils, self).tearDown() def test_encode_decode_string(self): random_data = bytearray(Random.new().read(12)) data = ['abc', 'numbers01234', '\x00\xFF\x00\xFF\xFF\x00', random_data] for datum in data: encoded_data = crypto_utils.encode_data(datum) decoded_data = crypto_utils.decode_data(encoded_data) self.assertEqual(datum, decoded_data, "Encode/decode failed") def test_pad_unpad(self): for size in range(1, 100): data_str = 'a' * size padded_str = crypto_utils.pad_for_encryption( data_str, crypto_utils.IV_BIT_COUNT) self.assertEqual(0, len(padded_str) % crypto_utils.IV_BIT_COUNT, "Padding not successful") unpadded_str = crypto_utils.unpad_after_decryption(padded_str) self.assertEqual(data_str, unpadded_str, "String mangled after pad/unpad") def test_encrypt_decrypt(self): key = 'my_secure_key' for size in range(1, 100): orig_data = Random.new().read(size) orig_encoded = crypto_utils.encode_data(orig_data) encrypted = crypto_utils.encrypt_data(orig_encoded, key) encoded = crypto_utils.encode_data(encrypted) decoded = crypto_utils.decode_data(encoded) decrypted = crypto_utils.decrypt_data(decoded, key) final_decoded = crypto_utils.decode_data(decrypted) self.assertEqual(orig_data, final_decoded, "Decrypted data did not match original") trove-5.0.0/trove/tests/unittests/common/test_stream_codecs.py0000664000567000056710000000264412701410316026074 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from Crypto import Random from trove.common import stream_codecs from trove.tests.unittests import trove_testtools class TestStreamCodecs(trove_testtools.TestCase): def setUp(self): super(TestStreamCodecs, self).setUp() def tearDown(self): super(TestStreamCodecs, self).tearDown() def test_serialize_deserialize_base64codec(self): random_data = bytearray(Random.new().read(12)) data = ['abc', 'numbers01234', random_data] codec = stream_codecs.Base64Codec() for datum in data: serialized_data = codec.serialize(datum) deserialized_data = codec.deserialize(serialized_data) self.assertEqual(datum, deserialized_data, "Serialize/Deserialize failed") trove-5.0.0/trove/tests/unittests/common/test_exception.py0000664000567000056710000000202612701410316025251 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from trove.common.exception import TroveError from trove.tests.unittests import trove_testtools class TroveErrorTest(trove_testtools.TestCase): def test_valid_error_message_format(self): error = TroveError("%02d" % 1) self.assertEqual("01", error.message) def test_invalid_error_message_format(self): error = TroveError("test%999999sdb") self.assertEqual("test999999sdb", error.message) trove-5.0.0/trove/tests/unittests/db/0000775000567000056710000000000012701410521020735 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/db/__init__.py0000664000567000056710000000000012701410316023036 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/db/test_migration_utils.py0000664000567000056710000001227712701410316025572 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import call from mock import Mock from mock import patch from sqlalchemy.engine import reflection from sqlalchemy.schema import Column from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy import utils as db_utils from trove.tests.unittests import trove_testtools class TestDbMigrationUtils(trove_testtools.TestCase): def setUp(self): super(TestDbMigrationUtils, self).setUp() def tearDown(self): super(TestDbMigrationUtils, self).tearDown() @patch.object(reflection.Inspector, 'from_engine') def test_get_foreign_key_constraint_names_single_match(self, mock_inspector): mock_engine = Mock() (mock_inspector.return_value. get_foreign_keys.return_value) = [{'constrained_columns': ['col1'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col1'], 'name': 'constraint1'}, {'constrained_columns': ['col2'], 'referred_table': 'ref_table2', 'referred_columns': ['ref_col2'], 'name': 'constraint2'}] ret_val = db_utils.get_foreign_key_constraint_names(mock_engine, 'table1', ['col1'], 'ref_table1', ['ref_col1']) self.assertEqual(['constraint1'], ret_val) @patch.object(reflection.Inspector, 'from_engine') def test_get_foreign_key_constraint_names_multi_match(self, mock_inspector): mock_engine = Mock() (mock_inspector.return_value. 
get_foreign_keys.return_value) = [ {'constrained_columns': ['col1'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col1'], 'name': 'constraint1'}, {'constrained_columns': ['col2', 'col3'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col2', 'ref_col3'], 'name': 'constraint2'}, {'constrained_columns': ['col2', 'col3'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col2', 'ref_col3'], 'name': 'constraint3'}, {'constrained_columns': ['col4'], 'referred_table': 'ref_table2', 'referred_columns': ['ref_col4'], 'name': 'constraint4'}] ret_val = db_utils.get_foreign_key_constraint_names( mock_engine, 'table1', ['col2', 'col3'], 'ref_table1', ['ref_col2', 'ref_col3']) self.assertEqual(['constraint2', 'constraint3'], ret_val) @patch.object(reflection.Inspector, 'from_engine') def test_get_foreign_key_constraint_names_no_match(self, mock_inspector): mock_engine = Mock() (mock_inspector.return_value. get_foreign_keys.return_value) = [] ret_val = db_utils.get_foreign_key_constraint_names(mock_engine, 'table1', ['col1'], 'ref_table1', ['ref_col1']) self.assertEqual([], ret_val) @patch('trove.db.sqlalchemy.utils.ForeignKeyConstraint') def test_drop_foreign_key_constraints(self, mock_constraint): test_columns = [Column('col1', String(5)), Column('col2', String(5))] test_refcolumns = [Column('ref_col1', String(5)), Column('ref_col2', String(5))] test_constraint_names = ['constraint1', 'constraint2'] db_utils.drop_foreign_key_constraints(test_constraint_names, test_columns, test_refcolumns) expected = [call(columns=test_columns, refcolumns=test_refcolumns, name='constraint1'), call(columns=test_columns, refcolumns=test_refcolumns, name='constraint2')] self.assertEqual(expected, mock_constraint.call_args_list) trove-5.0.0/trove/tests/unittests/mysql/0000775000567000056710000000000012701410521021515 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/mysql/test_common.py0000664000567000056710000001447112701410316024427 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
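#
# NOTE: populate_validated_databases() and populate_users() validate the
# initial databases/users passed on instance create. The tests below pin
# down their contract: duplicate database names (or duplicate
# (name, host) user pairs) raise the corresponding
# DatabaseInitial*DuplicateError, and a user may only reference
# databases present in the initial database list, otherwise
# DatabaseForUserNotInDatabaseListError is raised.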
# from testtools.matchers import Equals from testtools.matchers import Is from trove.common.exception import DatabaseForUserNotInDatabaseListError from trove.common.exception import DatabaseInitialDatabaseDuplicateError from trove.common.exception import DatabaseInitialUserDuplicateError from trove.extensions.mysql.common import populate_users from trove.extensions.mysql.common import populate_validated_databases from trove.tests.unittests import trove_testtools class MySqlCommonTest(trove_testtools.TestCase): def setUp(self): super(MySqlCommonTest, self).setUp() def tearDown(self): super(MySqlCommonTest, self).tearDown() def test_initial_databases_none(self): databases = [] result = populate_validated_databases(databases) self.assertThat(len(result), Is(0)) def test_initial_databases_single(self): databases = [{'name': 'one_db'}] result = populate_validated_databases(databases) self.assertThat(len(result), Is(1)) self.assertThat(result[0]['_name'], Equals('one_db')) def test_initial_databases_unique(self): databases = [{'name': 'one_db'}, {'name': 'diff_db'}] result = populate_validated_databases(databases) self.assertThat(len(result), Is(2)) def test_initial_databases_duplicate(self): databases = [{'name': 'same_db'}, {'name': 'same_db'}] self.assertRaises(DatabaseInitialDatabaseDuplicateError, populate_validated_databases, databases) def test_initial_databases_intermingled(self): databases = [{'name': 'a_db'}, {'name': 'b_db'}, {'name': 'a_db'}] self.assertRaises(DatabaseInitialDatabaseDuplicateError, populate_validated_databases, databases) def test_populate_users_single(self): users = [{'name': 'bob', 'password': 'x'}] result = populate_users(users) self.assertThat(len(result), Is(1)) self.assertThat(result[0]['_name'], Equals('bob')) self.assertThat(result[0]['_password'], Equals('x')) def test_populate_users_unique_host(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'bob', 'password': 'x', 'host': '128.0.0.1'}] result = populate_users(users) self.assertThat(len(result), Is(2)) def test_populate_users_unique_name(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'tom', 'password': 'x', 'host': '127.0.0.1'}] result = populate_users(users) self.assertThat(len(result), Is(2)) def test_populate_users_duplicate(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'bob', 'password': 'y', 'host': '127.0.0.1'}] self.assertRaises(DatabaseInitialUserDuplicateError, populate_users, users) def test_populate_users_intermingled(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'tom', 'password': 'y', 'host': '128.0.0.1'}, {'name': 'bob', 'password': 'z', 'host': '127.0.0.1'}] self.assertRaises(DatabaseInitialUserDuplicateError, populate_users, users) def test_populate_users_both_db_list_empty(self): initial_databases = [] users = [{"name": "bob", "password": "x"}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(1)) def test_populate_users_initial_db_list_empty(self): initial_databases = [] users = [{"name": "bob", "password": "x", "databases": [{"name": "my_db"}]}] self.assertRaises(DatabaseForUserNotInDatabaseListError, populate_users, users, initial_databases) def test_populate_users_user_db_list_empty(self): initial_databases = ['my_db'] users = [{"name": "bob", "password": "x"}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(1)) def test_populate_users_db_in_list(self): initial_databases = ['my_db'] users = 
[{"name": "bob", "password": "x", "databases": [{"name": "my_db"}]}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(1)) def test_populate_users_db_multi_in_list(self): initial_databases = ['a_db', 'b_db', 'c_db', 'd_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "a_db"}]}, {"name": "tom", "password": "y", "databases": [{"name": "c_db"}]}, {"name": "sue", "password": "z", "databases": [{"name": "c_db"}]}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(3)) def test_populate_users_db_not_in_list(self): initial_databases = ['a_db', 'b_db', 'c_db', 'd_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "fake_db"}]}] self.assertRaises(DatabaseForUserNotInDatabaseListError, populate_users, users, initial_databases) def test_populate_users_db_multi_not_in_list(self): initial_databases = ['a_db', 'b_db', 'c_db', 'd_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "a_db"}]}, {"name": "tom", "password": "y", "databases": [{"name": "fake_db"}]}, {"name": "sue", "password": "z", "databases": [{"name": "d_db"}]}] self.assertRaises(DatabaseForUserNotInDatabaseListError, populate_users, users, initial_databases) trove-5.0.0/trove/tests/unittests/mysql/__init__.py0000664000567000056710000000000012701410316023616 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/mysql/test_user_controller.py0000664000567000056710000004123212701410316026353 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import jsonschema from testtools.matchers import Is from trove.extensions.mysql.service import SchemaController from trove.extensions.mysql.service import UserAccessController from trove.extensions.mysql.service import UserController from trove.tests.unittests import trove_testtools class TestUserController(trove_testtools.TestCase): def setUp(self): super(TestUserController, self).setUp() self.controller = UserController() def test_get_create_schema(self): body = {'users': [{'name': 'test', 'password': 'test'}]} schema = self.controller.get_schema('create', body) self.assertTrue('users' in schema['properties']) def test_get_update_user_pw(self): body = {'users': [{'name': 'test', 'password': 'test'}]} schema = self.controller.get_schema('update_all', body) self.assertTrue('users' in schema['properties']) def test_get_update_user_db(self): body = {'databases': [{'name': 'test'}, {'name': 'test'}]} schema = self.controller.get_schema('update_all', body) self.assertTrue('databases' in schema['properties']) def test_validate_create_empty(self): body = {"users": []} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals("[] is too short")) # self.assertThat(errors[0].path.pop(), Equals("users")) def test_validate_create_short_password(self): body = {"users": [{"name": "joe", "password": ""}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("password", error_paths) def test_validate_create_no_password(self): body = {"users": [{"name": "joe"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'password' is a required property", error_messages) def test_validate_create_short_name(self): body = {"users": [{"name": ""}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(3)) self.assertIn("'password' is a required property", error_messages) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("name", error_paths) def test_validate_create_complete_db_empty(self): body = {"users": [{"databases": [], "name": "joe", "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) 
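# A complete user entry with an explicitly empty databases list is
# still valid, so no errors are expected here.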
self.assertThat(len(errors), Is(0)) def test_validate_create_complete_db_no_name(self): body = {"users": [{"databases": [{}], "name": "joe", "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'name' is a required property", error_messages) def test_validate_create_bogus_attr(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "bogosity": 100, "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) # TODO(zed): After API increment, this will NOT be valid. self.assertTrue(validator.is_valid(body)) def test_validate_create_complete_db(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_no_wildcard(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "192.168.1.1"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_wildcard(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "%"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_wildcard_prefix(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "%.168.1.1"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_wildcard_middle(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "192.%.1.1"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'192.%.1.1' does not match '^[%]?[\\\\w(-).]*[%]?$'", error_messages) def test_validate_create_host_wildcard_suffix(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "192.168.1.%"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_update_empty(self): body = {"users": []} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals("[] is too short")) # self.assertThat(errors[0].path.pop(), Equals("users")) def test_validate_update_short_password(self): body = {"users": [{"name": "joe", "password": ""}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) 
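# As on create, an empty password should trip both the minLength check
# and the '^.*[0-9a-zA-Z]+.*$' pattern check.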
self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("password", error_paths) def test_validate_update_user_complete(self): body = {"users": [{"name": "joe", "password": "", "databases": [{"name": "testdb"}]}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("password", error_paths) def test_validate_update_user_with_db_short_password(self): body = {"users": [{"name": "joe", "password": "", "databases": [{"name": "testdb"}]}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("password", error_paths) def test_validate_update_no_password(self): body = {"users": [{"name": "joe"}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'password' is a required property", error_messages) def test_validate_update_database_complete(self): body = {"databases": [{"name": "test1"}, {"name": "test2"}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_update_database_empty(self): body = {"databases": []} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals('[] is too short')) def test_validate_update_short_name(self): body = {"users": [{"name": ""}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(3)) self.assertIn("'password' is a required property", error_messages) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("name", error_paths) def test_get_update_user_attributes(self): body = {'user': {'name': 'test'}} schema = 
self.controller.get_schema('update', body) self.assertTrue('user' in schema['properties']) def test_validate_update_user_attributes(self): body = {'user': {'name': 'test', 'password': 'test', 'host': '%'}} schema = self.controller.get_schema('update', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_update_user_attributes_empty(self): body = {"user": {}} schema = self.controller.get_schema('update', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) def test_validate_host_in_user_attributes(self): body_empty_host = {'user': { 'name': 'test', 'password': 'test', 'host': '%' }} body_with_host = {'user': { 'name': 'test', 'password': 'test', 'host': '1.1.1.1' }} body_none_host = {'user': { 'name': 'test', 'password': 'test', 'host': "" }} schema_empty_host = self.controller.get_schema('update', body_empty_host) schema_with_host = self.controller.get_schema('update', body_with_host) schema_none_host = self.controller.get_schema('update', body_none_host) validator_empty_host = jsonschema.Draft4Validator(schema_empty_host) validator_with_host = jsonschema.Draft4Validator(schema_with_host) validator_none_host = jsonschema.Draft4Validator(schema_none_host) self.assertTrue(validator_empty_host.is_valid(body_empty_host)) self.assertTrue(validator_with_host.is_valid(body_with_host)) self.assertFalse(validator_none_host.is_valid(body_none_host)) class TestUserAccessController(trove_testtools.TestCase): def test_validate_update_db(self): body = {"databases": []} schema = (UserAccessController()).get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals("[] is too short")) # self.assertThat(errors[0].path.pop(), Equals("databases")) class TestSchemaController(trove_testtools.TestCase): def setUp(self): super(TestSchemaController, self).setUp() self.controller = SchemaController() self.body = { "databases": [ { "name": "first_db", "collate": "latin2_general_ci", "character_set": "latin2" }, { "name": "second_db" } ] } def test_validate_mixed(self): schema = self.controller.get_schema('create', self.body) self.assertNotEqual(schema, None) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(self.body)) def test_validate_mixed_with_no_name(self): body = self.body.copy() body['databases'].append({"collate": "some_collation"}) schema = self.controller.get_schema('create', body) self.assertNotEqual(schema, None) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) def test_validate_empty(self): body = {"databases": []} schema = self.controller.get_schema('create', body) self.assertNotEqual(schema, None) self.assertTrue('databases' in body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) trove-5.0.0/trove/tests/unittests/api/0000775000567000056710000000000012701410521021121 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/api/__init__.py0000664000567000056710000000000012701410316023222 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/api/common/0000775000567000056710000000000012701410521022411 5ustar 
jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/api/common/test_limits.py0000664000567000056710000006670312701410316025341 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests dealing with HTTP rate-limiting. """ import httplib from mock import Mock, MagicMock, patch from oslo_serialization import jsonutils import six import webob from trove.common import limits from trove.common.limits import Limit from trove.limits.service import LimitsController from trove.limits import views from trove.quota.models import Quota from trove.quota.quota import QUOTAS from trove.tests.unittests import trove_testtools TEST_LIMITS = [ Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), Limit("POST", "*", ".*", 7, limits.PER_MINUTE), Limit("POST", "/mgmt", "^/mgmt", 3, limits.PER_MINUTE), Limit("PUT", "*", "", 10, limits.PER_MINUTE), ] class BaseLimitTestSuite(trove_testtools.TestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.absolute_limits = {"max_instances": 55, "max_volumes": 100, "max_backups": 40} class LimitsControllerTest(BaseLimitTestSuite): def setUp(self): super(LimitsControllerTest, self).setUp() @patch.object(QUOTAS, 'get_all_quotas_by_tenant', return_value={}) def test_limit_index_empty(self, quotas_mock): limit_controller = LimitsController() req = MagicMock() req.environ = {} view = limit_controller.index(req, "test_tenant_id") expected = {'limits': [{'verb': 'ABSOLUTE'}]} self.assertEqual(expected, view._data) def test_limit_index(self): tenant_id = "test_tenant_id" limit_controller = LimitsController() limits = [ { "URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "PUT", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "DELETE", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "GET", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 } ] abs_limits = {"instances": Quota(tenant_id=tenant_id, resource="instances", hard_limit=100), "backups": Quota(tenant_id=tenant_id, resource="backups", hard_limit=40), "volumes": Quota(tenant_id=tenant_id, resource="volumes", hard_limit=55)} req = MagicMock() req.environ = {"trove.limits": limits} with patch.object(QUOTAS, 'get_all_quotas_by_tenant', return_value=abs_limits): view = limit_controller.index(req, tenant_id) expected = { 'limits': [ { 'max_instances': 100, 'max_backups': 40, 'verb': 'ABSOLUTE', 'max_volumes': 55 }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'POST', 'remaining': 2, 'unit': 'MINUTE' }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'PUT', 'remaining': 2, 'unit': 'MINUTE' }, { 'regex': '.*', 
'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'DELETE', 'remaining': 2, 'unit': 'MINUTE' }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'GET', 'remaining': 2, 'unit': 'MINUTE' } ] } self.assertEqual(expected, view._data) class TestLimiter(limits.Limiter): """Note: This was taken from Nova.""" pass class LimitMiddlewareTest(BaseLimitTestSuite): """ Tests for the `limits.RateLimitingMiddleware` class. """ @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.TestLimiter" % self.__class__.__module__) def test_limit_class(self): # Test that middleware selected correct limiter class. assert isinstance(self.app._limiter, TestLimiter) def test_good_request(self): # Test successful GET request through middleware. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) def test_limited_request_json(self): # Test a rate-limited (413) GET request through middleware. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(413, response.status_int) self.assertTrue('Retry-After' in response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." value = body["overLimit"]["details"].strip() self.assertEqual(expected, value) self.assertTrue("retryAfter" in body["overLimit"]) retryAfter = body["overLimit"]["retryAfter"] self.assertEqual("60", retryAfter) class LimitTest(BaseLimitTestSuite): """ Tests for the `limits.Limit` class. """ def test_GET_no_delay(self): # Test a limit handles 1 GET per second. limit = Limit("GET", "*", ".*", 1, 1) limit._get_time = MagicMock(return_value=0.0) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): # Test two calls to 1 GET per second limit. limit = Limit("GET", "*", ".*", 1, 1) limit._get_time = MagicMock(return_value=0.0) delay = limit("GET", "/anything") self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) self.assertEqual(1, limit.next_request) self.assertEqual(0, limit.last_request) limit._get_time = MagicMock(return_value=4.0) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) class ParseLimitsTest(BaseLimitTestSuite): """ Tests for the default limits parser in the in-memory `limits.Limiter` class. """ def test_invalid(self): # Test that parse_limits() handles invalid input correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, ';;;;;') def test_bad_rule(self): # Test that parse_limits() handles bad rules correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, 'GET, *, .*, 20, minute') def test_missing_arg(self): # Test that parse_limits() handles missing args correctly. 
self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20)') def test_bad_value(self): # Test that parse_limits() handles bad values correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, foo, minute)') def test_bad_unit(self): # Test that parse_limits() handles bad units correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20, lightyears)') def test_multiple_rules(self): # Test that parse_limits() handles multiple rules correctly. try: l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' '(PUT, /foo*, /foo.*, 10, hour);' '(POST, /bar*, /bar.*, 5, second);' '(Say, /derp*, /derp.*, 1, day)') except ValueError as e: assert False, str(e) # Make sure the number of returned limits are correct self.assertEqual(4, len(l)) # Check all the verbs... expected = ['GET', 'PUT', 'POST', 'SAY'] self.assertEqual(expected, [t.verb for t in l]) # ...the URIs... expected = ['*', '/foo*', '/bar*', '/derp*'] self.assertEqual(expected, [t.uri for t in l]) # ...the regexes... expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] self.assertEqual(expected, [t.regex for t in l]) # ...the values... expected = [20, 10, 5, 1] self.assertEqual(expected, [t.value for t in l]) # ...and the units... expected = [limits.PER_MINUTE, limits.PER_HOUR, limits.PER_SECOND, limits.PER_DAY] self.assertEqual(expected, [t.unit for t in l]) class LimiterTest(BaseLimitTestSuite): """ Tests for the in-memory `limits.Limiter` class. """ def update_limits(self, delay, limit_list): for ln in limit_list: ln._get_time = Mock(return_value=delay) def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() userlimits = {'user:user3': ''} self.update_limits(0.0, TEST_LIMITS) self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) def _check(self, num, verb, url, username=None): """Check and yield results from checks.""" for x in range(num): yield self.limiter.check_for_delay(verb, url, username)[0] def _check_sum(self, num, verb, url, username=None): """Check and sum results from checks.""" results = self._check(num, verb, url, username) return sum(item for item in results if item) def test_no_delay_GET(self): """ Simple test to ensure no delay on a single call for a limit verb we didn"t set. """ delay = self.limiter.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_no_delay_PUT(self): # Simple test to ensure no delay on a single call for a known limit. delay = self.limiter.check_for_delay("PUT", "/anything") self.assertEqual((None, None), delay) def test_delay_PUT(self): """ Ensure the 11th PUT will result in a delay of 6.0 seconds until the next request will be granted. """ expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_POST(self): """ Ensure the 8th POST will result in a delay of 6.0 seconds until the next request will be granted. """ expected = [None] * 7 results = list(self._check(7, "POST", "/anything")) self.assertEqual(expected, results) expected = 60.0 / 7.0 results = self._check_sum(1, "POST", "/anything") self.assertAlmostEqual(expected, results, 8) def test_delay_POST_mgmt(self): """ Ensure the 4th mgmt POST will result in a delay of 6.0 seconds until the next request will be granted. 
""" expected = [None] * 3 results = list(self._check(3, "POST", "/mgmt")) self.assertEqual(expected, results) expected = 60.0 / 3.0 results = self._check_sum(1, "POST", "/mgmt") self.assertAlmostEqual(expected, results, 4) def test_delay_GET(self): # Ensure the 11th GET will result in NO delay. expected = [None] * 11 results = list(self._check(11, "GET", "/mgmt")) self.assertEqual(expected, results) def test_delay_PUT_wait(self): """ Ensure after hitting the limit and then waiting for the correct amount of time, the limit will be lifted. """ expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) # Advance time self.update_limits(6.0, self.limiter.levels[None]) expected = [None, 6.0] results = list(self._check(2, "PUT", "/anything")) self.assertEqual(expected, results) def test_multiple_delays(self): # Ensure multiple requests still get a delay. expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) self.update_limits(1.0, self.limiter.levels[None]) expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) def test_user_limit(self): # Test user-specific limits. self.assertEqual([], self.limiter.levels['user3']) def test_multiple_users(self): # Tests involving multiple users. # User1 self.update_limits(0.0, self.limiter.levels["user1"]) expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 expected = [None] * 10 + [6.0] * 5 results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User3 expected = [None] * 20 results = list(self._check(20, "PUT", "/anything", "user3")) self.assertEqual(expected, results) # User1 again self.update_limits(1.0, self.limiter.levels["user1"]) expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 again self.update_limits(2.0, self.limiter.levels["user2"]) expected = [4.0] * 5 results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) class WsgiLimiterTest(BaseLimitTestSuite): """ Tests for `limits.WsgiLimiter` class. """ def setUp(self): """Run before each test.""" super(WsgiLimiterTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): """Get data describing a limit request verb/path.""" return jsonutils.dumps({"verb": verb, "path": path}) def _request(self, verb, url, username=None): """Make sure that POSTing to the given url causes the given username to perform the given action. Make the internal rate limiter return delay and make sure that the WSGI app returns the correct response. """ if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(403, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(204, response.status_int) def test_invalid_methods(self): # Only POSTs should work. 
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(405, response.status_int) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertAlmostEqual(float(delay), 60, 1) def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertAlmostEqual(float(delay), 60, 1) delay = self._request("GET", "/delayed", "user2") self.assertAlmostEqual(float(delay), 60, 1) class FakeHttplibSocket(object): """ Fake `httplib.HTTPResponse` replacement. """ def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" self._buffer = six.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """ Fake `httplib.HTTPConnection`. """ def __init__(self, app, host): """ Initialize `FakeHttplibConnection`. """ self.app = app self.host = host def request(self, method, path, body="", headers=None): """ Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `httplib.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = body resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = httplib.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls httplib.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. """ def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = httplib.HTTPConnection httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """ Tests for the `limits.WsgiLimiterProxy` class. """ def setUp(self): """ Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `httplib` library. 
""" super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) self.oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") def test_200(self): # Successful request test. delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): # Forbidden request test. delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() self.assertAlmostEqual(float(delay), 60, 1) self.assertEqual("403 Forbidden\n\nOnly 1 GET request(s) can be" " made to /delayed every minute.", error) def tearDown(self): # restore original HTTPConnection object httplib.HTTPConnection = self.oldHTTPConnection super(WsgiLimiterProxyTest, self).tearDown() class LimitsViewTest(trove_testtools.TestCase): def setUp(self): super(LimitsViewTest, self).setUp() def test_empty_data(self): """ Test the default returned results if an empty dictionary is given """ rate_limit = {} view = views.LimitView(rate_limit) self.assertIsNotNone(view) data = view.data() expected = {'limit': {'regex': '', 'nextAvailable': '1970-01-01T00:00:00Z', 'uri': '', 'value': '', 'verb': '', 'remaining': 0, 'unit': ''}} self.assertEqual(expected, data) def test_data(self): """ Test the returned results for a fully populated dictionary """ rate_limit = { "URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 } view = views.LimitView(rate_limit) self.assertIsNotNone(view) data = view.data() expected = {'limit': {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'POST', 'remaining': 2, 'unit': 'MINUTE'}} self.assertEqual(expected, data) class LimitsViewsTest(trove_testtools.TestCase): def setUp(self): super(LimitsViewsTest, self).setUp() def test_empty_data(self): rate_limits = [] abs_view = dict() view_data = views.LimitViews(abs_view, rate_limits) self.assertIsNotNone(view_data) data = view_data.data() expected = {'limits': [{'verb': 'ABSOLUTE'}]} self.assertEqual(expected, data) def test_data(self): rate_limits = [ { "URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "PUT", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "DELETE", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "GET", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 } ] abs_view = {"instances": 55, "volumes": 100, "backups": 40} view_data = views.LimitViews(abs_view, rate_limits) self.assertIsNotNone(view_data) data = view_data.data() expected = {'limits': [{'max_instances': 55, 'max_backups': 40, 'verb': 'ABSOLUTE', 'max_volumes': 100}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'POST', 'remaining': 2, 'unit': 'MINUTE'}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'PUT', 'remaining': 2, 'unit': 'MINUTE'}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'DELETE', 'remaining': 2, 'unit': 'MINUTE'}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'GET', 'remaining': 2, 'unit': 'MINUTE'}]} self.assertEqual(expected, 
data) trove-5.0.0/trove/tests/unittests/api/common/__init__.py0000664000567000056710000000000012701410316024512 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/api/common/test_extensions.py0000664000567000056710000000746012701410316026232 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import os import pkg_resources import trove import ConfigParser as config_parser from trove.common import extensions from trove.extensions.routes.account import Account from trove.extensions.routes.mgmt import Mgmt from trove.extensions.routes.mysql import Mysql from trove.extensions.routes.security_group import Security_group from trove.tests.unittests import trove_testtools DEFAULT_EXTENSION_MAP = { 'Account': [Account, extensions.ExtensionDescriptor], 'Mgmt': [Mgmt, extensions.ExtensionDescriptor], 'MYSQL': [Mysql, extensions.ExtensionDescriptor], 'SecurityGroup': [Security_group, extensions.ExtensionDescriptor] } EP_TEXT = ''' account = trove.extensions.routes.account:Account mgmt = trove.extensions.routes.mgmt:Mgmt mysql = trove.extensions.routes.mysql:Mysql security_group = trove.extensions.routes.security_group:Security_group invalid = trove.tests.unittests.api.common.test_extensions:InvalidExtension ''' class InvalidExtension(object): def get_name(self): return "Invalid" def get_description(self): return "Invalid Extension" def get_alias(self): return "Invalid" def get_namespace(self): return "http://TBD" def get_updated(self): return "2014-08-14T13:25:27-06:00" def get_resources(self): return [] class TestExtensionLoading(trove_testtools.TestCase): def setUp(self): super(TestExtensionLoading, self).setUp() def tearDown(self): super(TestExtensionLoading, self).tearDown() def _assert_default_extensions(self, ext_list): for alias, ext in ext_list.items(): for clazz in DEFAULT_EXTENSION_MAP[alias]: self.assertIsInstance(ext, clazz, "Improper extension class") @mock.patch("pkg_resources.iter_entry_points") def test_default_extensions(self, mock_iter_eps): trove_base = os.path.abspath(os.path.join( os.path.dirname(trove.__file__), "..")) setup_path = "%s/setup.cfg" % trove_base # check if we are running as unit test without module installed if os.path.isfile(setup_path): parser = config_parser.ConfigParser() parser.read(setup_path) entry_points = parser.get( 'entry_points', extensions.ExtensionManager.EXT_NAMESPACE) eps = pkg_resources.EntryPoint.parse_group('plugins', entry_points) mock_iter_eps.return_value = eps.values() extension_mgr = extensions.ExtensionManager() self.assertEqual(DEFAULT_EXTENSION_MAP.keys().sort(), extension_mgr.extensions.keys().sort(), "Invalid extension names") self._assert_default_extensions(extension_mgr.extensions) @mock.patch("pkg_resources.iter_entry_points") def test_invalid_extension(self, mock_iter_eps): eps = pkg_resources.EntryPoint.parse_group('mock', EP_TEXT) mock_iter_eps.return_value = eps.values() extension_mgr = extensions.ExtensionManager() 
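# (How the fake plugin wiring above works, in isolation:
# pkg_resources.EntryPoint.parse_group() turns an ini-style block into a
# {name: EntryPoint} dict, and patching pkg_resources.iter_entry_points
# makes a loader see only those fakes. The group name and entry point
# below are made up for illustration:
#
#   import mock
#   import pkg_resources
#
#   eps = pkg_resources.EntryPoint.parse_group(
#       'demo.plugins', 'upper = string:upper')
#
#   with mock.patch('pkg_resources.iter_entry_points',
#                   return_value=eps.values()):
#       names = [ep.name for ep in
#                pkg_resources.iter_entry_points('demo.plugins')]
#   # names == ['upper']
# )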
self.assertEqual(len(DEFAULT_EXTENSION_MAP.keys()), len(extension_mgr.extensions), "Loaded invalid extensions") self._assert_default_extensions(extension_mgr.extensions) trove-5.0.0/trove/tests/unittests/api/test_versions.py0000664000567000056710000002123212701410316024404 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock from trove.tests.unittests import trove_testtools from trove.versions import BaseVersion from trove.versions import Version from trove.versions import VersionDataView from trove.versions import VERSIONS from trove.versions import VersionsAPI from trove.versions import VersionsController from trove.versions import VersionsDataView BASE_URL = 'http://localhost' class VersionsControllerTest(trove_testtools.TestCase): def setUp(self): super(VersionsControllerTest, self).setUp() self.controller = VersionsController() self.assertIsNotNone(self.controller, "VersionsController instance was None") def test_index_json(self): request = Mock() result = self.controller.index(request) self.assertIsNotNone(result, 'Result was None') result._data = Mock() result._data.data_for_json = \ lambda: {'status': 'CURRENT', 'updated': '2012-08-01T00:00:00Z', 'id': 'v1.0', 'links': [{'href': 'http://localhost/v1.0/', 'rel': 'self'}]} # can be anything but xml json_data = result.data("application/json") self.assertIsNotNone(json_data, 'Result json_data was None') self.assertEqual('v1.0', json_data['id'], 'Version id is incorrect') self.assertEqual('CURRENT', json_data['status'], 'Version status is incorrect') self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'], 'Version updated value is incorrect') def test_show_json(self): request = Mock() request.url_version = '1.0' result = self.controller.show(request) self.assertIsNotNone(result, 'Result was None') json_data = result.data("application/json") self.assertIsNotNone(json_data, "JSON data was None") version = json_data.get('version', None) self.assertIsNotNone(version, "Version was None") self.assertEqual('CURRENT', version['status'], "Version status was not 'CURRENT'") self.assertEqual('2012-08-01T00:00:00Z', version['updated'], "Version updated was not '2012-08-01T00:00:00Z'") self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'") class BaseVersionTestCase(trove_testtools.TestCase): def setUp(self): super(BaseVersionTestCase, self).setUp() id = VERSIONS['1.0']['id'] status = VERSIONS['1.0']['status'] base_url = BASE_URL updated = VERSIONS['1.0']['updated'] self.base_version = BaseVersion(id, status, base_url, updated) self.assertIsNotNone(self.base_version, 'BaseVersion instance was None') def test_data(self): data = self.base_version.data() self.assertIsNotNone(data, 'Base Version data was None') self.assertTrue(type(data) is dict, "Base Version data is not a dict") self.assertEqual('CURRENT', data['status'], "Data status was not 'CURRENT'") self.assertEqual('2012-08-01T00:00:00Z', 
data['updated'], "Data updated was not '2012-08-01T00:00:00Z'") self.assertEqual('v1.0', data['id'], "Data status was not 'v1.0'") def test_url(self): url = self.base_version.url() self.assertIsNotNone(url, 'Url was None') self.assertEqual('http://localhost/v1.0/', url, "Base Version url is incorrect") class VersionTestCase(trove_testtools.TestCase): def setUp(self): super(VersionTestCase, self).setUp() id = VERSIONS['1.0']['id'] status = VERSIONS['1.0']['status'] base_url = BASE_URL updated = VERSIONS['1.0']['updated'] self.version = Version(id, status, base_url, updated) self.assertIsNotNone(self.version, 'Version instance was None') def test_url_no_trailing_slash(self): url = self.version.url() self.assertIsNotNone(url, 'Version url was None') self.assertEqual(BASE_URL + '/', url, 'Base url value was incorrect') def test_url_with_trailing_slash(self): self.version.base_url = 'http://localhost/' url = self.version.url() self.assertEqual(BASE_URL + '/', url, 'Base url value was incorrect') class VersionDataViewTestCase(trove_testtools.TestCase): def setUp(self): super(VersionDataViewTestCase, self).setUp() # get a version object first id = VERSIONS['1.0']['id'] status = VERSIONS['1.0']['status'] base_url = BASE_URL updated = VERSIONS['1.0']['updated'] self.version = Version(id, status, base_url, updated) self.assertIsNotNone(self.version, 'Version instance was None') # then create an instance of VersionDataView self.version_data_view = VersionDataView(self.version) self.assertIsNotNone(self.version_data_view, 'Version Data view instance was None') def test_data_for_json(self): json_data = self.version_data_view.data_for_json() self.assertIsNotNone(json_data, "JSON data was None") self.assertTrue(type(json_data) is dict, "JSON version data is not a dict") self.assertIsNotNone(json_data.get('version'), "Dict json_data has no key 'version'") data = json_data['version'] self.assertIsNotNone(data, "JSON data version was None") self.assertEqual('CURRENT', data['status'], "Data status was not 'CURRENT'") self.assertEqual('2012-08-01T00:00:00Z', data['updated'], "Data updated was not '2012-08-01T00:00:00Z'") self.assertEqual('v1.0', data['id'], "Data status was not 'v1.0'") class VersionsDataViewTestCase(trove_testtools.TestCase): def setUp(self): super(VersionsDataViewTestCase, self).setUp() # get a version object, put it in a list self.versions = [] id = VERSIONS['1.0']['id'] status = VERSIONS['1.0']['status'] base_url = BASE_URL updated = VERSIONS['1.0']['updated'] self.version = Version(id, status, base_url, updated) self.assertIsNotNone(self.version, 'Version instance was None') self.versions.append(self.version) # then create an instance of VersionsDataView self.versions_data_view = VersionsDataView(self.versions) self.assertIsNotNone(self.versions_data_view, 'Versions Data view instance was None') def test_data_for_json(self): json_data = self.versions_data_view.data_for_json() self.assertIsNotNone(json_data, "JSON data was None") self.assertTrue(type(json_data) is dict, "JSON versions data is not a dict") self.assertIsNotNone(json_data.get('versions', None), "Dict json_data has no key 'versions'") versions = json_data['versions'] self.assertIsNotNone(versions, "Versions was None") self.assertEqual(1, len(versions), "Versions length != 1") # explode the version object versions_data = [v.data() for v in self.versions] d1 = versions_data.pop() d2 = versions.pop() self.assertEqual(d1['id'], d2['id'], "Version ids are not equal") class VersionAPITestCase(trove_testtools.TestCase): def 
setUp(self): super(VersionAPITestCase, self).setUp() def test_instance(self): self.versions_api = VersionsAPI() self.assertIsNotNone(self.versions_api, "VersionsAPI instance was None") trove-5.0.0/trove/tests/unittests/datastore/0000775000567000056710000000000012701410521022336 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/datastore/__init__.py0000664000567000056710000000000012701410316024437 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/datastore/test_datastore_version_metadata.py0000664000567000056710000001002512701410316031342 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import exception from trove.datastore import models as datastore_models from trove.tests.unittests.datastore.base import TestDatastoreBase class TestDatastoreVersionMetadata(TestDatastoreBase): def setUp(self): super(TestDatastoreVersionMetadata, self).setUp() def tearDown(self): super(TestDatastoreVersionMetadata, self).tearDown() def test_map_flavors_to_datastore(self): datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=self.flavor_id, deleted=False, key='flavor') self.assertEqual(str(self.flavor_id), mapping.value) self.assertEqual(ds_version.id, mapping.datastore_version_id) self.assertEqual('flavor', str(mapping.key)) def test_add_existing_associations(self): dsmetadata = datastore_models.DatastoreVersionMetadata self.assertRaisesRegexp( exception.DatastoreFlavorAssociationAlreadyExists, "Flavor %s is already associated with datastore %s version %s" % (self.flavor_id, self.ds_name, self.ds_version), dsmetadata.add_datastore_version_flavor_association, self.ds_name, self.ds_version, [self.flavor_id]) def test_delete_nonexistent_mapping(self): dsmeta = datastore_models.DatastoreVersionMetadata self.assertRaisesRegexp( exception.DatastoreFlavorAssociationNotFound, "Flavor 2 is not supported for datastore %s version %s" % (self.ds_name, self.ds_version), dsmeta.delete_datastore_version_flavor_association, self.ds_name, self.ds_version, flavor_id=2) def test_delete_mapping(self): flavor_id = 2 dsmetadata = datastore_models. 
DatastoreVersionMetadata dsmetadata.add_datastore_version_flavor_association(self.ds_name, self.ds_version, [flavor_id]) dsmetadata.delete_datastore_version_flavor_association(self.ds_name, self.ds_version, flavor_id) datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=flavor_id, key='flavor') self.assertTrue(mapping.deleted) # check update dsmetadata.add_datastore_version_flavor_association( self.ds_name, self.ds_version, [flavor_id]) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=flavor_id, key='flavor') self.assertFalse(mapping.deleted) # clear the mapping datastore_models.DatastoreVersionMetadata.\ delete_datastore_version_flavor_association(self.ds_name, self.ds_version, flavor_id) trove-5.0.0/trove/tests/unittests/datastore/base.py0000664000567000056710000000702412701410316023627 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from trove.datastore import models as datastore_models from trove.datastore.models import Capability from trove.datastore.models import Datastore from trove.datastore.models import DatastoreVersion from trove.datastore.models import DatastoreVersionMetadata from trove.datastore.models import DBCapabilityOverrides from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class TestDatastoreBase(trove_testtools.TestCase): def setUp(self): # Basic setup and mock/fake structures for testing only super(TestDatastoreBase, self).setUp() util.init_db() self.rand_id = str(uuid.uuid4()) self.ds_name = "my-test-datastore" + self.rand_id self.ds_version = "my-test-version" + self.rand_id self.capability_name = "root_on_create" + self.rand_id self.capability_desc = "Enables root on create" self.capability_enabled = True self.datastore_version_id = str(uuid.uuid4()) self.flavor_id = 1 datastore_models.update_datastore(self.ds_name, False) self.datastore = Datastore.load(self.ds_name) datastore_models.update_datastore_version( self.ds_name, self.ds_version, "mysql", "", "", True) DatastoreVersionMetadata.add_datastore_version_flavor_association( self.ds_name, self.ds_version, [self.flavor_id]) self.datastore_version = DatastoreVersion.load(self.datastore, self.ds_version) self.test_id = self.datastore_version.id self.cap1 = Capability.create(self.capability_name, self.capability_desc, True) self.cap2 = Capability.create("require_volume" + self.rand_id, "Require external volume", True) self.cap3 = Capability.create("test_capability" + self.rand_id, "Test capability", False) def tearDown(self): super(TestDatastoreBase, self).tearDown() capabilities_overridden = DBCapabilityOverrides.find_all( datastore_version_id=self.datastore_version.id).all() for ce in capabilities_overridden: ce.delete() self.cap1.delete() self.cap2.delete() 
self.cap3.delete() datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id).delete() Datastore.load(self.ds_name).delete() def capability_name_filter(self, capabilities): new_capabilities = [] for capability in capabilities: if self.rand_id in capability.name: new_capabilities.append(capability) return new_capabilities trove-5.0.0/trove/tests/unittests/datastore/test_datastore_versions.py0000664000567000056710000000456612701410316027702 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.datastore.models import DatastoreVersion from trove.tests.unittests.datastore.base import TestDatastoreBase class TestDatastoreVersions(TestDatastoreBase): def test_load_datastore_version(self): datastore_version = DatastoreVersion.load(self.datastore, self.ds_version) self.assertEqual(self.ds_version, datastore_version.name) def test_datastore_verison_capabilities(self): self.datastore_version.capabilities.add(self.cap1, enabled=False) test_filtered_capabilities = self.capability_name_filter( self.datastore_version.capabilities) self.assertEqual(3, len(test_filtered_capabilities), 'Capabilities the test thinks it has are: %s, ' 'Filtered capabilities: %s' % (self.datastore_version.capabilities, test_filtered_capabilities)) # Test a fresh reloading of the datastore self.datastore_version = DatastoreVersion.load(self.datastore, self.ds_version) test_filtered_capabilities = self.capability_name_filter( self.datastore_version.capabilities) self.assertEqual(3, len(test_filtered_capabilities), 'Capabilities the test thinks it has are: %s, ' 'Filtered capabilities: %s' % (self.datastore_version.capabilities, test_filtered_capabilities)) self.assertIn(self.cap2.name, self.datastore_version.capabilities) self.assertNotIn("non-existent", self.datastore_version.capabilities) self.assertIn(self.cap1.name, self.datastore_version.capabilities) trove-5.0.0/trove/tests/unittests/datastore/test_capability.py0000664000567000056710000000374312701410316026101 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
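# The fixture above appends str(uuid.uuid4()) to every datastore and
# capability name so records created by one test cannot collide with
# another test run against the same database. A minimal sketch of the
# same pattern using addCleanup() instead of a hand-written tearDown;
# FakeRegistry is a hypothetical stand-in for the model layer.
import unittest
import uuid


class FakeRegistry(object):
    """Hypothetical store standing in for the datastore models."""
    items = {}

    @classmethod
    def create(cls, name):
        cls.items[name] = True

    @classmethod
    def delete(cls, name):
        cls.items.pop(name, None)


class UniqueNameFixtureTest(unittest.TestCase):
    def setUp(self):
        super(UniqueNameFixtureTest, self).setUp()
        self.rand_id = str(uuid.uuid4())
        self.ds_name = "my-test-datastore" + self.rand_id
        FakeRegistry.create(self.ds_name)
        # addCleanup callbacks run in LIFO order, even when setUp
        # fails after this point, so nothing leaks between tests.
        self.addCleanup(FakeRegistry.delete, self.ds_name)

    def test_name_is_unique_per_test(self):
        self.assertIn(self.rand_id, self.ds_name)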
from trove.common.exception import CapabilityNotFound from trove.datastore.models import Capability from trove.datastore.models import CapabilityOverride from trove.tests.unittests.datastore.base import TestDatastoreBase class TestCapabilities(TestDatastoreBase): def setUp(self): super(TestCapabilities, self).setUp() def tearDown(self): super(TestCapabilities, self).tearDown() def test_capability(self): cap = Capability.load(self.capability_name) self.assertEqual(self.capability_name, cap.name) self.assertEqual(self.capability_desc, cap.description) self.assertEqual(self.capability_enabled, cap.enabled) def test_ds_capability_create_disabled(self): self.ds_cap = CapabilityOverride.create( self.cap1, self.datastore_version.id, enabled=False) self.assertFalse(self.ds_cap.enabled) self.ds_cap.delete() def test_capability_enabled(self): self.assertTrue(Capability.load(self.capability_name).enabled) def test_capability_disabled(self): capability = Capability.load(self.capability_name) capability.disable() self.assertFalse(capability.enabled) self.assertFalse(Capability.load(self.capability_name).enabled) def test_load_nonexistent_capability(self): self.assertRaises(CapabilityNotFound, Capability.load, "non-existent") trove-5.0.0/trove/tests/unittests/datastore/test_datastore.py0000664000567000056710000000227512701410316025745 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import exception from trove.datastore import models as datastore_models from trove.datastore.models import Datastore from trove.tests.unittests.datastore.base import TestDatastoreBase class TestDatastore(TestDatastoreBase): def test_create_failure_with_datastore_default_notfound(self): self.assertRaises( exception.DatastoreDefaultDatastoreNotFound, datastore_models.get_datastore_version) def test_load_datastore(self): datastore = Datastore.load(self.ds_name) self.assertEqual(self.ds_name, datastore.name) trove-5.0.0/trove/tests/unittests/guestagent/0000775000567000056710000000000012701410521022516 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/guestagent/test_db2_manager.py0000664000567000056710000002466212701410316026304 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
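# test_load_nonexistent_capability above depends on load() translating a
# missing record into a domain-specific exception instead of returning
# None. A minimal sketch of that convention; NotFound and _DB are
# hypothetical stand-ins for trove's exception and model classes.
class NotFound(Exception):
    pass


_DB = {"root_on_create": {"enabled": True}}


def load(name):
    try:
        return _DB[name]
    except KeyError:
        # Callers can then write assertRaises(NotFound, load,
        # "non-existent") and get a precise failure mode.
        raise NotFound("capability %s not found" % name)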
from mock import MagicMock from mock import patch from testtools.matchers import Is, Equals, Not from trove.common.instance import ServiceStatuses from trove.guestagent import backup from trove.guestagent.datastore.experimental.db2 import ( manager as db2_manager) from trove.guestagent.datastore.experimental.db2 import ( service as db2_service) from trove.guestagent import pkg as pkg from trove.guestagent import volume from trove.tests.unittests import trove_testtools class GuestAgentDB2ManagerTest(trove_testtools.TestCase): def setUp(self): super(GuestAgentDB2ManagerTest, self).setUp() self.real_status = db2_service.DB2AppStatus.set_status class FakeInstanceServiceStatus(object): status = ServiceStatuses.NEW def save(self): pass db2_service.DB2AppStatus.set_status = MagicMock( return_value=FakeInstanceServiceStatus()) self.context = trove_testtools.TroveTestContext(self) self.manager = db2_manager.Manager() self.real_db_app_status = db2_service.DB2AppStatus self.origin_format = volume.VolumeDevice.format self.origin_mount = volume.VolumeDevice.mount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_stop_db = db2_service.DB2App.stop_db self.origin_start_db = db2_service.DB2App.start_db self.orig_change_ownership = (db2_service.DB2App.change_ownership) self.orig_create_databases = db2_service.DB2Admin.create_database self.orig_list_databases = db2_service.DB2Admin.list_databases self.orig_delete_database = db2_service.DB2Admin.delete_database self.orig_create_users = db2_service.DB2Admin.create_user self.orig_list_users = db2_service.DB2Admin.list_users self.orig_delete_user = db2_service.DB2Admin.delete_user self.orig_update_hostname = db2_service.DB2App.update_hostname self.orig_backup_restore = backup.restore def tearDown(self): super(GuestAgentDB2ManagerTest, self).tearDown() db2_service.DB2AppStatus.set_status = self.real_db_app_status volume.VolumeDevice.format = self.origin_format volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.mount_points = self.origin_mount_points db2_service.DB2App.stop_db = self.origin_stop_db db2_service.DB2App.start_db = self.origin_start_db db2_service.DB2App.change_ownership = self.orig_change_ownership db2_service.DB2Admin.create_database = self.orig_create_databases db2_service.DB2Admin.create_user = self.orig_create_users db2_service.DB2Admin.create_database = self.orig_create_databases db2_service.DB2Admin.list_databases = self.orig_list_databases db2_service.DB2Admin.delete_database = self.orig_delete_database db2_service.DB2Admin.create_user = self.orig_create_users db2_service.DB2Admin.list_users = self.orig_list_users db2_service.DB2Admin.delete_user = self.orig_delete_user db2_service.DB2App.update_hostname = self.orig_update_hostname backup.restore = self.orig_backup_restore def test_update_status(self): mock_status = MagicMock() self.manager.appStatus = mock_status self.manager.update_status(self.context) mock_status.update.assert_any_call() def test_prepare_device_path_true(self): self._prepare_dynamic() def test_prepare_device_path_false(self): self._prepare_dynamic(device_path=None) def test_prepare_database(self): self._prepare_dynamic(databases=['db1']) def test_prepare_from_backup(self): self._prepare_dynamic(['db2'], backup_id='123backup') def _prepare_dynamic(self, packages=None, databases=None, users=None, config_content=None, device_path='/dev/vdb', is_db_installed=True, backup_id=None, overrides=None): backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'DB2Backup', 'checksum': 
'fake-checksum'} if backup_id else None mock_status = MagicMock() mock_app = MagicMock() self.manager.appStatus = mock_status self.manager.app = mock_app mock_status.begin_install = MagicMock(return_value=None) mock_app.change_ownership = MagicMock(return_value=None) mock_app.restart = MagicMock(return_value=None) mock_app.start_db = MagicMock(return_value=None) mock_app.stop_db = MagicMock(return_value=None) volume.VolumeDevice.format = MagicMock(return_value=None) volume.VolumeDevice.mount = MagicMock(return_value=None) volume.VolumeDevice.mount_points = MagicMock(return_value=[]) db2_service.DB2Admin.create_user = MagicMock(return_value=None) db2_service.DB2Admin.create_database = MagicMock(return_value=None) backup.restore = MagicMock(return_value=None) with patch.object(pkg.Package, 'pkg_is_installed', return_value=MagicMock( return_value=is_db_installed)): self.manager.prepare(context=self.context, packages=packages, config_contents=config_content, databases=databases, memory_mb='2048', users=users, device_path=device_path, mount_point="/home/db2inst1/db2inst1", backup_info=backup_info, overrides=None, cluster_config=None) mock_status.begin_install.assert_any_call() self.assertEqual(1, mock_app.change_ownership.call_count) if databases: self.assertTrue(db2_service.DB2Admin.create_database.called) else: self.assertFalse(db2_service.DB2Admin.create_database.called) if users: self.assertTrue(db2_service.DB2Admin.create_user.called) else: self.assertFalse(db2_service.DB2Admin.create_user.called) if backup_id: backup.restore.assert_any_call(self.context, backup_info, '/home/db2inst1/db2inst1') def test_restart(self): mock_status = MagicMock() self.manager.appStatus = mock_status with patch.object(db2_service.DB2App, 'restart', return_value=None) as restart_mock: # invocation self.manager.restart(self.context) # verification/assertion restart_mock.assert_any_call() def test_stop_db(self): mock_status = MagicMock() self.manager.appStatus = mock_status db2_service.DB2App.stop_db = MagicMock(return_value=None) self.manager.stop_db(self.context) db2_service.DB2App.stop_db.assert_any_call( do_not_start_on_reboot=False) def test_create_database(self): mock_status = MagicMock() self.manager.appStatus = mock_status db2_service.DB2Admin.create_database = MagicMock(return_value=None) self.manager.create_database(self.context, ['db1']) db2_service.DB2Admin.create_database.assert_any_call(['db1']) def test_create_user(self): mock_status = MagicMock() self.manager.appStatus = mock_status db2_service.DB2Admin.create_user = MagicMock(return_value=None) self.manager.create_user(self.context, ['user1']) db2_service.DB2Admin.create_user.assert_any_call(['user1']) def test_delete_database(self): databases = ['db1'] mock_status = MagicMock() self.manager.appStatus = mock_status db2_service.DB2Admin.delete_database = MagicMock(return_value=None) self.manager.delete_database(self.context, databases) db2_service.DB2Admin.delete_database.assert_any_call(databases) def test_delete_user(self): user = ['user1'] mock_status = MagicMock() self.manager.appStatus = mock_status db2_service.DB2Admin.delete_user = MagicMock(return_value=None) self.manager.delete_user(self.context, user) db2_service.DB2Admin.delete_user.assert_any_call(user) def test_list_databases(self): mock_status = MagicMock() self.manager.appStatus = mock_status db2_service.DB2Admin.list_databases = MagicMock( return_value=['database1']) databases = self.manager.list_databases(self.context) self.assertThat(databases, Not(Is(None))) 
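# (assertThat with testtools matchers, as used here, composes checks and
# produces descriptive mismatch messages on failure; a standalone
# illustration:
#
#   from testtools import TestCase
#   from testtools.matchers import Equals, Is, Not
#
#   class MatcherDemo(TestCase):
#       def test_compose(self):
#           databases = ['database1']
#           self.assertThat(databases, Not(Is(None)))
#           self.assertThat(databases, Equals(['database1']))
# )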
self.assertThat(databases, Equals(['database1'])) db2_service.DB2Admin.list_databases.assert_any_call(None, None, False) def test_list_users(self): db2_service.DB2Admin.list_users = MagicMock(return_value=['user1']) users = self.manager.list_users(self.context) self.assertThat(users, Equals(['user1'])) db2_service.DB2Admin.list_users.assert_any_call(None, None, False) @patch.object(db2_service.DB2Admin, 'get_user', return_value=MagicMock(return_value=['user1'])) def test_get_users(self, get_user_mock): username = ['user1'] hostname = ['host'] mock_status = MagicMock() self.manager.appStatus = mock_status users = self.manager.get_user(self.context, username, hostname) self.assertThat(users, Equals(get_user_mock.return_value)) get_user_mock.assert_any_call(username, hostname) def test_reset_configuration(self): try: configuration = {'config_contents': 'some junk'} self.manager.reset_configuration(self.context, configuration) except Exception: self.fail("reset_configuration raised exception unexpectedly.") def test_rpc_ping(self): output = self.manager.rpc_ping(self.context) self.assertTrue(output) trove-5.0.0/trove/tests/unittests/guestagent/test_mongodb_cluster_manager.py0000664000567000056710000002554112701410316031020 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
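# The DB2 tests above pin module attributes by hand: save the original in
# setUp, assign a MagicMock, restore in tearDown. mock.patch.object does
# the same bookkeeping automatically and cannot forget a restore. A small
# sketch of both styles against a hypothetical service class (FakeApp is
# not part of trove):
import mock


class FakeApp(object):
    """Hypothetical stand-in for a guestagent service class."""
    def stop_db(self, do_not_start_on_reboot=False):
        raise RuntimeError("real implementation; unwanted in a unit test")


def stop_with_manual_stub(app):
    original = FakeApp.stop_db            # save, as in setUp
    FakeApp.stop_db = mock.MagicMock()    # stub for the test
    try:
        app.stop_db(do_not_start_on_reboot=False)
        FakeApp.stop_db.assert_any_call(do_not_start_on_reboot=False)
    finally:
        FakeApp.stop_db = original        # restore, as in tearDown


def stop_with_patch(app):
    # patch.object rolls the attribute back automatically on exit.
    with mock.patch.object(FakeApp, 'stop_db') as stop_mock:
        app.stop_db(do_not_start_on_reboot=False)
        stop_mock.assert_any_call(do_not_start_on_reboot=False)


stop_with_manual_stub(FakeApp())
stop_with_patch(FakeApp())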
import mock from oslo_utils import netutils import pymongo import trove.common.instance as ds_instance import trove.common.utils as utils from trove.guestagent.common.configuration import ImportOverrideStrategy import trove.guestagent.datastore.experimental.mongodb.manager as manager import trove.guestagent.datastore.experimental.mongodb.service as service import trove.guestagent.volume as volume import trove.tests.unittests.trove_testtools as trove_testtools class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase): @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def setUp(self, _): super(GuestAgentMongoDBClusterManagerTest, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.manager = manager.Manager() self.manager.app.configuration_manager = mock.MagicMock() self.manager.app.status.set_status = mock.MagicMock() self.manager.app.status.set_host = mock.MagicMock() self.conf_mgr = self.manager.app.configuration_manager self.pymongo_patch = mock.patch.object( pymongo, 'MongoClient' ) self.addCleanup(self.pymongo_patch.stop) self.pymongo_patch.start() def tearDown(self): super(GuestAgentMongoDBClusterManagerTest, self).tearDown() @mock.patch.object(service.MongoDBApp, 'add_members', side_effect=RuntimeError("Boom!")) def test_add_members_failure(self, mock_add_members): members = ["test1", "test2"] self.assertRaises(RuntimeError, self.manager.add_members, self.context, members) self.manager.app.status.set_status.assert_called_with( ds_instance.ServiceStatuses.FAILED) @mock.patch.object(utils, 'poll_until') @mock.patch.object(service.MongoDBAdmin, 'rs_initiate') @mock.patch.object(service.MongoDBAdmin, 'rs_add_members') def test_add_member(self, mock_add, mock_initiate, mock_poll): members = ["test1", "test2"] self.manager.add_members(self.context, members) mock_initiate.assert_any_call() mock_add.assert_any_call(["test1", "test2"]) @mock.patch.object(service.MongoDBApp, 'restart') @mock.patch.object(service.MongoDBApp, 'create_admin_user') @mock.patch.object(utils, 'generate_random_password', return_value='pwd') def test_prep_primary(self, mock_pwd, mock_user, mock_restart): self.manager.prep_primary(self.context) mock_user.assert_called_with('pwd') mock_restart.assert_called_with() @mock.patch.object(service.MongoDBApp, 'add_shard', side_effect=RuntimeError("Boom!")) def test_add_shard_failure(self, mock_add_shard): self.assertRaises(RuntimeError, self.manager.add_shard, self.context, "rs", "rs_member") self.manager.app.status.set_status.assert_called_with( ds_instance.ServiceStatuses.FAILED) @mock.patch.object(service.MongoDBAdmin, 'add_shard') def test_add_shard(self, mock_add_shard): self.manager.add_shard(self.context, "rs", "rs_member") mock_add_shard.assert_called_with("rs/rs_member:27017") @mock.patch.object(service.MongoDBApp, 'add_config_servers', side_effect=RuntimeError("Boom!")) def test_add_config_server_failure(self, mock_add_config): self.assertRaises(RuntimeError, self.manager.add_config_servers, self.context, ["cfg_server1", "cfg_server2"]) self.manager.app.status.set_status.assert_called_with( ds_instance.ServiceStatuses.FAILED) @mock.patch.object(service.MongoDBApp, 'start_db') def test_add_config_servers(self, mock_start_db): self.manager.add_config_servers(self.context, ["cfg_server1", "cfg_server2"]) self.conf_mgr.apply_system_override.assert_called_once_with( {'sharding.configDB': "cfg_server1:27019,cfg_server2:27019"}, 'clustering') mock_start_db.assert_called_with(True) @mock.patch.object(service.MongoDBApp, 
'_initialize_writable_run_dir') @mock.patch.object(service.MongoDBApp, '_configure_as_query_router') @mock.patch.object(service.MongoDBApp, '_configure_cluster_security') def test_prepare_mongos(self, mock_secure, mock_config, mock_run_init): self._prepare_method("test-id-1", "query_router", None) mock_run_init.assert_called_once_with() mock_config.assert_called_once_with() mock_secure.assert_called_once_with(None) self.manager.app.status.set_status.assert_called_with( ds_instance.ServiceStatuses.INSTANCE_READY, force=True) @mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir') @mock.patch.object(service.MongoDBApp, '_configure_as_config_server') @mock.patch.object(service.MongoDBApp, '_configure_cluster_security') def test_prepare_config_server( self, mock_secure, mock_config, mock_run_init): self._prepare_method("test-id-2", "config_server", None) mock_run_init.assert_called_once_with() mock_config.assert_called_once_with() mock_secure.assert_called_once_with(None) self.manager.app.status.set_status.assert_called_with( ds_instance.ServiceStatuses.INSTANCE_READY, force=True) @mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir') @mock.patch.object(service.MongoDBApp, '_configure_as_cluster_member') @mock.patch.object(service.MongoDBApp, '_configure_cluster_security') def test_prepare_member(self, mock_secure, mock_config, mock_run_init): self._prepare_method("test-id-3", "member", None) mock_run_init.assert_called_once_with() mock_config.assert_called_once_with('rs1') mock_secure.assert_called_once_with(None) self.manager.app.status.set_status.assert_called_with( ds_instance.ServiceStatuses.INSTANCE_READY, force=True) @mock.patch.object(service.MongoDBApp, '_configure_network') def test_configure_as_query_router(self, net_conf): self.conf_mgr.parse_configuration = mock.Mock( return_value={'storage.mmapv1.smallFiles': False, 'storage.journal.enabled': True}) self.manager.app._configure_as_query_router() self.conf_mgr.save_configuration.assert_called_once_with({}) net_conf.assert_called_once_with(service.MONGODB_PORT) self.conf_mgr.apply_system_override.assert_called_once_with( {'sharding.configDB': ''}, 'clustering') self.assertTrue(self.manager.app.is_query_router) @mock.patch.object(service.MongoDBApp, '_configure_network') def test_configure_as_config_server(self, net_conf): self.manager.app._configure_as_config_server() net_conf.assert_called_once_with(service.CONFIGSVR_PORT) self.conf_mgr.apply_system_override.assert_called_once_with( {'sharding.clusterRole': 'configsvr'}, 'clustering') @mock.patch.object(service.MongoDBApp, 'start_db') @mock.patch.object(service.MongoDBApp, '_configure_network') def test_configure_as_cluster_member(self, net_conf, start): self.manager.app._configure_as_cluster_member('rs1') net_conf.assert_called_once_with(service.MONGODB_PORT) self.conf_mgr.apply_system_override.assert_called_once_with( {'replication.replSetName': 'rs1'}, 'clustering') @mock.patch.object(service.MongoDBApp, 'store_key') @mock.patch.object(service.MongoDBApp, 'get_key_file', return_value='/var/keypath') def test_configure_cluster_security(self, get_key_mock, store_key_mock): self.manager.app._configure_cluster_security('key') store_key_mock.assert_called_once_with('key') # TODO(mvandijk): enable cluster security once Trove features are in # self.conf_mgr.apply_system_override.assert_called_once_with( # {'security.clusterAuthMode': 'keyFile', # 'security.keyFile': '/var/keypath'}, 'clustering') @mock.patch.object(netutils, 'get_my_ipv4', 
return_value="10.0.0.2") def test_configure_network(self, ip_mock): self.manager.app._configure_network() self.conf_mgr.apply_system_override.assert_called_once_with( {'net.bindIp': '10.0.0.2,127.0.0.1'}) self.manager.app.status.set_host.assert_called_once_with( '10.0.0.2', port=None) self.manager.app._configure_network(10000) self.conf_mgr.apply_system_override.assert_called_with( {'net.bindIp': '10.0.0.2,127.0.0.1', 'net.port': 10000}) self.manager.app.status.set_host.assert_called_with( '10.0.0.2', port=10000) @mock.patch.object(utils, 'poll_until') @mock.patch.object(service.MongoDBApp, 'get_key_file', return_value="/test/key/file") @mock.patch.object(volume.VolumeDevice, 'mount_points', return_value=[]) @mock.patch.object(volume.VolumeDevice, 'mount', return_value=None) @mock.patch.object(volume.VolumeDevice, 'migrate_data', return_value=None) @mock.patch.object(volume.VolumeDevice, 'format', return_value=None) @mock.patch.object(service.MongoDBApp, 'clear_storage') @mock.patch.object(service.MongoDBApp, 'start_db') @mock.patch.object(service.MongoDBApp, 'stop_db') @mock.patch.object(service.MongoDBAppStatus, 'wait_for_database_service_start') @mock.patch.object(service.MongoDBApp, 'install_if_needed') @mock.patch.object(service.MongoDBAppStatus, 'begin_install') def _prepare_method(self, instance_id, instance_type, key, *args): cluster_config = {"id": instance_id, "shard_id": "test_shard_id", "instance_type": instance_type, "replica_set_name": "rs1", "key": key} # invocation self.manager.prepare(context=self.context, databases=None, packages=['package'], memory_mb='2048', users=None, mount_point='/var/lib/mongodb', overrides=None, cluster_config=cluster_config) trove-5.0.0/trove/tests/unittests/guestagent/test_volume.py0000664000567000056710000002265412701410316025451 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from mock import Mock, MagicMock, patch, mock_open import pexpect from trove.common.exception import GuestError, ProcessExecutionError from trove.common import utils from trove.guestagent import volume from trove.tests.unittests import trove_testtools def _setUp_fake_spawn(return_val=0): fake_spawn = pexpect.spawn('echo') fake_spawn.expect = Mock(return_value=return_val) pexpect.spawn = Mock(return_value=fake_spawn) return fake_spawn class VolumeDeviceTest(trove_testtools.TestCase): def setUp(self): super(VolumeDeviceTest, self).setUp() self.volumeDevice = volume.VolumeDevice('/dev/vdb') def tearDown(self): super(VolumeDeviceTest, self).tearDown() @patch.object(pexpect, 'spawn', Mock()) def test_migrate_data(self): origin_execute = utils.execute utils.execute = Mock() origin_os_path_exists = os.path.exists os.path.exists = Mock() fake_spawn = _setUp_fake_spawn() origin_unmount = self.volumeDevice.unmount self.volumeDevice.unmount = MagicMock() self.volumeDevice.migrate_data('/') self.assertEqual(1, fake_spawn.expect.call_count) self.assertEqual(1, utils.execute.call_count) self.assertEqual(1, self.volumeDevice.unmount.call_count) utils.execute = origin_execute self.volumeDevice.unmount = origin_unmount os.path.exists = origin_os_path_exists def test__check_device_exists(self): origin_execute = utils.execute utils.execute = Mock() self.volumeDevice._check_device_exists() self.assertEqual(1, utils.execute.call_count) utils.execute = origin_execute @patch('trove.guestagent.volume.LOG') def test_fail__check_device_exists(self, mock_logging): with patch.object(utils, 'execute', side_effect=ProcessExecutionError): self.assertRaises(GuestError, self.volumeDevice._check_device_exists) @patch.object(pexpect, 'spawn', Mock()) def test__check_format(self): fake_spawn = _setUp_fake_spawn() self.volumeDevice._check_format() self.assertEqual(1, fake_spawn.expect.call_count) @patch.object(pexpect, 'spawn', Mock()) def test__check_format_2(self): fake_spawn = _setUp_fake_spawn(return_val=1) self.assertEqual(0, fake_spawn.expect.call_count) self.assertRaises(IOError, self.volumeDevice._check_format) @patch.object(pexpect, 'spawn', Mock()) def test__format(self): fake_spawn = _setUp_fake_spawn() self.volumeDevice._format() self.assertEqual(1, fake_spawn.expect.call_count) self.assertEqual(1, pexpect.spawn.call_count) def test_format(self): origin_check_device_exists = self.volumeDevice._check_device_exists origin_format = self.volumeDevice._format origin_check_format = self.volumeDevice._check_format self.volumeDevice._check_device_exists = MagicMock() self.volumeDevice._check_format = MagicMock() self.volumeDevice._format = MagicMock() self.volumeDevice.format() self.assertEqual(1, self.volumeDevice._check_device_exists.call_count) self.assertEqual(1, self.volumeDevice._format.call_count) self.assertEqual(1, self.volumeDevice._check_format.call_count) self.volumeDevice._check_device_exists = origin_check_device_exists self.volumeDevice._format = origin_format self.volumeDevice._check_format = origin_check_format def test_mount(self): origin_ = volume.VolumeMountPoint.mount volume.VolumeMountPoint.mount = Mock() origin_os_path_exists = os.path.exists os.path.exists = Mock() origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab volume.VolumeMountPoint.write_to_fstab = Mock() self.volumeDevice.mount(Mock) self.assertEqual(1, volume.VolumeMountPoint.mount.call_count) self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count) volume.VolumeMountPoint.mount = origin_ 
volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab os.path.exists = origin_os_path_exists def test_resize_fs(self): origin_check_device_exists = self.volumeDevice._check_device_exists origin_execute = utils.execute utils.execute = Mock() self.volumeDevice._check_device_exists = MagicMock() origin_os_path_exists = os.path.exists os.path.exists = Mock() self.volumeDevice.resize_fs('/mnt/volume') self.assertEqual(1, self.volumeDevice._check_device_exists.call_count) self.assertEqual(2, utils.execute.call_count) self.volumeDevice._check_device_exists = origin_check_device_exists os.path.exists = origin_os_path_exists utils.execute = origin_execute @patch.object(os.path, 'ismount', return_value=True) @patch.object(utils, 'execute', side_effect=ProcessExecutionError) @patch('trove.guestagent.volume.LOG') def test_fail_resize_fs(self, mock_logging, mock_execute, mock_mount): with patch.object(self.volumeDevice, '_check_device_exists'): self.assertRaises(GuestError, self.volumeDevice.resize_fs, '/mnt/volume') self.assertEqual(1, self.volumeDevice._check_device_exists.call_count) self.assertEqual(1, mock_mount.call_count) def test_unmount_positive(self): self._test_unmount() def test_unmount_negative(self): self._test_unmount(False) @patch.object(pexpect, 'spawn', Mock()) def _test_unmount(self, positive=True): origin_ = os.path.exists os.path.exists = MagicMock(return_value=positive) fake_spawn = _setUp_fake_spawn() self.volumeDevice.unmount('/mnt/volume') COUNT = 1 if not positive: COUNT = 0 self.assertEqual(COUNT, fake_spawn.expect.call_count) os.path.exists = origin_ @patch.object(utils, 'execute', return_value=('/var/lib/mysql', '')) def test_mount_points(self, mock_execute): mount_point = self.volumeDevice.mount_points('/dev/vdb') self.assertEqual(['/var/lib/mysql'], mount_point) @patch.object(utils, 'execute', side_effect=ProcessExecutionError) @patch('trove.guestagent.volume.LOG') def test_fail_mount_points(self, mock_logging, mock_execute): self.assertRaises(GuestError, self.volumeDevice.mount_points, '/mnt/volume') def test_set_readahead_size(self): origin_check_device_exists = self.volumeDevice._check_device_exists self.volumeDevice._check_device_exists = MagicMock() mock_execute = MagicMock(return_value=None) readahead_size = 2048 self.volumeDevice.set_readahead_size(readahead_size, execute_function=mock_execute) blockdev = mock_execute.call_args_list[0] blockdev.assert_called_with("sudo", "blockdev", "--setra", readahead_size, "/dev/vdb") self.volumeDevice._check_device_exists = origin_check_device_exists @patch('trove.guestagent.volume.LOG') def test_fail_set_readahead_size(self, mock_logging): mock_execute = MagicMock(side_effect=ProcessExecutionError) readahead_size = 2048 with patch.object(self.volumeDevice, '_check_device_exists'): self.assertRaises(GuestError, self.volumeDevice.set_readahead_size, readahead_size, execute_function=mock_execute) self.volumeDevice._check_device_exists.assert_any_call() class VolumeMountPointTest(trove_testtools.TestCase): def setUp(self): super(VolumeMountPointTest, self).setUp() self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device', '/dev/vdb') def tearDown(self): super(VolumeMountPointTest, self).tearDown() @patch.object(pexpect, 'spawn', Mock()) def test_mount(self): origin_ = os.path.exists os.path.exists = MagicMock(return_value=False) fake_spawn = _setUp_fake_spawn() with patch.object(utils, 'execute_with_timeout', return_value=('0', '')): self.volumeMountPoint.mount() self.assertEqual(1, os.path.exists.call_count) 
self.assertEqual(1, utils.execute_with_timeout.call_count) self.assertEqual(1, fake_spawn.expect.call_count) os.path.exists = origin_ def test_write_to_fstab(self): origin_execute = utils.execute utils.execute = Mock() m = mock_open() with patch('%s.open' % volume.__name__, m, create=True): self.volumeMountPoint.write_to_fstab() self.assertEqual(1, utils.execute.call_count) utils.execute = origin_execute trove-5.0.0/trove/tests/unittests/guestagent/__init__.py0000664000567000056710000000000012701410316024617 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/guestagent/test_api.py0000664000567000056710000004364712701410316024720 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import Timeout import mock import oslo_messaging as messaging from oslo_messaging.rpc.client import RemoteError from testtools.matchers import Is import trove.common.context as context from trove.common import exception from trove.common.remote import guest_client from trove.guestagent import api from trove import rpc from trove.tests.unittests import trove_testtools REPLICATION_SNAPSHOT = {'master': {'id': '123', 'host': '192.168.0.1', 'port': 3306}, 'dataset': {}, 'binlog_position': 'binpos'} RPC_API_VERSION = '1.0' def _mock_call_pwd_change(cmd, version=None, users=None): if users == 'dummy': return True else: raise BaseException("Test Failed") def _mock_call(cmd, timeout, version=None, username=None, hostname=None, database=None, databases=None): # To check get_user, list_access, grant_access, revoke_access in cmd. 
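# Editorial note, not in the original source: this fake stands in for the
# guest RPC "call" method. It returns True only for the four query-style
# commands named below; any other command raises, so a test fails loudly if
# the API issues an unexpected RPC call.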
if cmd in ('get_user', 'list_access', 'grant_access', 'revoke_access'): return True else: raise BaseException("Test Failed") class ApiTest(trove_testtools.TestCase): @mock.patch.object(rpc, 'get_client') def setUp(self, *args): super(ApiTest, self).setUp() self.context = context.TroveContext() self.guest = api.API(self.context, 0) self.guest._cast = _mock_call_pwd_change self.guest._call = _mock_call self.api = api.API(self.context, "instance-id-x23d2d") self._mock_rpc_client() def test_change_passwords(self): self.assertIsNone(self.guest.change_passwords("dummy")) def test_get_user(self): self.assertTrue(self.guest.get_user("dummyname", "dummyhost")) def test_list_access(self): self.assertTrue(self.guest.list_access("dummyname", "dummyhost")) def test_grant_access(self): self.assertTrue(self.guest.grant_access("dumname", "dumhost", "dumdb")) def test_revoke_access(self): self.assertTrue(self.guest.revoke_access("dumname", "dumhost", "dumdb")) def test_get_routing_key(self): self.assertEqual('guestagent.instance-id-x23d2d', self.api._get_routing_key()) def test_update_attributes(self): self.api.update_attributes('test_user', '%', {'name': 'new_user'}) self._verify_rpc_prepare_before_cast() self._verify_cast('update_attributes', username='test_user', hostname='%', user_attrs={'name': 'new_user'}) def test_create_user(self): self.api.create_user('test_user') self._verify_rpc_prepare_before_cast() self._verify_cast('create_user', users='test_user') @mock.patch('trove.guestagent.api.LOG') def test_api_cast_exception(self, mock_logging): self.call_context.cast.side_effect = IOError('host down') self.assertRaises(exception.GuestError, self.api.create_user, 'test_user') @mock.patch('trove.guestagent.api.LOG') def test_api_call_exception(self, mock_logging): self.call_context.call.side_effect = IOError('host_down') self.assertRaises(exception.GuestError, self.api.list_users) def test_api_call_timeout(self): self.call_context.call.side_effect = Timeout() self.assertRaises(exception.GuestTimeout, self.api.restart) @mock.patch('trove.guestagent.api.LOG') def test_api_cast_remote_error(self, mock_logging): self.call_context.cast.side_effect = RemoteError('Error') self.assertRaises(exception.GuestError, self.api.delete_database, 'test_db') @mock.patch('trove.guestagent.api.LOG') def test_api_call_remote_error(self, mock_logging): self.call_context.call.side_effect = RemoteError('Error') self.assertRaises(exception.GuestError, self.api.stop_db) def test_list_users(self): exp_resp = ['user1', 'user2', 'user3'] self.call_context.call.return_value = exp_resp resp = self.api.list_users() self._verify_rpc_prepare_before_call() self._verify_call('list_users', limit=None, marker=None, include_marker=False) self.assertEqual(exp_resp, resp) def test_delete_user(self): self.api.delete_user('test_user') self._verify_rpc_prepare_before_cast() self._verify_cast('delete_user', user='test_user') def test_create_database(self): databases = ['db1', 'db2', 'db3'] self.api.create_database(databases) self._verify_rpc_prepare_before_cast() self.call_context.cast.assert_called_once_with( self.context, "create_database", databases=databases) def test_list_databases(self): exp_resp = ['db1', 'db2', 'db3'] self.call_context.call.return_value = exp_resp resp = self.api.list_databases( limit=1, marker=2, include_marker=False) self._verify_rpc_prepare_before_call() self._verify_call("list_databases", limit=1, marker=2, include_marker=False) self.assertEqual(exp_resp, resp) def test_delete_database(self): 
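# Editorial note, not in the original source: as in the other cast-style
# tests of this class, the assertions below verify both that the RPC client
# was prepared and that the one-way cast carried the expected method name
# and keyword arguments.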
self.api.delete_database('test_database_name') self._verify_rpc_prepare_before_cast() self._verify_cast("delete_database", database='test_database_name') def test_enable_root(self): self.call_context.call.return_value = True resp = self.api.enable_root() self._verify_rpc_prepare_before_call() self._verify_call('enable_root') self.assertThat(resp, Is(True)) def test_enable_root_with_password(self): self.call_context.call.return_value = True resp = self.api.enable_root_with_password() self._verify_rpc_prepare_before_call() self._verify_call('enable_root_with_password', root_password=None) self.assertThat(resp, Is(True)) def test_disable_root(self): self.call_context.call.return_value = True resp = self.api.disable_root() self._verify_rpc_prepare_before_call() self._verify_call('disable_root') self.assertThat(resp, Is(True)) def test_is_root_enabled(self): self.call_context.call.return_value = False resp = self.api.is_root_enabled() self._verify_rpc_prepare_before_call() self._verify_call('is_root_enabled') self.assertThat(resp, Is(False)) def test_get_hwinfo(self): self.call_context.call.return_value = '[blah]' resp = self.api.get_hwinfo() self._verify_rpc_prepare_before_call() self._verify_call('get_hwinfo') self.assertThat(resp, Is('[blah]')) def test_rpc_ping(self): # execute self.api.rpc_ping() # verify self._verify_rpc_prepare_before_call() self._verify_call('rpc_ping') def test_get_diagnostics(self): self.call_context.call.return_value = '[all good]' resp = self.api.get_diagnostics() self._verify_rpc_prepare_before_call() self._verify_call('get_diagnostics') self.assertThat(resp, Is('[all good]')) def test_restart(self): self.api.restart() self._verify_rpc_prepare_before_call() self._verify_call('restart') def test_start_db_with_conf_changes(self): self.api.start_db_with_conf_changes(None) self._verify_rpc_prepare_before_call() self._verify_call('start_db_with_conf_changes', config_contents=None) def test_reset_configuration(self): # execute self.api.reset_configuration({'config_contents': 'some junk'}) # verify self._verify_rpc_prepare_before_call() self._verify_call('reset_configuration', configuration={'config_contents': 'some junk'}) def test_stop_db(self): self.api.stop_db(do_not_start_on_reboot=False) self._verify_rpc_prepare_before_call() self._verify_call('stop_db', do_not_start_on_reboot=False) def test_get_volume_info(self): exp_resp = {'fake': 'resp'} self.call_context.call.return_value = exp_resp resp = self.api.get_volume_info() self._verify_rpc_prepare_before_call() self._verify_call('get_filesystem_stats', fs_path=None) self.assertThat(resp, Is(exp_resp)) def test_update_guest(self): self.api.update_guest() self._verify_rpc_prepare_before_call() self._verify_call('update_guest') def test_create_backup(self): self.api.create_backup({'id': '123'}) self._verify_rpc_prepare_before_cast() self._verify_cast('create_backup', backup_info={'id': '123'}) def test_unmount_volume(self): # execute self.api.unmount_volume('/dev/vdb', '/var/lib/mysql') # verify self._verify_rpc_prepare_before_call() self._verify_call('unmount_volume', device_path='/dev/vdb', mount_point='/var/lib/mysql') def test_mount_volume(self): # execute self.api.mount_volume('/dev/vdb', '/var/lib/mysql') # verify self._verify_rpc_prepare_before_call() self._verify_call('mount_volume', device_path='/dev/vdb', mount_point='/var/lib/mysql') def test_resize_fs(self): # execute self.api.resize_fs('/dev/vdb', '/var/lib/mysql') # verify self._verify_rpc_prepare_before_call() self._verify_call('resize_fs', 
device_path='/dev/vdb', mount_point='/var/lib/mysql') def test_update_overrides(self): self.api.update_overrides('123') self._verify_rpc_prepare_before_call() self._verify_call('update_overrides', overrides='123', remove=False) def test_apply_overrides(self): self.api.apply_overrides('123') self._verify_rpc_prepare_before_call() self._verify_call('apply_overrides', overrides='123') def test_get_replication_snapshot(self): # execute self.api.get_replication_snapshot({}) # verify self._verify_rpc_prepare_before_call() self._verify_call('get_replication_snapshot', snapshot_info={}, replica_source_config=None) def test_attach_replication_slave(self): # execute self.api.attach_replication_slave(REPLICATION_SNAPSHOT) # verify self._verify_rpc_prepare_before_cast() self._verify_cast('attach_replication_slave', snapshot=REPLICATION_SNAPSHOT, slave_config=None) def test_detach_replica(self): # execute self.api.detach_replica() # verify self._verify_rpc_prepare_before_call() self._verify_call('detach_replica', for_failover=False) def test_get_replica_context(self): # execute self.api.get_replica_context() # verify self._verify_rpc_prepare_before_call() self._verify_call('get_replica_context') def test_attach_replica(self): # execute self.api.attach_replica(REPLICATION_SNAPSHOT, slave_config=None) # verify self._verify_rpc_prepare_before_call() self._verify_call('attach_replica', replica_info=REPLICATION_SNAPSHOT, slave_config=None) def test_make_read_only(self): # execute self.api.make_read_only(True) # verify self._verify_rpc_prepare_before_call() self._verify_call('make_read_only', read_only=True) def test_enable_as_master(self): # execute self.api.enable_as_master({}) # verify self._verify_rpc_prepare_before_call() self._verify_call('enable_as_master', replica_source_config={}) def test_get_txn_count(self): # execute self.api.get_txn_count() # verify self._verify_rpc_prepare_before_call() self._verify_call('get_txn_count') def test_get_last_txn(self): # execute self.api.get_last_txn() # verify self._verify_rpc_prepare_before_call() self._verify_call('get_last_txn') def test_get_latest_txn_id(self): # execute self.api.get_latest_txn_id() # verify self._verify_rpc_prepare_before_call() self._verify_call('get_latest_txn_id') def test_wait_for_txn(self): # execute self.api.wait_for_txn("") # verify self._verify_rpc_prepare_before_call() self._verify_call('wait_for_txn', txn="") def test_cleanup_source_on_replica_detach(self): # execute self.api.cleanup_source_on_replica_detach({'replication_user': 'test_user'}) # verify self._verify_rpc_prepare_before_call() self._verify_call('cleanup_source_on_replica_detach', replica_info={'replication_user': 'test_user'}) def test_demote_replication_master(self): # execute self.api.demote_replication_master() # verify self._verify_rpc_prepare_before_call() self._verify_call('demote_replication_master') @mock.patch.object(messaging, 'Target') @mock.patch.object(rpc, 'get_server') def test_prepare(self, *args): self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt', '/mnt/opt', None, 'cont', '1-2-3-4', 'override', {'id': '2-3-4-5'}) self._verify_rpc_prepare_before_cast() self._verify_cast( 'prepare', packages=['package1'], databases='db1', memory_mb='2048', users='user1', device_path='/dev/vdt', mount_point='/mnt/opt', backup_info=None, config_contents='cont', root_password='1-2-3-4', overrides='override', cluster_config={'id': '2-3-4-5'}, snapshot=None, modules=None) @mock.patch.object(messaging, 'Target') @mock.patch.object(rpc, 'get_server') def 
test_prepare_with_backup(self, *args): backup = {'id': 'backup_id_123'} self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt', '/mnt/opt', backup, 'cont', '1-2-3-4', 'overrides', {"id": "2-3-4-5"}, modules=None) self._verify_rpc_prepare_before_cast() self._verify_cast( 'prepare', packages=['package1'], databases='db1', memory_mb='2048', users='user1', device_path='/dev/vdt', mount_point='/mnt/opt', backup_info=backup, config_contents='cont', root_password='1-2-3-4', overrides='overrides', cluster_config={'id': '2-3-4-5'}, snapshot=None, modules=None) @mock.patch.object(messaging, 'Target') @mock.patch.object(rpc, 'get_server') def test_prepare_with_modules(self, *args): modules = [{'id': 'mod_id'}] self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt', '/mnt/opt', None, 'cont', '1-2-3-4', 'overrides', {"id": "2-3-4-5"}, modules=modules) self._verify_rpc_prepare_before_cast() self._verify_cast( 'prepare', packages=['package1'], databases='db1', memory_mb='2048', users='user1', device_path='/dev/vdt', mount_point='/mnt/opt', backup_info=None, config_contents='cont', root_password='1-2-3-4', overrides='overrides', cluster_config={'id': '2-3-4-5'}, snapshot=None, modules=modules) def test_upgrade(self): instance_version = "v1.0.1" location = "http://swift/trove-guestagent-v1.0.1.tar.gz" # execute self.api.upgrade(instance_version, location) # verify self._verify_rpc_prepare_before_cast() self._verify_cast( 'upgrade', instance_version=instance_version, location=location, metadata=None) def _verify_rpc_prepare_before_call(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION, timeout=mock.ANY) def _verify_rpc_prepare_before_cast(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION) def _verify_cast(self, *args, **kwargs): self.call_context.cast.assert_called_once_with(self.context, *args, **kwargs) def _verify_call(self, *args, **kwargs): self.call_context.call.assert_called_once_with(self.context, *args, **kwargs) def _mock_rpc_client(self): self.call_context = mock.Mock() self.api.client.prepare = mock.Mock(return_value=self.call_context) self.call_context.call = mock.Mock() self.call_context.cast = mock.Mock() class ApiStrategyTest(trove_testtools.TestCase): @mock.patch('trove.guestagent.api.API.__init__', mock.Mock(return_value=None)) def test_guest_client_mongodb(self): client = guest_client(mock.Mock(), mock.Mock(), 'mongodb') self.assertFalse(hasattr(client, 'add_config_servers2')) self.assertTrue(callable(client.add_config_servers)) @mock.patch('trove.guestagent.api.API.__init__', mock.Mock(return_value=None)) def test_guest_client_vertica(self): client = guest_client(mock.Mock(), mock.Mock(), 'vertica') self.assertFalse(hasattr(client, 'get_public_keys2')) self.assertTrue(callable(client.get_public_keys)) trove-5.0.0/trove/tests/unittests/guestagent/test_dbmodels.py0000664000567000056710000000753312701410316025732 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from mock import MagicMock from trove.guestagent.db import models as dbmodels from trove.tests.unittests import trove_testtools class MySQLDatabaseTest(trove_testtools.TestCase): def setUp(self): super(MySQLDatabaseTest, self).setUp() self.mysqlDb = dbmodels.ValidatedMySQLDatabase() self.origin_ignore_db = self.mysqlDb._ignore_dbs self.mysqlDb._ignore_dbs = ['mysql'] def tearDown(self): super(MySQLDatabaseTest, self).tearDown() self.mysqlDb._ignore_dbs = self.origin_ignore_db def test_name(self): self.assertIsNone(self.mysqlDb.name) def test_name_setter(self): test_name = "Anna" self.mysqlDb.name = test_name self.assertEqual(test_name, self.mysqlDb.name) def test_is_valid_positive(self): self.assertTrue(self.mysqlDb._is_valid('mysqldb')) def test_is_valid_negative(self): self.assertFalse(self.mysqlDb._is_valid('mysql')) class MySQLUserTest(trove_testtools.TestCase): def setUp(self): super(MySQLUserTest, self).setUp() self.mysqlUser = dbmodels.MySQLUser() def tearDown(self): super(MySQLUserTest, self).tearDown() def test_is_valid_negative(self): self.assertFalse(self.mysqlUser._is_valid(None)) self.assertFalse(self.mysqlUser._is_valid("|;")) self.assertFalse(self.mysqlUser._is_valid("\\")) def test_is_valid_positive(self): self.assertTrue(self.mysqlUser._is_valid("real_name")) class IsValidUsernameTest(trove_testtools.TestCase): def setUp(self): super(IsValidUsernameTest, self).setUp() self.mysqlUser = dbmodels.MySQLUser() self.origin_is_valid = self.mysqlUser._is_valid self.origin_ignore_users = self.mysqlUser._ignore_users self.mysqlUser._ignore_users = ["king"] def tearDown(self): super(IsValidUsernameTest, self).tearDown() self.mysqlUser._is_valid = self.origin_is_valid self.mysqlUser._ignore_users = self.origin_ignore_users def test_is_valid_user_name(self): value = "trove" self.assertTrue(self.mysqlUser._is_valid_user_name(value)) def test_is_valid_user_name_negative(self): self.mysqlUser._is_valid = MagicMock(return_value=False) self.assertFalse(self.mysqlUser._is_valid_user_name("trove")) self.mysqlUser._is_valid = MagicMock(return_value=True) self.assertFalse(self.mysqlUser._is_valid_user_name("king")) class IsValidHostnameTest(trove_testtools.TestCase): def setUp(self): super(IsValidHostnameTest, self).setUp() self.mysqlUser = dbmodels.MySQLUser() def tearDown(self): super(IsValidHostnameTest, self).tearDown() def test_is_valid_octet(self): self.assertTrue(self.mysqlUser._is_valid_host_name('192.168.1.1')) def test_is_valid_bad_octet(self): self.assertFalse(self.mysqlUser._is_valid_host_name('999.168.1.1')) def test_is_valid_global_wildcard(self): self.assertTrue(self.mysqlUser._is_valid_host_name('%')) def test_is_valid_prefix_wildcard(self): self.assertTrue(self.mysqlUser._is_valid_host_name('%.168.1.1')) def test_is_valid_suffix_wildcard(self): self.assertTrue(self.mysqlUser._is_valid_host_name('192.168.1.%')) trove-5.0.0/trove/tests/unittests/guestagent/test_pkg.py0000664000567000056710000005200112701410316024710 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import commands import os import re import subprocess from mock import Mock, MagicMock, patch import pexpect from trove.common import exception from trove.common import utils from trove.guestagent import pkg from trove.tests.unittests import trove_testtools """ Unit tests for the classes and functions in pkg.py. """ class PkgDEBInstallTestCase(trove_testtools.TestCase): def setUp(self): super(PkgDEBInstallTestCase, self).setUp() self.pkg = pkg.DebianPackagerMixin() self.pkg_fix = self.pkg._fix self.pkg_fix_package_selections = self.pkg._fix_package_selections p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) self.pkg._fix = Mock(return_value=None) self.pkg._fix_package_selections = Mock(return_value=None) self.pkgName = 'packageName' def tearDown(self): super(PkgDEBInstallTestCase, self).tearDown() self.pkg._fix = self.pkg_fix self.pkg._fix_package_selections = self.pkg_fix_package_selections def test_pkg_is_installed_no_packages(self): packages = [] self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_yes(self): packages = ["package1=1.0", "package2"] self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0"]) self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_no(self): packages = ["package1=1.0", "package2", "package3=3.1"] self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0", "3.0"]) self.assertFalse(self.pkg.pkg_is_installed(packages)) def test_success_install(self): # test pexpect.spawn.return_value.expect.return_value = 7 pexpect.spawn.return_value.match = False self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_success_install_with_config_opts(self): # test config_opts = {'option': 'some_opt'} pexpect.spawn.return_value.expect.return_value = 7 pexpect.spawn.return_value.match = False self.assertTrue( self.pkg.pkg_install(self.pkgName, config_opts, 5000) is None) def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_not_found_1(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_not_found_2(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_run_DPKG_bad_State(self): # test _fix method is called and PackageStateError is thrown pexpect.spawn.return_value.expect.return_value = 4 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_install, self.pkgName, {}, 5000) self.assertTrue(self.pkg._fix.called) def test_admin_lock_error(self): # test 'Unable to lock the 
administration directory' error pexpect.spawn.return_value.expect.return_value = 5 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_broken_error(self): pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgBrokenError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install, self.pkgName, {}, 5000) class PkgDEBRemoveTestCase(trove_testtools.TestCase): def setUp(self): super(PkgDEBRemoveTestCase, self).setUp() self.pkg = pkg.DebianPackagerMixin() self.pkg_version = self.pkg.pkg_version self.pkg_install = self.pkg._install self.pkg_fix = self.pkg._fix p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) self.pkg.pkg_version = Mock(return_value="OK") self.pkg._install = Mock(return_value=None) self.pkg._fix = Mock(return_value=None) self.pkgName = 'packageName' def tearDown(self): super(PkgDEBRemoveTestCase, self).tearDown() self.pkg.pkg_version = self.pkg_version self.pkg._install = self.pkg_install self.pkg._fix = self.pkg_fix def test_remove_no_pkg_version(self): # test pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False with patch.object(self.pkg, 'pkg_version', return_value=None): self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) def test_success_remove(self): # test pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove, self.pkgName, 5000) def test_package_not_found(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove, self.pkgName, 5000) def test_package_reinstall_first_1(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, self.pkgName, 5000) self.assertTrue(self.pkg._install.called) self.assertFalse(self.pkg._fix.called) def test_package_reinstall_first_2(self): # test pexpect.spawn.return_value.expect.return_value = 3 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, self.pkgName, 5000) self.assertTrue(self.pkg._install.called) self.assertFalse(self.pkg._fix.called) def test_package_DPKG_first(self): # test pexpect.spawn.return_value.expect.return_value = 4 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, self.pkgName, 5000) self.assertFalse(self.pkg._install.called) self.assertTrue(self.pkg._fix.called) def test_admin_lock_error(self): # test 'Unable to lock the administration directory' error pexpect.spawn.return_value.expect.return_value = 5 pexpect.spawn.return_value.match = False # test and verify 
self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_remove, self.pkgName, 5000) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, self.pkgName, 5000) @patch.object(subprocess, 'call') def test_timeout_error_with_exception(self, mock_call): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) pexpect.spawn.return_value.close.side_effect = ( pexpect.ExceptionPexpect('error')) # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, self.pkgName, 5000) self.assertEqual(1, mock_call.call_count) class PkgDEBVersionTestCase(trove_testtools.TestCase): def setUp(self): super(PkgDEBVersionTestCase, self).setUp() self.pkgName = 'mysql-server-5.5' self.pkgVersion = '5.5.28-0' self.commands_output = commands.getstatusoutput def tearDown(self): super(PkgDEBVersionTestCase, self).tearDown() commands.getstatusoutput = self.commands_output def test_version_success(self): cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, self.pkgVersion) commands.getstatusoutput = Mock(return_value=(0, cmd_out)) version = pkg.DebianPackagerMixin().pkg_version(self.pkgName) self.assertTrue(version) self.assertEqual(self.pkgVersion, version) def test_version_unknown_package(self): cmd_out = "N: Unable to locate package %s" % self.pkgName commands.getstatusoutput = Mock(return_value=(0, cmd_out)) self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName)) def test_version_no_version(self): cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, "(none)") commands.getstatusoutput = Mock(return_value=(0, cmd_out)) self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName)) class PkgRPMVersionTestCase(trove_testtools.TestCase): def setUp(self): super(PkgRPMVersionTestCase, self).setUp() self.pkgName = 'python-requests' self.pkgVersion = '0.14.2-1.el6' self.commands_output = commands.getstatusoutput def tearDown(self): super(PkgRPMVersionTestCase, self).tearDown() commands.getstatusoutput = self.commands_output @patch('trove.guestagent.pkg.LOG') def test_version_no_output(self, mock_logging): cmd_out = '' commands.getstatusoutput = Mock(return_value=(0, cmd_out)) self.assertIsNone(pkg.RedhatPackagerMixin().pkg_version(self.pkgName)) def test_version_success(self): cmd_out = self.pkgVersion commands.getstatusoutput = Mock(return_value=(0, cmd_out)) version = pkg.RedhatPackagerMixin().pkg_version(self.pkgName) self.assertTrue(version) self.assertEqual(self.pkgVersion, version) class PkgRPMInstallTestCase(trove_testtools.TestCase): def setUp(self): super(PkgRPMInstallTestCase, self).setUp() self.pkg = pkg.RedhatPackagerMixin() self.commands_output = commands.getstatusoutput self.pkgName = 'packageName' p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) def tearDown(self): super(PkgRPMInstallTestCase, self).tearDown() commands.getstatusoutput = self.commands_output def test_pkg_is_installed_no_packages(self): packages = [] self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_yes(self): packages = ["package1=1.0", "package2"] with patch.object(commands, 'getstatusoutput', MagicMock( return_value={1: "package1=1.0\n" "package2=2.0"})): self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_no(self): packages = ["package1=1.0", "package2", "package3=3.0"] with 
patch.object(commands, 'getstatusoutput', MagicMock( return_value={1: "package1=1.0\n" "package2=2.0"})): self.assertFalse(self.pkg.pkg_is_installed(packages)) def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_not_found(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_conflict_remove(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) self.pkg._rpm_remove_nodeps = Mock() # test and verify self.pkg._install(self.pkgName, 5000) self.assertTrue(self.pkg._rpm_remove_nodeps.called) def test_package_conflict_remove_install(self): with patch.object(self.pkg, '_install', side_effect=[3, 3, 0]): self.assertTrue( self.pkg.pkg_install(self.pkgName, {}, 5000) is None) self.assertEqual(3, self.pkg._install.call_count) @patch.object(utils, 'execute') def test__rpm_remove_nodeps(self, mock_execute): self.pkg._rpm_remove_nodeps(self.pkgName) mock_execute.assert_called_with('rpm', '-e', '--nodeps', self.pkgName, run_as_root=True, root_helper='sudo') def test_package_scriptlet_error(self): # test pexpect.spawn.return_value.expect.return_value = 5 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgScriptletError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_http_error(self): # test pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_nomirrors_error(self): # test pexpect.spawn.return_value.expect.return_value = 7 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_sign_error(self): # test pexpect.spawn.return_value.expect.return_value = 8 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgSignError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_already_installed(self): # test pexpect.spawn.return_value.expect.return_value = 9 pexpect.spawn.return_value.match = False # test and verify self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_package_success_updated(self): # test pexpect.spawn.return_value.expect.return_value = 10 pexpect.spawn.return_value.match = False # test and verify self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_package_success_installed(self): # test pexpect.spawn.return_value.expect.return_value = 11 pexpect.spawn.return_value.match = False # test and verify self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install, self.pkgName, {}, 5000) class PkgRPMRemoveTestCase(trove_testtools.TestCase): def setUp(self): super(PkgRPMRemoveTestCase, self).setUp() self.pkg = pkg.RedhatPackagerMixin() self.pkg_version = self.pkg.pkg_version 
self.pkg_install = self.pkg._install p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) self.pkg.pkg_version = Mock(return_value="OK") self.pkg._install = Mock(return_value=None) self.pkgName = 'packageName' def tearDown(self): super(PkgRPMRemoveTestCase, self).tearDown() self.pkg.pkg_version = self.pkg_version self.pkg._install = self.pkg_install def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove, self.pkgName, 5000) def test_package_not_found(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove, self.pkgName, 5000) def test_remove_no_pkg_version(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = False with patch.object(self.pkg, 'pkg_version', return_value=None): self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) def test_success_remove(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = False self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, self.pkgName, 5000) class PkgDEBFixPackageSelections(trove_testtools.TestCase): def setUp(self): super(PkgDEBFixPackageSelections, self).setUp() self.pkg = pkg.DebianPackagerMixin() self.commands_output = commands.getstatusoutput def tearDown(self): super(PkgDEBFixPackageSelections, self).tearDown() commands.getstatusoutput = self.commands_output @patch.object(os, 'remove') @patch.object(pkg, 'NamedTemporaryFile') @patch.object(utils, 'execute') def test__fix_package_selections(self, mock_execute, mock_temp_file, mock_remove): packages = ["package1"] config_opts = {'option': 'some_opt'} commands.getstatusoutput = Mock( return_value=(0, "* package1/option: some_opt")) self.pkg._fix_package_selections(packages, config_opts) self.assertEqual(2, mock_execute.call_count) self.assertEqual(1, mock_remove.call_count) @patch.object(os, 'remove') @patch.object(pkg, 'NamedTemporaryFile') @patch.object(utils, 'execute', side_effect=exception.ProcessExecutionError) def test_fail__fix_package_selections(self, mock_execute, mock_temp_file, mock_remove): packages = ["package1"] config_opts = {'option': 'some_opt'} commands.getstatusoutput = Mock( return_value=(0, "* package1/option: some_opt")) self.assertRaises(pkg.PkgConfigureError, self.pkg._fix_package_selections, packages, config_opts) self.assertEqual(1, mock_remove.call_count) @patch.object(utils, 'execute') def test__fix(self, mock_execute): self.pkg._fix(30) mock_execute.assert_called_with('dpkg', '--configure', '-a', run_as_root=True, root_helper='sudo') trove-5.0.0/trove/tests/unittests/guestagent/test_couchdb_manager.py0000664000567000056710000003326112701410316027237 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from mock import MagicMock from mock import patch from oslo_utils import netutils from testtools.matchers import Is, Equals, Not from trove.common.instance import ServiceStatuses from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchdb import ( manager as couchdb_manager) from trove.guestagent.datastore.experimental.couchdb import ( service as couchdb_service) from trove.guestagent import pkg as pkg from trove.guestagent import volume from trove.tests.unittests import trove_testtools class GuestAgentCouchDBManagerTest(trove_testtools.TestCase): def setUp(self): super(GuestAgentCouchDBManagerTest, self).setUp() self.real_status = couchdb_service.CouchDBAppStatus.set_status class FakeInstanceServiceStatus(object): status = ServiceStatuses.NEW def save(self): pass couchdb_service.CouchDBAppStatus.set_status = MagicMock( return_value=FakeInstanceServiceStatus()) self.context = trove_testtools.TroveTestContext(self) self.manager = couchdb_manager.Manager() self.pkg = couchdb_service.packager self.real_db_app_status = couchdb_service.CouchDBAppStatus self.origin_os_path_exists = os.path.exists self.origin_format = volume.VolumeDevice.format self.origin_migrate_data = volume.VolumeDevice.migrate_data self.origin_mount = volume.VolumeDevice.mount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_stop_db = couchdb_service.CouchDBApp.stop_db self.origin_start_db = couchdb_service.CouchDBApp.start_db self.original_get_ip = netutils.get_my_ipv4 self.orig_make_host_reachable = ( couchdb_service.CouchDBApp.make_host_reachable) self.orig_backup_restore = backup.restore self.orig_create_users = couchdb_service.CouchDBAdmin.create_user self.orig_delete_user = couchdb_service.CouchDBAdmin.delete_user self.orig_list_users = couchdb_service.CouchDBAdmin.list_users self.orig_get_user = couchdb_service.CouchDBAdmin.get_user self.orig_grant_access = couchdb_service.CouchDBAdmin.grant_access self.orig_revoke_access = couchdb_service.CouchDBAdmin.revoke_access self.orig_list_access = couchdb_service.CouchDBAdmin.list_access self.orig_enable_root = couchdb_service.CouchDBAdmin.enable_root self.orig_is_root_enabled = ( couchdb_service.CouchDBAdmin.is_root_enabled) self.orig_create_databases = ( couchdb_service.CouchDBAdmin.create_database) self.orig_list_databases = couchdb_service.CouchDBAdmin.list_databases self.orig_delete_database = ( couchdb_service.CouchDBAdmin.delete_database) def tearDown(self): super(GuestAgentCouchDBManagerTest, self).tearDown() couchdb_service.packager = self.pkg couchdb_service.CouchDBAppStatus.set_status = self.real_db_app_status os.path.exists = self.origin_os_path_exists volume.VolumeDevice.format = self.origin_format volume.VolumeDevice.migrate_data = self.origin_migrate_data volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.mount_points = self.origin_mount_points couchdb_service.CouchDBApp.stop_db = self.origin_stop_db couchdb_service.CouchDBApp.start_db = self.origin_start_db netutils.get_my_ipv4 = self.original_get_ip couchdb_service.CouchDBApp.make_host_reachable = ( self.orig_make_host_reachable) 
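# Editorial note, not in the original Trove source: setUp() above saves each
# module attribute it replaces, and this tearDown() restores them one by
# one. An equivalent, less error-prone pattern is to let mock undo the
# patch automatically, for example:
#     patcher = patch.object(couchdb_service.CouchDBAdmin, 'create_user')
#     self.mock_create_user = patcher.start()
#     self.addCleanup(patcher.stop)
# which restores the attribute even if setUp() fails partway through.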
backup.restore = self.orig_backup_restore couchdb_service.CouchDBAdmin.create_user = self.orig_create_users couchdb_service.CouchDBAdmin.delete_user = self.orig_delete_user couchdb_service.CouchDBAdmin.list_users = self.orig_list_users couchdb_service.CouchDBAdmin.get_user = self.orig_get_user couchdb_service.CouchDBAdmin.grant_access = self.orig_grant_access couchdb_service.CouchDBAdmin.revoke_access = self.orig_revoke_access couchdb_service.CouchDBAdmin.list_access = self.orig_list_access couchdb_service.CouchDBAdmin.enable_root = self.orig_enable_root couchdb_service.CouchDBAdmin.is_root_enabled = ( self.orig_is_root_enabled) couchdb_service.CouchDBAdmin.create_database = ( self.orig_create_databases) couchdb_service.CouchDBAdmin.list_databases = self.orig_list_databases couchdb_service.CouchDBAdmin.delete_database = ( self.orig_delete_database) def test_update_status(self): mock_status = MagicMock() self.manager.appStatus = mock_status self.manager.update_status(self.context) mock_status.update.assert_any_call() def _prepare_dynamic(self, packages=None, databases=None, config_content=None, device_path='/dev/vdb', is_db_installed=True, backup_id=None, overrides=None): mock_status = MagicMock() mock_app = MagicMock() self.manager.appStatus = mock_status self.manager.app = mock_app mount_point = '/var/lib/couchdb' mock_status.begin_install = MagicMock(return_value=None) mock_app.install_if_needed = MagicMock(return_value=None) mock_app.make_host_reachable = MagicMock(return_value=None) mock_app.restart = MagicMock(return_value=None) mock_app.start_db = MagicMock(return_value=None) mock_app.stop_db = MagicMock(return_value=None) os.path.exists = MagicMock(return_value=True) volume.VolumeDevice.format = MagicMock(return_value=None) volume.VolumeDevice.migrate_data = MagicMock(return_value=None) volume.VolumeDevice.mount = MagicMock(return_value=None) volume.VolumeDevice.mount_points = MagicMock(return_value=[]) backup.restore = MagicMock(return_value=None) backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'CouchDBBackup', 'checksum': 'fake-checksum'} if backup_id else None couchdb_service.CouchDBAdmin.create_database = MagicMock( return_value=None) couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None) with patch.object(pkg.Package, 'pkg_is_installed', return_value=MagicMock( return_value=is_db_installed)): self.manager.prepare(context=self.context, packages=packages, config_contents=config_content, databases=databases, memory_mb='2048', users=None, device_path=device_path, mount_point=mount_point, backup_info=backup_info, overrides=None, cluster_config=None) # verification/assertion mock_status.begin_install.assert_any_call() mock_app.install_if_needed.assert_any_call(packages) mock_app.make_host_reachable.assert_any_call() mock_app.change_permissions.assert_any_call() if backup_id: backup.restore.assert_any_call(self.context, backup_info, mount_point) def test_prepare_pkg(self): self._prepare_dynamic(['couchdb']) def test_prepare_no_pkg(self): self._prepare_dynamic([]) def test_prepare_from_backup(self): self._prepare_dynamic(['couchdb'], backup_id='123abc456') def test_prepare_database(self): self._prepare_dynamic(databases=['db1']) def test_restart(self): mock_status = MagicMock() self.manager.appStatus = mock_status with patch.object(couchdb_service.CouchDBApp, 'restart', return_value=None): # invocation self.manager.restart(self.context) # verification/assertion couchdb_service.CouchDBApp.restart.assert_any_call() def test_stop_db(self): mock_status 
= MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBApp.stop_db = MagicMock(return_value=None) # invocation self.manager.stop_db(self.context) # verification/assertion couchdb_service.CouchDBApp.stop_db.assert_any_call( do_not_start_on_reboot=False) def test_reset_configuration(self): try: configuration = {'config_contents': 'some junk'} self.manager.reset_configuration(self.context, configuration) except Exception: self.fail("reset_configuration raised exception unexpectedly.") def test_rpc_ping(self): output = self.manager.rpc_ping(self.context) self.assertTrue(output) def test_create_user(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None) self.manager.create_user(self.context, ['user1']) couchdb_service.CouchDBAdmin.create_user.assert_any_call(['user1']) def test_delete_user(self): user = ['user1'] mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.delete_user = MagicMock(return_value=None) self.manager.delete_user(self.context, user) couchdb_service.CouchDBAdmin.delete_user.assert_any_call(user) def test_list_users(self): couchdb_service.CouchDBAdmin.list_users = MagicMock( return_value=['user1']) users = self.manager.list_users(self.context) self.assertThat(users, Equals(['user1'])) couchdb_service.CouchDBAdmin.list_users.assert_any_call( None, None, False) def test_get_user(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.get_user = MagicMock( return_value=['user1']) self.manager.get_user(self.context, 'user1', None) couchdb_service.CouchDBAdmin.get_user.assert_any_call( 'user1', None) def test_grant_access(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.grant_access = MagicMock( return_value=None) self.manager.grant_access(self.context, 'user1', None, ['db1']) couchdb_service.CouchDBAdmin.grant_access.assert_any_call( 'user1', ['db1']) def test_revoke_access(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.revoke_access = MagicMock( return_value=None) self.manager.revoke_access(self.context, 'user1', None, ['db1']) couchdb_service.CouchDBAdmin.revoke_access.assert_any_call( 'user1', ['db1']) def test_list_access(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.list_access = MagicMock( return_value=['user1']) self.manager.list_access(self.context, 'user1', None) couchdb_service.CouchDBAdmin.list_access.assert_any_call( 'user1', None) def test_enable_root(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.enable_root = MagicMock( return_value=True) result = self.manager.enable_root(self.context) self.assertThat(result, Equals(True)) def test_is_root_enabled(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.is_root_enabled = MagicMock( return_value=True) result = self.manager.is_root_enabled(self.context) self.assertThat(result, Equals(True)) def test_create_databases(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.create_database = MagicMock( return_value=None) self.manager.create_database(self.context, ['db1']) couchdb_service.CouchDBAdmin.create_database.assert_any_call(['db1']) def test_delete_database(self): databases = ['db1'] mock_status = MagicMock() self.manager.appStatus = 
mock_status couchdb_service.CouchDBAdmin.delete_database = MagicMock( return_value=None) self.manager.delete_database(self.context, databases) couchdb_service.CouchDBAdmin.delete_database.assert_any_call( databases) def test_list_databases(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.list_databases = MagicMock( return_value=['database1']) databases = self.manager.list_databases(self.context) self.assertThat(databases, Not(Is(None))) self.assertThat(databases, Equals(['database1'])) couchdb_service.CouchDBAdmin.list_databases.assert_any_call( None, None, False) trove-5.0.0/trove/tests/unittests/guestagent/test_agent_heartbeats_models.py0000664000567000056710000001755012701410316031004 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mock import patch import uuid from trove.common import exception from trove.guestagent.models import AgentHeartBeat from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class AgentHeartBeatTest(trove_testtools.TestCase): def setUp(self): super(AgentHeartBeatTest, self).setUp() util.init_db() def tearDown(self): super(AgentHeartBeatTest, self).tearDown() def test_create(self): """ Test the creation of a new agent heartbeat record """ instance_id = str(uuid.uuid4()) heartbeat = AgentHeartBeat.create( instance_id=instance_id) self.assertIsNotNone(heartbeat) self.assertIsNotNone(heartbeat.id) self.assertIsNotNone(heartbeat.instance_id) self.assertEqual(instance_id, heartbeat.instance_id) self.assertIsNotNone(heartbeat.updated_at) self.assertIsNone(heartbeat.guest_agent_version) def test_create_with_version(self): """ Test the creation of a new agent heartbeat record w/ guest version """ instance_id = str(uuid.uuid4()) heartbeat = AgentHeartBeat.create( instance_id=instance_id, guest_agent_version="1.2.3") self.assertIsNotNone(heartbeat) self.assertIsNotNone(heartbeat.id) self.assertIsNotNone(heartbeat.instance_id) self.assertEqual(instance_id, heartbeat.instance_id) self.assertIsNotNone(heartbeat.updated_at) self.assertIsNotNone(heartbeat.guest_agent_version) self.assertEqual("1.2.3", heartbeat.guest_agent_version) def test_find_by_instance_id(self): """ Test to retrieve a guest agents by it's id """ # create a unique record instance_id = str(uuid.uuid4()) heartbeat = AgentHeartBeat.create( instance_id=instance_id, guest_agent_version="1.2.3") self.assertIsNotNone(heartbeat) self.assertIsNotNone(heartbeat.id) self.assertIsNotNone(heartbeat.instance_id) self.assertEqual(instance_id, heartbeat.instance_id) self.assertIsNotNone(heartbeat.updated_at) self.assertIsNotNone(heartbeat.guest_agent_version) self.assertEqual("1.2.3", heartbeat.guest_agent_version) # retrieve the record heartbeat_found = AgentHeartBeat.find_by_instance_id( instance_id=instance_id) self.assertIsNotNone(heartbeat_found) self.assertEqual(heartbeat.id, heartbeat_found.id) self.assertEqual(heartbeat.instance_id, 
trove-5.0.0/trove/tests/unittests/guestagent/test_agent_heartbeats_models.py0000664000567000056710000001755012701410316031004 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Hewlett-Packard Development Company, L.P.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mock import patch
import uuid

from trove.common import exception
from trove.guestagent.models import AgentHeartBeat
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util


class AgentHeartBeatTest(trove_testtools.TestCase):

    def setUp(self):
        super(AgentHeartBeatTest, self).setUp()
        util.init_db()

    def tearDown(self):
        super(AgentHeartBeatTest, self).tearDown()

    def test_create(self):
        """
        Test the creation of a new agent heartbeat record.
        """
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id)
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNone(heartbeat.guest_agent_version)

    def test_create_with_version(self):
        """
        Test the creation of a new agent heartbeat record w/ guest version.
        """
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id,
            guest_agent_version="1.2.3")
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNotNone(heartbeat.guest_agent_version)
        self.assertEqual("1.2.3", heartbeat.guest_agent_version)

    def test_find_by_instance_id(self):
        """
        Test retrieving a guest agent heartbeat by its instance id.
        """
        # create a unique record
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id,
            guest_agent_version="1.2.3")
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNotNone(heartbeat.guest_agent_version)
        self.assertEqual("1.2.3", heartbeat.guest_agent_version)

        # retrieve the record
        heartbeat_found = AgentHeartBeat.find_by_instance_id(
            instance_id=instance_id)
        self.assertIsNotNone(heartbeat_found)
        self.assertEqual(heartbeat.id, heartbeat_found.id)
        self.assertEqual(heartbeat.instance_id,
                         heartbeat_found.instance_id)
        self.assertEqual(heartbeat.updated_at, heartbeat_found.updated_at)
        self.assertEqual(heartbeat.guest_agent_version,
                         heartbeat_found.guest_agent_version)

    def test_find_by_instance_id_none(self):
        """
        Test retrieving a guest agent heartbeat when the id is None.
        """
        heartbeat_found = None
        exception_raised = False
        try:
            heartbeat_found = AgentHeartBeat.find_by_instance_id(
                instance_id=None)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeat_found)
        self.assertTrue(exception_raised)

    @patch('trove.guestagent.models.LOG')
    def test_find_by_instance_id_not_found(self, mock_logging):
        """
        Test retrieving a guest agent heartbeat when the id is not found.
        """
        instance_id = str(uuid.uuid4())
        heartbeat_found = None
        exception_raised = False
        try:
            heartbeat_found = AgentHeartBeat.find_by_instance_id(
                instance_id=instance_id)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeat_found)
        self.assertTrue(exception_raised)

    def test_find_all_by_version(self):
        """
        Test retrieving all guest agents with a particular version.
        """
        # create some unique records with the same version
        version = str(uuid.uuid4())

        for x in range(5):
            instance_id = str(uuid.uuid4())
            heartbeat = AgentHeartBeat.create(
                instance_id=instance_id,
                guest_agent_version=version,
                deleted=0)
            self.assertIsNotNone(heartbeat)

        # get all guests by version
        heartbeats = AgentHeartBeat.find_all_by_version(version)
        self.assertIsNotNone(heartbeats)
        self.assertEqual(5, heartbeats.count())

    def test_find_all_by_version_none(self):
        """
        Test retrieving all guest agents with a None version.
        """
        heartbeats = None
        exception_raised = False
        try:
            heartbeats = AgentHeartBeat.find_all_by_version(None)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeats)
        self.assertTrue(exception_raised)

    def test_find_all_by_version_not_found(self):
        """
        Test retrieving all guest agents with a non-existing version.
        """
        version = str(uuid.uuid4())
        exception_raised = False
        heartbeats = None
        try:
            heartbeats = AgentHeartBeat.find_all_by_version(version)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeats)
        self.assertTrue(exception_raised)

    def test_update_heartbeat(self):
        """
        Test the upgrade scenario that will be used by the conductor.
        """
        # create a unique record
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id,
            guest_agent_version="1.2.3")
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNotNone(heartbeat.guest_agent_version)
        self.assertEqual("1.2.3", heartbeat.guest_agent_version)

        # retrieve the record
        heartbeat_found = AgentHeartBeat.find_by_instance_id(
            instance_id=instance_id)
        self.assertIsNotNone(heartbeat_found)
        self.assertEqual(heartbeat.id, heartbeat_found.id)
        self.assertEqual(heartbeat.instance_id,
                         heartbeat_found.instance_id)
        self.assertEqual(heartbeat.updated_at, heartbeat_found.updated_at)
        self.assertEqual(heartbeat.guest_agent_version,
                         heartbeat_found.guest_agent_version)

        # update
        AgentHeartBeat().update(id=heartbeat_found.id,
                                instance_id=instance_id,
                                guest_agent_version="1.2.3")

        # retrieve the record
        updated_heartbeat = AgentHeartBeat.find_by_instance_id(
            instance_id=instance_id)
        self.assertIsNotNone(updated_heartbeat)
        self.assertEqual(heartbeat.id, updated_heartbeat.id)
        self.assertEqual(heartbeat.instance_id,
                         updated_heartbeat.instance_id)
        self.assertEqual(heartbeat.guest_agent_version,
                         updated_heartbeat.guest_agent_version)
        self.assertEqual(heartbeat.updated_at, updated_heartbeat.updated_at)
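

# Illustrative sketch, not part of the original suite: the create/update
# cycle exercised by test_update_heartbeat above mirrors what the
# conductor does when a guest reports in.  The function below is only an
# example and is not called by any test.
def _example_conductor_heartbeat_flow(instance_id):
    heartbeat = AgentHeartBeat.create(instance_id=instance_id,
                                      guest_agent_version="1.2.3")
    # A later report updates the existing row in place instead of
    # creating a second record for the same instance.
    AgentHeartBeat().update(id=heartbeat.id,
                            instance_id=instance_id,
                            guest_agent_version="1.2.3")
    return AgentHeartBeat.find_by_instance_id(instance_id=instance_id)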
trove-5.0.0/trove/tests/unittests/guestagent/test_dbaas.py0000664000567000056710000051660612701410320025214 0ustar jenkinsjenkins00000000000000
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import ConfigParser
import os
import subprocess
import tempfile
import time
from uuid import uuid4

from mock import ANY
from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
from oslo_utils import netutils
import sqlalchemy

from trove.common import cfg
from trove.common import context as trove_context
from trove.common.exception import BadRequest
from trove.common.exception import GuestError
from trove.common.exception import PollTimeOut
from trove.common.exception import ProcessExecutionError
from trove.common import instance as rd_instance
from trove.common import utils
from trove.conductor import api as conductor_api
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.common import sql_query
from trove.guestagent.datastore.experimental.cassandra import (
    service as cass_service)
from trove.guestagent.datastore.experimental.couchbase import (
    service as couchservice)
from trove.guestagent.datastore.experimental.couchdb import (
    service as couchdb_service)
from trove.guestagent.datastore.experimental.db2 import (
    service as db2service)
from trove.guestagent.datastore.experimental.mariadb import (
    service as mariadb_service)
from trove.guestagent.datastore.experimental.mongodb import (
    service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
    system as mongo_system)
from trove.guestagent.datastore.experimental.postgresql import (
    manager as pg_manager)
from trove.guestagent.datastore.experimental.postgresql.service import (
    config as pg_config)
from trove.guestagent.datastore.experimental.postgresql.service import (
    status as pg_status)
from trove.guestagent.datastore.experimental.pxc import (
    service as pxc_service)
from trove.guestagent.datastore.experimental.redis import service as rservice
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.datastore.experimental.redis import system as RedisSystem
from trove.guestagent.datastore.experimental.vertica import (
    system as vertica_system)
from trove.guestagent.datastore.experimental.vertica.service import (
    VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent.datastore.mysql.service import KeepAliveConnection
from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import MySqlRootAccess
import trove.guestagent.datastore.mysql_common.service as mysql_common_service
import trove.guestagent.datastore.service as base_datastore_service
from trove.guestagent.datastore.service import BaseDbStatus
from trove.guestagent.db import models
from trove.guestagent import dbaas as dbaas_sr
from trove.guestagent.dbaas import get_filesystem_volume_stats
from trove.guestagent.dbaas import to_gb
from trove.guestagent.dbaas import to_mb
from trove.guestagent import pkg
from trove.guestagent.volume import VolumeDevice
from trove.instance.models import InstanceServiceStatus
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util

CONF = cfg.CONF

"""
Unit tests for the classes and functions in dbaas.py.
"""

FAKE_DB = {"_name": "testDB", "_character_set": "latin2",
           "_collate": "latin2_general_ci"}
FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2",
             "_collate": "latin2_general_ci"}
FAKE_USER = [{"_name": "random", "_password": "guesswhat",
              "_host": "%", "_databases": [FAKE_DB]}]

conductor_api.API.get_client = Mock()
conductor_api.API.heartbeat = Mock()


class FakeTime:
    COUNTER = 0

    @classmethod
    def time(cls):
        cls.COUNTER += 1
        return cls.COUNTER


def faketime(*args, **kwargs):
    return FakeTime.time()


class FakeAppStatus(BaseDbStatus):

    def __init__(self, id, status):
        self.id = id
        self.status = status
        self.next_fake_status = status
        self._prepare_completed = None
        self.start_db_service = MagicMock()
        self.stop_db_service = MagicMock()
        self.restart_db_service = MagicMock()

    def _get_actual_db_status(self):
        return self.next_fake_status

    def set_next_status(self, next_status):
        self.next_fake_status = next_status

    def _is_query_router(self):
        return False
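

# Illustrative sketch, not part of the original suite: FakeAppStatus lets
# a test script the status the guest will "observe" next, so state
# transitions can be simulated without a real database process.  Typical
# usage (names are illustrative only):
def _example_fake_status_usage(fake_id):
    status = FakeAppStatus(fake_id, rd_instance.ServiceStatuses.NEW)
    status.set_next_status(rd_instance.ServiceStatuses.RUNNING)
    # The next poll of the "actual" status now reports RUNNING.
    return status._get_actual_db_status()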


class DbaasTest(trove_testtools.TestCase):

    def setUp(self):
        super(DbaasTest, self).setUp()
        self.orig_utils_execute_with_timeout = \
            mysql_common_service.utils.execute_with_timeout
        self.orig_utils_execute = mysql_common_service.utils.execute

    def tearDown(self):
        super(DbaasTest, self).tearDown()
        mysql_common_service.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout
        mysql_common_service.utils.execute = self.orig_utils_execute

    @patch.object(operating_system, 'remove')
    def test_clear_expired_password(self, mock_remove):
        secret_content = ("# The random password set for the "
                          "root user at Wed May 14 14:06:38 2014 "
                          "(local time): somepassword")
        with patch.object(mysql_common_service.utils, 'execute',
                          return_value=(secret_content, None)):
            mysql_common_service.clear_expired_password()
            self.assertEqual(2, mysql_common_service.utils.execute.call_count)
            self.assertEqual(1, mock_remove.call_count)

    @patch.object(operating_system, 'remove')
    def test_no_secret_content_clear_expired_password(self, mock_remove):
        with patch.object(mysql_common_service.utils, 'execute',
                          return_value=('', None)):
            mysql_common_service.clear_expired_password()
            self.assertEqual(1, mysql_common_service.utils.execute.call_count)
            mock_remove.assert_not_called()

    @patch.object(operating_system, 'remove')
    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_fail_password_update_content_clear_expired_password(
            self, mock_logging, mock_remove):
        secret_content = ("# The random password set for the "
                          "root user at Wed May 14 14:06:38 2014 "
                          "(local time): somepassword")
        with patch.object(mysql_common_service.utils, 'execute',
                          side_effect=[(secret_content, None),
                                       ProcessExecutionError]):
            mysql_common_service.clear_expired_password()
            self.assertEqual(2, mysql_common_service.utils.execute.call_count)
            mock_remove.assert_not_called()

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    @patch.object(operating_system, 'remove')
    @patch.object(mysql_common_service.utils, 'execute',
                  side_effect=ProcessExecutionError)
    def test_fail_retrieve_secret_content_clear_expired_password(
            self, mock_execute, mock_remove, mock_logging):
        mysql_common_service.clear_expired_password()
        self.assertEqual(1, mock_execute.call_count)
        mock_remove.assert_not_called()

    @patch.object(operating_system, 'read_file',
                  return_value={'client': {'password': 'some password'}})
    @patch.object(mysql_common_service.BaseMySqlApp.configuration_manager,
                  'get_value',
                  return_value=MagicMock({'get': 'some password'}))
    def test_get_auth_password(self, get_cnf_mock, read_file_mock):
        password = MySqlApp.get_auth_password()
        read_file_mock.assert_called_once_with(
            MySqlApp.get_client_auth_file(),
            codec=MySqlApp.CFG_CODEC)
        self.assertEqual("some password", password)

    @patch.object(mysql_common_service.BaseMySqlApp.configuration_manager,
                  'get_value', side_effect=RuntimeError('Error'))
    @patch.object(operating_system, 'read_file',
                  side_effect=RuntimeError('read_file error'))
    def test_get_auth_password_error(self, _, get_cnf_mock):
        self.assertRaisesRegexp(RuntimeError, "read_file error",
                                MySqlApp.get_auth_password)

    def test_service_discovery(self):
        with patch.object(os.path, 'isfile', return_value=True):
            mysql_service = mysql_common_service.operating_system.\
                service_discovery(["mysql"])
        self.assertIsNotNone(mysql_service['cmd_start'])
        self.assertIsNotNone(mysql_service['cmd_enable'])

    def test_load_mysqld_options(self):
        output = "mysqld would've been started with the these args:\n"\
                 "--user=mysql --port=3306 --basedir=/usr "\
                 "--tmpdir=/tmp --skip-external-locking"
        with patch.object(os.path, 'isfile', return_value=True):
            mysql_common_service.utils.execute = Mock(
                return_value=(output, None))
            options = mysql_common_service.load_mysqld_options()
        self.assertEqual(5, len(options))
        self.assertEqual(["mysql"], options["user"])
        self.assertEqual(["3306"], options["port"])
        self.assertEqual(["/usr"], options["basedir"])
        self.assertEqual(["/tmp"], options["tmpdir"])
        self.assertTrue("skip-external-locking" in options)

    def test_load_mysqld_options_contains_plugin_loads_options(self):
        output = ("mysqld would've been started with the these args:\n"
                  "--plugin-load=blackhole=ha_blackhole.so "
                  "--plugin-load=federated=ha_federated.so")
        with patch.object(os.path, 'isfile', return_value=True):
            mysql_common_service.utils.execute = Mock(
                return_value=(output, None))
            options = mysql_common_service.load_mysqld_options()
        self.assertEqual(1, len(options))
        self.assertEqual(["blackhole=ha_blackhole.so",
                          "federated=ha_federated.so"],
                         options["plugin-load"])

    @patch.object(os.path, 'isfile', return_value=True)
    def test_load_mysqld_options_error(self, mock_exists):
        mysql_common_service.utils.execute = Mock(
            side_effect=ProcessExecutionError())
        self.assertFalse(mysql_common_service.load_mysqld_options())


class ResultSetStub(object):

    def __init__(self, rows):
        self._rows = rows

    def __iter__(self):
        return self._rows.__iter__()

    @property
    def rowcount(self):
        return len(self._rows)

    def __repr__(self):
        return self._rows.__repr__()
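

# Illustrative sketch, not part of the original suite: ResultSetStub
# stands in for a SQLAlchemy result proxy, so a mocked client can feed
# canned rows to code that iterates the result and reads rowcount.
def _example_result_set_stub():
    rows = ResultSetStub([('db1', 'utf8', 'utf8_bin')])
    assert 1 == rows.rowcount
    return [name for name, _charset, _collation in rows]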
""" class AppTestCase(trove_testtools.TestCase): def setUp(self, fake_id): super(BaseAppTest.AppTestCase, self).setUp() self.FAKE_ID = fake_id InstanceServiceStatus.create( instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) def tearDown(self): InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() super(BaseAppTest.AppTestCase, self).tearDown() @abc.abstractproperty def appStatus(self): pass @abc.abstractproperty def expected_state_change_timeout(self): pass @abc.abstractproperty def expected_service_candidates(self): pass def test_start_db(self): with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.appStatus.set_next_status( rd_instance.ServiceStatuses.RUNNING) self.app.start_db() self.appStatus.start_db_service.assert_called_once_with( self.expected_service_candidates, self.expected_state_change_timeout, enable_on_boot=True, update_db=False) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_stop_db(self): with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.appStatus.set_next_status( rd_instance.ServiceStatuses.SHUTDOWN) self.app.stop_db() self.appStatus.stop_db_service.assert_called_once_with( self.expected_service_candidates, self.expected_state_change_timeout, disable_on_boot=False, update_db=False) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_restart_db(self): self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.app.restart() self.appStatus.restart_db_service.assert_called_once_with( self.expected_service_candidates, self.expected_state_change_timeout) def assert_reported_status(self, expected_status): service_status = InstanceServiceStatus.find_by( instance_id=self.FAKE_ID) self.assertEqual(expected_status, service_status.status) class MySqlAdminMockTest(trove_testtools.TestCase): def setUp(self): super(MySqlAdminMockTest, self).setUp() mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT, configuration_manager=DEFAULT) self.addCleanup(mysql_app_patcher.stop) mysql_app_patcher.start() create_engine_patcher = patch.object(sqlalchemy, 'create_engine') self.addCleanup(create_engine_patcher.stop) create_engine_patcher.start() exec_timeout_patcher = patch.object(utils, 'execute_with_timeout') self.addCleanup(exec_timeout_patcher.stop) exec_timeout_patcher.start() self.mock_cli_ctx_mgr = Mock() self.mock_client = MagicMock() self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) self.mock_cli_ctx_mgr.__exit__ = Mock() local_client_patcher = patch.object(dbaas.MySqlAdmin, 'local_sql_client', return_value=self.mock_cli_ctx_mgr) self.addCleanup(local_client_patcher.stop) local_client_patcher.start() def tearDown(self): super(MySqlAdminMockTest, self).tearDown() @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', Mock(return_value='some_password')) def test_list_databases(self): with patch.object(self.mock_client, 'execute', return_value=ResultSetStub( [('db1', 'utf8', 'utf8_bin'), ('db2', 'utf8', 'utf8_bin'), ('db3', 'utf8', 'utf8_bin')])): databases, next_marker = MySqlAdmin().list_databases(limit=10) self.assertIsNone(next_marker) self.assertEqual(3, len(databases)) class MySqlAdminTest(trove_testtools.TestCase): def setUp(self): super(MySqlAdminTest, self).setUp() self.orig_get_engine = dbaas.get_engine self.mock_cli_ctx_mgr = 


class MySqlAdminMockTest(trove_testtools.TestCase):

    def setUp(self):
        super(MySqlAdminMockTest, self).setUp()
        mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT,
                                           configuration_manager=DEFAULT)
        self.addCleanup(mysql_app_patcher.stop)
        mysql_app_patcher.start()
        create_engine_patcher = patch.object(sqlalchemy, 'create_engine')
        self.addCleanup(create_engine_patcher.stop)
        create_engine_patcher.start()
        exec_timeout_patcher = patch.object(utils, 'execute_with_timeout')
        self.addCleanup(exec_timeout_patcher.stop)
        exec_timeout_patcher.start()

        self.mock_cli_ctx_mgr = Mock()
        self.mock_client = MagicMock()
        self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client)
        self.mock_cli_ctx_mgr.__exit__ = Mock()

        local_client_patcher = patch.object(dbaas.MySqlAdmin,
                                            'local_sql_client',
                                            return_value=self.mock_cli_ctx_mgr)
        self.addCleanup(local_client_patcher.stop)
        local_client_patcher.start()

    def tearDown(self):
        super(MySqlAdminMockTest, self).tearDown()

    @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password',
                  Mock(return_value='some_password'))
    def test_list_databases(self):
        with patch.object(self.mock_client, 'execute',
                          return_value=ResultSetStub(
                              [('db1', 'utf8', 'utf8_bin'),
                               ('db2', 'utf8', 'utf8_bin'),
                               ('db3', 'utf8', 'utf8_bin')])):
            databases, next_marker = MySqlAdmin().list_databases(limit=10)
        self.assertIsNone(next_marker)
        self.assertEqual(3, len(databases))


class MySqlAdminTest(trove_testtools.TestCase):

    def setUp(self):
        super(MySqlAdminTest, self).setUp()
        self.orig_get_engine = dbaas.get_engine
        self.mock_cli_ctx_mgr = Mock()
        self.mock_client = MagicMock()
        self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client)
        self.mock_cli_ctx_mgr.__exit__ = Mock()

        local_client_patcher = patch.object(dbaas.MySqlAdmin,
                                            'local_sql_client',
                                            return_value=self.mock_cli_ctx_mgr)
        self.addCleanup(local_client_patcher.stop)
        local_client_patcher.start()

        self.orig_MySQLUser_is_valid_user_name = (
            models.MySQLUser._is_valid_user_name)
        dbaas.get_engine = MagicMock(name='get_engine')
        # trove.guestagent.common.configuration import ConfigurationManager
        dbaas.orig_configuration_manager = dbaas.MySqlApp.configuration_manager
        dbaas.MySqlApp.configuration_manager = Mock()
        dbaas.orig_get_auth_password = dbaas.MySqlApp.get_auth_password
        dbaas.MySqlApp.get_auth_password = Mock()
        self.orig_configuration_manager = \
            mysql_common_service.BaseMySqlApp.configuration_manager
        mysql_common_service.BaseMySqlApp.configuration_manager = Mock()
        self.mySqlAdmin = MySqlAdmin()

    def tearDown(self):
        dbaas.get_engine = self.orig_get_engine
        models.MySQLUser._is_valid_user_name = (
            self.orig_MySQLUser_is_valid_user_name)
        dbaas.MySqlApp.configuration_manager = \
            dbaas.orig_configuration_manager
        dbaas.MySqlApp.get_auth_password = \
            dbaas.orig_get_auth_password
        mysql_common_service.BaseMySqlApp.configuration_manager = \
            self.orig_configuration_manager
        super(MySqlAdminTest, self).tearDown()

    def test__associate_dbs(self):
        db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
                     {"grantee": "'test_user'@'%'", "table_schema": "db2"},
                     {"grantee": "'test_user'@'%'", "table_schema": "db3"},
                     {"grantee": "'test_user1'@'%'", "table_schema": "db1"},
                     {"grantee": "'test_user1'@'%'", "table_schema": "db3"}]
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user.databases = []
        expected = ("SELECT grantee, table_schema FROM "
                    "information_schema.SCHEMA_PRIVILEGES WHERE privilege_type"
                    " != 'USAGE' GROUP BY grantee, table_schema;")
        with patch.object(self.mock_client, 'execute',
                          return_value=db_result) as mock_execute:
            self.mySqlAdmin._associate_dbs(user)
            self.assertEqual(3, len(user.databases))
            self._assert_execute_call(expected, mock_execute)

    def _assert_execute_call(self, expected_query, execute_mock, call_idx=0):
        args, _ = execute_mock.call_args_list[call_idx]
        self.assertTrue(execute_mock.called,
                        "The client object was not called.")
        self.assertEqual(expected_query, args[0].text,
                         "Queries are not the same.")

    def test_change_passwords(self):
        user = [{"name": "test_user", "host": "%", "password": "password"}]
        expected = ("UPDATE mysql.user SET Password="
                    "PASSWORD('password') WHERE User = 'test_user' "
                    "AND Host = '%';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.change_passwords(user)
            self._assert_execute_call(expected, mock_execute)

    def test_update_attributes_password(self):
        db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
                     {"grantee": "'test_user'@'%'", "table_schema": "db2"}]
        expected = ("UPDATE mysql.user SET Password="
                    "PASSWORD('password') WHERE User = 'test_user' "
                    "AND Host = '%';")
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user_attrs = {"password": "password"}
        with patch.object(self.mock_client, 'execute',
                          return_value=db_result) as mock_execute:
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                with patch.object(self.mySqlAdmin, 'grant_access'):
                    self.mySqlAdmin.update_attributes('test_user', '%',
                                                      user_attrs)
                    self.assertEqual(0,
                                     self.mySqlAdmin.grant_access.call_count)
                    self._assert_execute_call(expected, mock_execute,
                                              call_idx=1)
    def test_update_attributes_name(self):
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user_attrs = {"name": "new_name"}
        expected = ("UPDATE mysql.user SET User='new_name' "
                    "WHERE User = 'test_user' AND Host = '%';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                with patch.object(self.mySqlAdmin, 'grant_access'):
                    self.mySqlAdmin.update_attributes('test_user', '%',
                                                      user_attrs)
                    self.mySqlAdmin.grant_access.assert_called_with(
                        'new_name', '%', set([]))
                    self._assert_execute_call(expected, mock_execute,
                                              call_idx=1)

    def test_update_attributes_host(self):
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user_attrs = {"host": "new_host"}
        expected = ("UPDATE mysql.user SET Host='new_host' "
                    "WHERE User = 'test_user' AND Host = '%';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                with patch.object(self.mySqlAdmin, 'grant_access'):
                    self.mySqlAdmin.update_attributes('test_user', '%',
                                                      user_attrs)
                    self.mySqlAdmin.grant_access.assert_called_with(
                        'test_user', 'new_host', set([]))
                    self._assert_execute_call(expected, mock_execute,
                                              call_idx=1)

    def test_create_database(self):
        databases = []
        databases.append(FAKE_DB)
        expected = ("CREATE DATABASE IF NOT EXISTS "
                    "`testDB` CHARACTER SET = 'latin2' "
                    "COLLATE = 'latin2_general_ci';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.create_database(databases)
            self._assert_execute_call(expected, mock_execute)

    def test_create_database_more_than_1(self):
        databases = []
        databases.append(FAKE_DB)
        databases.append(FAKE_DB_2)
        expected_1 = ("CREATE DATABASE IF NOT EXISTS "
                      "`testDB` CHARACTER SET = 'latin2' "
                      "COLLATE = 'latin2_general_ci';")
        expected_2 = ("CREATE DATABASE IF NOT EXISTS "
                      "`testDB2` CHARACTER SET = 'latin2' "
                      "COLLATE = 'latin2_general_ci';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.create_database(databases)
            self._assert_execute_call(expected_1, mock_execute, call_idx=0)
            self._assert_execute_call(expected_2, mock_execute, call_idx=1)

    def test_create_database_no_db(self):
        databases = []
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.create_database(databases)
            mock_execute.assert_not_called()

    def test_delete_database(self):
        database = {"_name": "testDB"}
        expected = "DROP DATABASE `testDB`;"
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.delete_database(database)
            self._assert_execute_call(expected, mock_execute)

    def test_delete_user(self):
        user = {"_name": "testUser", "_host": None}
        expected = "DROP USER `testUser`@`%`;"
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.delete_user(user)
            self._assert_execute_call(expected, mock_execute)

    def test_create_user(self):
        access_grants_expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO "
                                  "`random`@`%` IDENTIFIED BY 'guesswhat';")
        create_user_expected = ("GRANT USAGE ON *.* TO `random`@`%` "
                                "IDENTIFIED BY 'guesswhat';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.create_user(FAKE_USER)
            self._assert_execute_call(create_user_expected, mock_execute,
                                      call_idx=0)
            self._assert_execute_call(access_grants_expected, mock_execute,
                                      call_idx=1)
    @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password',
                  Mock(return_value='some_password'))
    def test_list_databases(self):
        expected = ("SELECT schema_name as name,"
                    " default_character_set_name as charset,"
                    " default_collation_name as collation"
                    " FROM information_schema.schemata WHERE"
                    " schema_name NOT IN ('" +
                    "', '".join(cfg.get_ignored_dbs()) + "')"
                    " ORDER BY schema_name ASC;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_databases()
            self._assert_execute_call(expected, mock_execute)

    def test_list_databases_with_limit(self):
        limit = 2
        expected = ("SELECT schema_name as name,"
                    " default_character_set_name as charset,"
                    " default_collation_name as collation"
                    " FROM information_schema.schemata WHERE"
                    " schema_name NOT IN ('" +
                    "', '".join(cfg.get_ignored_dbs()) + "')"
                    " ORDER BY schema_name ASC LIMIT " + str(limit + 1) + ";")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_databases(limit)
            self._assert_execute_call(expected, mock_execute)

    def test_list_databases_with_marker(self):
        marker = "aMarker"
        expected = ("SELECT schema_name as name,"
                    " default_character_set_name as charset,"
                    " default_collation_name as collation"
                    " FROM information_schema.schemata WHERE"
                    " schema_name NOT IN ('" +
                    "', '".join(cfg.get_ignored_dbs()) + "')"
                    " AND schema_name > '" + marker + "'"
                    " ORDER BY schema_name ASC;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_databases(marker=marker)
            self._assert_execute_call(expected, mock_execute)

    def test_list_databases_with_include_marker(self):
        marker = "aMarker"
        expected = ("SELECT schema_name as name,"
                    " default_character_set_name as charset,"
                    " default_collation_name as collation"
                    " FROM information_schema.schemata WHERE"
                    " schema_name NOT IN ('" +
                    "', '".join(cfg.get_ignored_dbs()) + "')"
                    " AND schema_name >= '" + marker + "'"
                    " ORDER BY schema_name ASC;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_databases(marker=marker, include_marker=True)
            self._assert_execute_call(expected, mock_execute)

    def test_list_users(self):
        expected = ("SELECT User, Host, Marker FROM"
                    " (SELECT User, Host, CONCAT(User, '@', Host) as Marker"
                    " FROM mysql.user ORDER BY User, Host) as innerquery WHERE"
                    " Host != 'localhost' AND User NOT IN ('os_admin', 'root')"
                    " ORDER BY Marker;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_users()
            self._assert_execute_call(expected, mock_execute)

    def test_list_users_with_limit(self):
        limit = 2
        expected = ("SELECT User, Host, Marker FROM"
                    " (SELECT User, Host, CONCAT(User, '@', Host) as Marker"
                    " FROM mysql.user ORDER BY User, Host) as innerquery WHERE"
                    " Host != 'localhost' AND User NOT IN ('os_admin', 'root')"
                    " ORDER BY Marker"
                    " LIMIT " + str(limit + 1) + ";")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_users(limit)
            self._assert_execute_call(expected, mock_execute)

    def test_list_users_with_marker(self):
        marker = "aMarker"
        expected = ("SELECT User, Host, Marker FROM"
                    " (SELECT User, Host, CONCAT(User, '@', Host) as Marker"
                    " FROM mysql.user ORDER BY User, Host) as innerquery WHERE"
                    " Host != 'localhost' AND User NOT IN ('os_admin', 'root')"
                    " AND Marker > '" + marker + "'"
                    " ORDER BY Marker;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_users(marker=marker)
            self._assert_execute_call(expected, mock_execute)
    def test_list_users_with_include_marker(self):
        marker = "aMarker"
        expected = ("SELECT User, Host, Marker FROM"
                    " (SELECT User, Host, CONCAT(User, '@', Host) as Marker"
                    " FROM mysql.user ORDER BY User, Host) as innerquery WHERE"
                    " Host != 'localhost' AND User NOT IN ('os_admin', 'root')"
                    " AND Marker >= '" + marker + "'"
                    " ORDER BY Marker;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            self.mySqlAdmin.list_users(marker=marker, include_marker=True)
            self._assert_execute_call(expected, mock_execute)

    @patch.object(dbaas.MySqlAdmin, '_associate_dbs')
    def test_get_user(self, mock_associate_dbs):
        """
        Unit test for mySqlAdmin.get_user: check that the SQL query formed
        by the get_user method matches the expected query.
        """
        username = "user1"
        hostname = "%"
        user = [{"User": "user1", "Host": "%", 'Password': 'some_thing'}]
        expected = ("SELECT User, Host, Password FROM mysql.user "
                    "WHERE Host != 'localhost' AND User = 'user1' "
                    "AND Host = '%' ORDER BY User, Host;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            fa_mock = Mock(return_value=user)
            mock_execute.return_value = Mock()
            mock_execute.return_value.fetchall = fa_mock
            self.mySqlAdmin.get_user(username, hostname)
            self.assertEqual(1, mock_associate_dbs.call_count)
            self._assert_execute_call(expected, mock_execute)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_fail_get_user(self, *args):
        username = "os_admin"
        hostname = "host"
        self.assertRaisesRegexp(BadRequest,
                                "Username os_admin is not valid",
                                self.mySqlAdmin.get_user, username, hostname)

    def test_grant_access(self):
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user.password = 'some_password'
        databases = ['db1']
        expected = ("GRANT ALL PRIVILEGES ON `db1`.* TO `test_user`@`%` "
                    "IDENTIFIED BY PASSWORD 'some_password';")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                self.mySqlAdmin.grant_access('test_user', '%', databases)
                self._assert_execute_call(expected, mock_execute)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_fail_grant_access(self, *args):
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user.password = 'some_password'
        databases = ['mysql']
        with patch.object(self.mock_client, 'execute') as mock_execute:
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                self.mySqlAdmin.grant_access('test_user', '%', databases)
                # since 'mysql' is not a database users may be granted
                # access to, verify that execute was not called in
                # grant_access.
                mock_execute.assert_not_called()
    def test_is_root_enabled(self):
        expected = ("SELECT User FROM mysql.user WHERE "
                    "User = 'root' AND Host != 'localhost';")
        with patch.object(dbaas.MySqlRootAccess, 'local_sql_client',
                          return_value=self.mock_cli_ctx_mgr):
            with patch.object(self.mock_client, 'execute') as mock_execute:
                self.mySqlAdmin.is_root_enabled()
                self._assert_execute_call(expected, mock_execute)

    def test_revoke_access(self):
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user.password = 'some_password'
        databases = ['db1']
        expected = ("REVOKE ALL ON `['db1']`.* FROM `test_user`@`%`;")
        with patch.object(self.mock_client, 'execute') as mock_execute:
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                self.mySqlAdmin.revoke_access('test_usr', '%', databases)
                self._assert_execute_call(expected, mock_execute)

    def test_list_access(self):
        user = MagicMock()
        user.name = "test_user"
        user.host = "%"
        user.databases = ['db1', 'db2']
        with patch.object(self.mock_client, 'execute'):
            with patch.object(self.mySqlAdmin, '_get_user',
                              return_value=user):
                databases = self.mySqlAdmin.list_access('test_usr', '%')
                self.assertEqual(2, len(databases),
                                 "List access queries are not the same")
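

# Illustrative sketch, not part of the original suite: the assertions in
# MySqlAdminTest reduce to capturing the TextClause passed to
# client.execute and comparing its .text with the expected SQL, e.g.:
def _example_assert_sql(execute_mock, expected_query, call_idx=0):
    args, _ = execute_mock.call_args_list[call_idx]
    assert expected_query == args[0].text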


class MySqlAppTest(trove_testtools.TestCase):

    def setUp(self):
        super(MySqlAppTest, self).setUp()
        self.orig_utils_execute_with_timeout = \
            mysql_common_service.utils.execute_with_timeout
        self.orig_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.orig_unlink = os.unlink
        self.orig_service_discovery = operating_system.service_discovery
        mysql_app_patcher = patch.multiple(mysql_common_service.BaseMySqlApp,
                                           get_engine=DEFAULT,
                                           get_auth_password=DEFAULT,
                                           configuration_manager=DEFAULT)
        self.addCleanup(mysql_app_patcher.stop)
        mysql_app_patcher.start()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.mySqlApp = MySqlApp(self.appStatus)
        mysql_service = {'cmd_start': Mock(),
                         'cmd_stop': Mock(),
                         'cmd_enable': Mock(),
                         'cmd_disable': Mock(),
                         'cmd_bootstrap_galera_cluster': Mock(),
                         'bin': Mock()}
        operating_system.service_discovery = Mock(
            return_value=mysql_service)
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        os.unlink = Mock()
        self.mock_client = Mock()
        self.mock_execute = Mock()
        self.mock_client.__enter__ = Mock()
        self.mock_client.__exit__ = Mock()
        self.mock_client.__enter__.return_value.execute = self.mock_execute
        self.orig_create_engine = sqlalchemy.create_engine

    def tearDown(self):
        mysql_common_service.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout
        time.sleep = self.orig_time_sleep
        time.time = self.orig_time_time
        os.unlink = self.orig_unlink
        operating_system.service_discovery = self.orig_service_discovery
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        sqlalchemy.create_engine = self.orig_create_engine
        super(MySqlAppTest, self).tearDown()

    def assert_reported_status(self, expected_status):
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    def mysql_starts_successfully(self):
        def start(update_db=False):
            self.appStatus.set_next_status(
                rd_instance.ServiceStatuses.RUNNING)

        self.mySqlApp.start_mysql.side_effect = start

    def mysql_starts_unsuccessfully(self):
        def start():
            raise RuntimeError("MySQL failed to start!")

        self.mySqlApp.start_mysql.side_effect = start

    def mysql_stops_successfully(self):
        def stop():
            self.appStatus.set_next_status(
                rd_instance.ServiceStatuses.SHUTDOWN)

        self.mySqlApp.stop_db.side_effect = stop

    def mysql_stops_unsuccessfully(self):
        def stop():
            raise RuntimeError("MySQL failed to stop!")

        self.mySqlApp.stop_db.side_effect = stop

    def test_stop_mysql(self):
        mysql_common_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.stop_db()
            self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_stop_mysql_with_db_update(self):
        mysql_common_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.stop_db(True)
            self.assertTrue(conductor_api.API.heartbeat.called_once_with(
                self.FAKE_ID,
                {'service_status':
                 rd_instance.ServiceStatuses.SHUTDOWN.description}))

    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test_stop_mysql_do_not_start_on_reboot(self, mock_execute):
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.stop_db(True, True)
            self.assertTrue(conductor_api.API.heartbeat.called_once_with(
                self.FAKE_ID,
                {'service_status':
                 rd_instance.ServiceStatuses.SHUTDOWN.description}))
            self.assertEqual(2, mock_execute.call_count)

    @patch('trove.guestagent.datastore.service.LOG')
    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_stop_mysql_error(self, *args):
        mysql_common_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.mySqlApp.state_change_wait_time = 1
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.assertRaises(RuntimeError, self.mySqlApp.stop_db)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    @patch.object(operating_system, 'service_discovery',
                  side_effect=KeyError('error'))
    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test_stop_mysql_key_error(self, mock_execute, mock_service,
                                  mock_logging):
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
                                    self.mySqlApp.stop_db)
            self.assertEqual(0, mock_execute.call_count)

    def test_restart_is_successful(self):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.restart()
            self.assertTrue(self.mySqlApp.stop_db.called)
            self.assertTrue(self.mySqlApp.start_mysql.called)
            self.assertTrue(conductor_api.API.heartbeat.called_once_with(
                self.FAKE_ID,
                {'service_status':
                 rd_instance.ServiceStatuses.RUNNING.description}))

    def test_restart_mysql_wont_start_up(self):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mysql_stops_unsuccessfully()
        self.mysql_starts_unsuccessfully()
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.assertRaises(RuntimeError, self.mySqlApp.restart)
            self.assertTrue(self.mySqlApp.stop_db.called)
            self.assertFalse(self.mySqlApp.start_mysql.called)
            self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    @patch.object(dbaas.MySqlApp, 'get_data_dir', return_value='some path')
    def test_wipe_ib_logfiles_error(self, get_datadir_mock, mock_logging):
        mocked = Mock(side_effect=ProcessExecutionError('Error'))
        mysql_common_service.utils.execute_with_timeout = mocked
        self.assertRaises(ProcessExecutionError,
                          self.mySqlApp.wipe_ib_logfiles)

    def test_start_mysql(self):
        mysql_common_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.mySqlApp._enable_mysql_on_boot = Mock()
        self.mySqlApp.start_mysql()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_start_mysql_with_db_update(self):
        mysql_common_service.utils.execute_with_timeout = Mock()
        self.mySqlApp._enable_mysql_on_boot = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.start_mysql(update_db=True)
            self.assertTrue(conductor_api.API.heartbeat.called_once_with(
                self.FAKE_ID,
                {'service_status':
                 rd_instance.ServiceStatuses.RUNNING.description}))

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    @patch('trove.guestagent.datastore.service.LOG')
    def test_start_mysql_runs_forever(self, *args):
        mysql_common_service.utils.execute_with_timeout = Mock()
        self.mySqlApp._enable_mysql_on_boot = Mock()
        self.mySqlApp.state_change_wait_time = 1
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
            self.assertTrue(conductor_api.API.heartbeat.called_once_with(
                self.FAKE_ID,
                {'service_status':
                 rd_instance.ServiceStatuses.SHUTDOWN.description}))

    @patch('trove.guestagent.datastore.service.LOG')
    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_start_mysql_error(self, *args):
        self.mySqlApp._enable_mysql_on_boot = Mock()
        mocked = Mock(side_effect=ProcessExecutionError('Error'))
        mysql_common_service.utils.execute_with_timeout = mocked
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)

    def test_start_db_with_conf_changes(self):
        self.mySqlApp.start_mysql = Mock()
        self.mysql_starts_successfully()
        self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
        with patch.object(self.mySqlApp, '_reset_configuration') as cfg_reset:
            configuration = 'some junk'
            self.mySqlApp.start_db_with_conf_changes(configuration)
            cfg_reset.assert_called_once_with(configuration)
        self.assertTrue(self.mySqlApp.start_mysql.called)
        self.assertEqual(rd_instance.ServiceStatuses.RUNNING,
                         self.appStatus._get_actual_db_status())

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_start_db_with_conf_changes_mysql_is_running(self, *args):
        self.mySqlApp.start_mysql = Mock()
        self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
        self.assertRaises(RuntimeError,
                          self.mySqlApp.start_db_with_conf_changes,
                          Mock())

    def test_configuration_reset(self):
        with patch.object(self.mySqlApp, '_reset_configuration') as cfg_reset:
            configuration = {'config_contents': 'some junk'}
            self.mySqlApp.reset_configuration(configuration=configuration)
            cfg_reset.assert_called_once_with('some junk')
    @patch.object(dbaas.MySqlApp, 'get_auth_password',
                  return_value='some_password')
    def test_reset_configuration(self, auth_pwd_mock):
        save_cfg_mock = Mock()
        save_auth_mock = Mock()
        wipe_ib_mock = Mock()

        configuration = {'config_contents': 'some junk'}

        self.mySqlApp.configuration_manager.save_configuration = save_cfg_mock
        self.mySqlApp._save_authentication_properties = save_auth_mock
        self.mySqlApp.wipe_ib_logfiles = wipe_ib_mock
        self.mySqlApp.reset_configuration(configuration=configuration)

        save_cfg_mock.assert_called_once_with('some junk')
        save_auth_mock.assert_called_once_with(
            auth_pwd_mock.return_value)
        wipe_ib_mock.assert_called_once_with()

    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test__enable_mysql_on_boot(self, mock_execute):
        mysql_service = \
            mysql_common_service.operating_system.service_discovery(["mysql"])
        self.mySqlApp._enable_mysql_on_boot()
        self.assertEqual(1, mock_execute.call_count)
        mock_execute.assert_called_with(mysql_service['cmd_enable'],
                                        shell=True)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    @patch.object(operating_system, 'service_discovery',
                  side_effect=KeyError('error'))
    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service,
                                        mock_logging):
        self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
                                self.mySqlApp._enable_mysql_on_boot)
        self.assertEqual(0, mock_execute.call_count)

    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test__disable_mysql_on_boot(self, mock_execute):
        mysql_service = \
            mysql_common_service.operating_system.service_discovery(["mysql"])
        self.mySqlApp._disable_mysql_on_boot()
        self.assertEqual(1, mock_execute.call_count)
        mock_execute.assert_called_with(mysql_service['cmd_disable'],
                                        shell=True)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    @patch.object(operating_system, 'service_discovery',
                  side_effect=KeyError('error'))
    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service,
                                         mock_logging):
        self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
                                self.mySqlApp._disable_mysql_on_boot)
        self.assertEqual(0, mock_execute.call_count)

    def test_update_overrides(self):
        override_value = {'key': 'value'}
        with patch.object(self.mySqlApp.configuration_manager,
                          'apply_user_override') as apply_usr_mock:
            self.mySqlApp.update_overrides(override_value)
            apply_usr_mock.assert_called_once_with({'mysqld': override_value})

    def test_remove_override(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'remove_user_override') as remove_usr_mock:
            self.mySqlApp.remove_overrides()
            remove_usr_mock.assert_called_once_with()

    def test_write_replication_source_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'apply_system_override') as apply_sys_mock:
            self.mySqlApp.write_replication_source_overrides('something')
            apply_sys_mock.assert_called_once_with(
                'something', mysql_common_service.CNF_MASTER)

    def test_write_replication_replica_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'apply_system_override') as apply_sys_mock:
            self.mySqlApp.write_replication_replica_overrides('something')
            apply_sys_mock.assert_called_once_with(
                'something', mysql_common_service.CNF_SLAVE)

    def test_remove_replication_source_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'remove_system_override') as remove_sys_mock:
            self.mySqlApp.remove_replication_source_overrides()
            remove_sys_mock.assert_called_once_with(
                mysql_common_service.CNF_MASTER)

    def test_remove_replication_replica_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'remove_system_override') as remove_sys_mock:
            self.mySqlApp.remove_replication_replica_overrides()
            remove_sys_mock.assert_called_once_with(
                mysql_common_service.CNF_SLAVE)

    def test_exists_replication_source_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'has_system_override',
                          return_value=Mock()) as exists_mock:
            self.assertEqual(
                exists_mock.return_value,
                self.mySqlApp.exists_replication_source_overrides())

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_grant_replication_privilege(self, *args):
        replication_user = {'name': 'testUSr', 'password': 'somePwd'}
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.grant_replication_privilege(replication_user)
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("GRANT REPLICATION SLAVE ON *.* TO `testUSr`@`%` "
                    "IDENTIFIED BY 'somePwd';")
        self.assertEqual(expected, args[0].text,
                         "Replication grant statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_port(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.get_port()
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("SELECT @@port")
        self.assertEqual(expected, args[0],
                         "Port queries are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_binlog_position(self, *args):
        result = {'File': 'mysql-bin.003', 'Position': '73'}
        self.mock_execute.return_value.first = Mock(return_value=result)
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            found_result = self.mySqlApp.get_binlog_position()

        self.assertEqual(result['File'], found_result['log_file'])
        self.assertEqual(result['Position'], found_result['position'])

        args, _ = self.mock_execute.call_args_list[0]
        expected = ("SHOW MASTER STATUS")
        self.assertEqual(expected, args[0],
                         "Master status queries are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_execute_on_client(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.execute_on_client('show tables')
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("show tables")
        self.assertEqual(expected, args[0],
                         "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    @patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
    def test_start_slave(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.start_slave()
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("START SLAVE")
        self.assertEqual(expected, args[0],
                         "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    @patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
    def test_stop_slave_with_failover(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value={'Master_User': 'root'})
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp.stop_slave(True)
        self.assertEqual('root', result['replication_user'])
        expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL"]
"STOP SLAVE", "RESET SLAVE ALL"] self.assertEqual(len(expected), len(self.mock_execute.call_args_list)) for i in range(len(self.mock_execute.call_args_list)): args, _ = self.mock_execute.call_args_list[i] self.assertEqual(expected[i], args[0], "Sql statements are not the same") @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) @patch.object(dbaas.MySqlApp, '_wait_for_slave_status') def test_stop_slave_without_failover(self, *args): self.mock_execute.return_value.first = Mock( return_value={'Master_User': 'root'}) with patch.object(dbaas.MySqlApp, 'local_sql_client', return_value=self.mock_client): result = self.mySqlApp.stop_slave(False) self.assertEqual('root', result['replication_user']) expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL", "DROP USER root"] self.assertEqual(len(expected), len(self.mock_execute.call_args_list)) for i in range(len(self.mock_execute.call_args_list)): args, _ = self.mock_execute.call_args_list[i] self.assertEqual(expected[i], args[0], "Sql statements are not the same") @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) def test_stop_master(self, *args): with patch.object(dbaas.MySqlApp, 'local_sql_client', return_value=self.mock_client): self.mySqlApp.stop_master() args, _ = self.mock_execute.call_args_list[0] expected = ("RESET MASTER") self.assertEqual(expected, args[0], "Sql statements are not the same") @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) def test__wait_for_slave_status(self, *args): mock_client = Mock() mock_client.execute = Mock() result = ['Slave_running', 'on'] mock_client.execute.return_value.first = Mock(return_value=result) self.mySqlApp._wait_for_slave_status('ON', mock_client, 5) args, _ = mock_client.execute.call_args_list[0] expected = ("SHOW GLOBAL STATUS like 'slave_running'") self.assertEqual(expected, args[0], "Sql statements are not the same") @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) @patch.object(utils, 'poll_until', side_effect=PollTimeOut) def test_fail__wait_for_slave_status(self, *args): self.assertRaisesRegexp(RuntimeError, "Replication is not on after 5 seconds.", self.mySqlApp._wait_for_slave_status, 'ON', Mock(), 5) @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) def test__get_slave_status(self, *args): self.mock_execute.return_value.first = Mock(return_value='some_thing') with patch.object(dbaas.MySqlApp, 'local_sql_client', return_value=self.mock_client): result = self.mySqlApp._get_slave_status() self.assertEqual('some_thing', result) args, _ = self.mock_execute.call_args_list[0] expected = ("SHOW SLAVE STATUS") self.assertEqual(expected, args[0], "Sql statements are not the same") @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) def test_get_latest_txn_id(self, *args): self.mock_execute.return_value.first = Mock(return_value=['some_thing'] ) with patch.object(dbaas.MySqlApp, 'local_sql_client', return_value=self.mock_client): result = self.mySqlApp.get_latest_txn_id() self.assertEqual('some_thing', result) args, _ = self.mock_execute.call_args_list[0] expected = ("SELECT @@global.gtid_executed") self.assertEqual(expected, args[0], "Sql statements are not the same") @patch.object(dbaas, 'get_engine', return_value=MagicMock(name='get_engine')) def test_wait_for_txn(self, *args): with patch.object(dbaas.MySqlApp, 'local_sql_client', return_value=self.mock_client): self.mySqlApp.wait_for_txn('abcd') args, _ = 
        expected = ("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('abcd')")
        self.assertEqual(expected, args[0],
                         "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_txn_count(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value=['b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1'])
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp.get_txn_count()
        self.assertEqual(1, result)
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("SELECT @@global.gtid_executed")
        self.assertEqual(expected, args[0],
                         "Sql statements are not the same")

    @patch.multiple(pkg.Package,
                    pkg_is_installed=Mock(return_value=False),
                    pkg_install=DEFAULT)
    def test_install(self, pkg_install):
        self.mySqlApp._install_mysql = Mock()
        utils.execute_with_timeout = Mock()
        self.mySqlApp._clear_mysql_config = Mock()
        self.mySqlApp._create_mysql_confd_dir = Mock()
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.install_if_needed(["package"])
        self.assertTrue(pkg_install.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(operating_system, 'write_file')
    def test_save_authentication_properties(self, write_file_mock):
        self.mySqlApp._save_authentication_properties("some_password")
        write_file_mock.assert_called_once_with(
            MySqlApp.get_client_auth_file(),
            {'client': {'host': '127.0.0.1',
                        'password': 'some_password',
                        'user': mysql_common_service.ADMIN_USER_NAME}},
            codec=MySqlApp.CFG_CODEC)

    @patch.object(utils, 'generate_random_password',
                  return_value='some_password')
    @patch.object(mysql_common_service, 'clear_expired_password')
    def test_secure(self, clear_pwd_mock, auth_pwd_mock):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._reset_configuration = Mock()
        self.mySqlApp._apply_user_overrides = Mock()
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        sqlalchemy.create_engine = Mock()

        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.secure('contents')

            self.assertTrue(self.mySqlApp.stop_db.called)
            self.mySqlApp._reset_configuration.assert_has_calls(
                [call('contents', auth_pwd_mock.return_value)])

            self.assertTrue(self.mySqlApp.start_mysql.called)
            self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(dbaas, 'get_engine')
    @patch.object(utils, 'generate_random_password',
                  return_value='some_password')
    def test_secure_root(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.secure_root()
        update_root_password, _ = self.mock_execute.call_args_list[0]
        update_expected = ("UPDATE mysql.user SET Password="
                           "PASSWORD('some_password') "
                           "WHERE User = 'root' AND Host = 'localhost';")
        remove_root, _ = self.mock_execute.call_args_list[1]
        remove_expected = ("DELETE FROM mysql.user WHERE "
                           "User = 'root' AND Host != 'localhost';")
        self.assertEqual(update_expected, update_root_password[0].text,
                         "Update root password queries are not the same")
        self.assertEqual(remove_expected, remove_root[0].text,
                         "Remove root queries are not the same")

    @patch.object(operating_system, 'create_directory')
    def test__create_mysql_confd_dir(self, mkdir_mock):
        self.mySqlApp._create_mysql_confd_dir()
        mkdir_mock.assert_called_once_with('/etc/mysql/conf.d', as_root=True)

    @patch.object(operating_system, 'move')
    def test__clear_mysql_config(self, mock_move):
        self.mySqlApp._clear_mysql_config()
        self.assertEqual(3, mock_move.call_count)

    @patch.object(operating_system, 'move',
                  side_effect=ProcessExecutionError)
    def test_exception__clear_mysql_config(self, mock_move):
        self.mySqlApp._clear_mysql_config()
        # The call count needs to be the same as in the normal case,
        # because the exception is swallowed so that the flow can go on
        # to the next file move.
        self.assertEqual(3, mock_move.call_count)

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_apply_overrides(self, *args):
        overrides = {'sort_buffer_size': 1000000}
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.apply_overrides(overrides)
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("SET GLOBAL sort_buffer_size=1000000")
        self.assertEqual(expected, args[0].text,
                         "Set global statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_make_read_only(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.make_read_only('ON')
        args, _ = self.mock_execute.call_args_list[0]
        expected = ("set global read_only = ON")
        self.assertEqual(expected, args[0].text,
                         "Set read_only statements are not the same")

    @patch.multiple(pkg.Package,
                    pkg_is_installed=Mock(return_value=False),
                    pkg_install=Mock(
                        side_effect=pkg.PkgPackageStateError("Install error")))
    def test_install_install_error(self):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._clear_mysql_config = Mock()
        self.mySqlApp._create_mysql_confd_dir = Mock()

        self.assertRaises(pkg.PkgPackageStateError,
                          self.mySqlApp.install_if_needed, ["package"])

        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(mysql_common_service, 'clear_expired_password')
    def test_secure_write_conf_error(self, clear_pwd_mock):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._reset_configuration = Mock(
            side_effect=IOError("Could not write file"))
        self.mySqlApp._apply_user_overrides = Mock()
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        sqlalchemy.create_engine = Mock()

        self.assertRaises(IOError, self.mySqlApp.secure, "foo")

        self.assertTrue(self.mySqlApp.stop_db.called)
        self.assertFalse(self.mySqlApp.start_mysql.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(dbaas.MySqlApp, '_save_authentication_properties')
    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_reset_admin_password(self, mock_engine, mock_save_auth):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp._create_admin_user = Mock()
            self.mySqlApp.reset_admin_password("newpassword")
            self.assertEqual(1, self.mySqlApp._create_admin_user.call_count)
            mock_save_auth.assert_called_once_with("newpassword")


class TextClauseMatcher(object):

    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return "TextClause(%s)" % self.text

    def __eq__(self, arg):
        print("Matching %s" % arg.text)
        return self.text in arg.text
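

# Illustrative sketch, not part of the original suite: TextClauseMatcher
# compares by substring, which lets a test assert on the interesting
# fragment of a generated statement without spelling out the full query:
def _example_text_clause_matcher_usage(mock_execute):
    mock_execute.assert_any_call(TextClauseMatcher('CREATE USER'),
                                 user='root', host='%')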
        local_client_patcher = patch.object(dbaas.MySqlApp,
                                            'local_sql_client',
                                            return_value=self.mock_cli_ctx_mgr)
        self.addCleanup(local_client_patcher.stop)
        local_client_patcher.start()

    def tearDown(self):
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        super(MySqlAppMockTest, self).tearDown()

    @patch.object(mysql_common_service, 'clear_expired_password')
    @patch.object(utils, 'generate_random_password',
                  return_value='some_password')
    def test_secure_keep_root(self, auth_pwd_mock, clear_pwd_mock):
        with patch.object(self.mock_client,
                          'execute', return_value=None) as mock_execute:
            utils.execute_with_timeout = MagicMock(return_value=None)
            # skip writing the file for now
            with patch.object(os.path, 'isfile', return_value=False):
                mock_status = MagicMock()
                mock_status.wait_for_real_status_to_change_to = MagicMock(
                    return_value=True)
                app = MySqlApp(mock_status)
                app._reset_configuration = MagicMock()
                app.start_mysql = MagicMock(return_value=None)
                app._wait_for_mysql_to_be_really_alive = MagicMock(
                    return_value=True)
                app.stop_db = MagicMock(return_value=None)
                app.secure('foo')
                reset_config_calls = [call('foo', auth_pwd_mock.return_value)]
                app._reset_configuration.assert_has_calls(reset_config_calls)
                self.assertTrue(mock_execute.called)

    @patch.object(mysql_common_service, 'clear_expired_password')
    @patch.object(mysql_common_service.BaseMySqlApp,
                  'get_auth_password', return_value='some_password')
    def test_secure_with_mycnf_error(self, auth_pwd_mock, clear_pwd_mock):
        with patch.object(self.mock_client,
                          'execute', return_value=None) as mock_execute:
            with patch.object(operating_system, 'service_discovery',
                              return_value={'cmd_stop': 'service mysql stop'}):
                utils.execute_with_timeout = MagicMock(return_value=None)
                # skip writing the file for now
                with patch.object(dbaas.MySqlApp, '_reset_configuration',
                                  side_effect=RuntimeError('Error')):
                    mock_status = MagicMock()
                    mock_status.wait_for_real_status_to_change_to = MagicMock(
                        return_value=True)
                    mysql_common_service.clear_expired_password = \
                        MagicMock(return_value=None)
                    app = MySqlApp(mock_status)
                    self.assertRaises(RuntimeError, app.secure, None)
                    self.assertTrue(mock_execute.called)
                    # At least called twice
                    self.assertTrue(mock_execute.call_count >= 2)
                    (mock_status.wait_for_real_status_to_change_to.
assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN, app.state_change_wait_time, False)) class MySqlRootStatusTest(trove_testtools.TestCase): def setUp(self): super(MySqlRootStatusTest, self).setUp() self.orig_utils_execute_with_timeout = utils.execute_with_timeout create_engine_patcher = patch.object(sqlalchemy, 'create_engine') self.addCleanup(create_engine_patcher.stop) create_engine_patcher.start() mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT, configuration_manager=DEFAULT) self.addCleanup(mysql_app_patcher.stop) mysql_app_patcher.start() self.mock_cli_ctx_mgr = Mock() self.mock_client = MagicMock() self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) self.mock_cli_ctx_mgr.__exit__ = Mock() local_client_patcher = patch.object(dbaas.MySqlRootAccess, 'local_sql_client', return_value=self.mock_cli_ctx_mgr) self.addCleanup(local_client_patcher.stop) local_client_patcher.start() def tearDown(self): utils.execute_with_timeout = self.orig_utils_execute_with_timeout super(MySqlRootStatusTest, self).tearDown() @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', return_value='some_password') def test_root_is_enabled(self, auth_pwd_mock): mock_rs = MagicMock() mock_rs.rowcount = 1 with patch.object(self.mock_client, 'execute', return_value=mock_rs): self.assertTrue(MySqlRootAccess().is_root_enabled()) @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', return_value='some_password') def test_root_is_not_enabled(self, auth_pwd_mock): mock_rs = MagicMock() mock_rs.rowcount = 0 with patch.object(self.mock_client, 'execute', return_value=mock_rs): self.assertFalse(MySqlRootAccess().is_root_enabled()) @patch.object(mysql_common_service, 'clear_expired_password') @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', return_value='some_password') def test_enable_root(self, auth_pwd_mock, clear_pwd_mock): with patch.object(self.mock_client, 'execute', return_value=None) as mock_execute: # invocation user_ser = MySqlRootAccess().enable_root() # verification self.assertIsNotNone(user_ser) mock_execute.assert_any_call(TextClauseMatcher('CREATE USER'), user='root', host='%') mock_execute.assert_any_call(TextClauseMatcher( 'GRANT ALL PRIVILEGES ON *.*')) mock_execute.assert_any_call(TextClauseMatcher( 'UPDATE mysql.user')) def test_root_disable(self): with patch.object(self.mock_client, 'execute', return_value=None) as mock_execute: # invocation MySqlRootAccess().disable_root() # verification mock_execute.assert_any_call(TextClauseMatcher( sql_query.REMOVE_ROOT)) class MockStats: f_blocks = 1024 ** 2 f_bsize = 4096 f_bfree = 512 * 1024 class InterrogatorTest(trove_testtools.TestCase): def tearDown(self): super(InterrogatorTest, self).tearDown() def test_to_gb(self): result = to_gb(123456789) self.assertEqual(0.11, result) def test_to_gb_small(self): result = to_gb(2) self.assertEqual(0.01, result) def test_to_gb_zero(self): result = to_gb(0) self.assertEqual(0.0, result) def test_to_mb(self): result = to_mb(123456789) self.assertEqual(117.74, result) def test_to_mb_small(self): result = to_mb(2) self.assertEqual(0.01, result) def test_to_mb_zero(self): result = to_mb(0) self.assertEqual(0.0, result) def test_get_filesystem_volume_stats(self): with patch.object(os, 'statvfs', return_value=MockStats): result = get_filesystem_volume_stats('/some/path/') self.assertEqual(4096, result['block_size']) self.assertEqual(1048576, result['total_blocks']) self.assertEqual(524288, result['free_blocks']) self.assertEqual(4.0, 
result['total']) self.assertEqual(2147483648, result['free']) self.assertEqual(2.0, result['used']) @patch('trove.guestagent.dbaas.LOG') def test_get_filesystem_volume_stats_error(self, *args): with patch.object(os, 'statvfs', side_effect=OSError): self.assertRaises( RuntimeError, get_filesystem_volume_stats, '/nonexistent/path') class ServiceRegistryTest(trove_testtools.TestCase): def setUp(self): super(ServiceRegistryTest, self).setUp() def tearDown(self): super(ServiceRegistryTest, self).tearDown() def test_datastore_registry_with_extra_manager(self): datastore_registry_ext_test = { 'test': 'trove.guestagent.datastore.test.manager.Manager', } with patch.object(dbaas_sr, 'get_custom_managers', return_value=datastore_registry_ext_test): test_dict = dbaas_sr.datastore_registry() self.assertEqual(datastore_registry_ext_test.get('test', None), test_dict.get('test')) self.assertEqual('trove.guestagent.datastore.mysql.' 'manager.Manager', test_dict.get('mysql')) self.assertEqual('trove.guestagent.datastore.experimental.' 'percona.manager.Manager', test_dict.get('percona')) self.assertEqual('trove.guestagent.datastore.experimental.redis.' 'manager.Manager', test_dict.get('redis')) self.assertEqual('trove.guestagent.datastore.experimental.' 'cassandra.manager.Manager', test_dict.get('cassandra')) self.assertEqual('trove.guestagent.datastore.experimental.' 'couchbase.manager.Manager', test_dict.get('couchbase')) self.assertEqual('trove.guestagent.datastore.experimental.mongodb.' 'manager.Manager', test_dict.get('mongodb')) self.assertEqual('trove.guestagent.datastore.experimental.couchdb.' 'manager.Manager', test_dict.get('couchdb')) self.assertEqual('trove.guestagent.datastore.experimental.db2.' 'manager.Manager', test_dict.get('db2')) def test_datastore_registry_with_existing_manager(self): datastore_registry_ext_test = { 'mysql': 'trove.guestagent.datastore.mysql.' 'manager.Manager123', } with patch.object(dbaas_sr, 'get_custom_managers', return_value=datastore_registry_ext_test): test_dict = dbaas_sr.datastore_registry() self.assertEqual('trove.guestagent.datastore.mysql.' 'manager.Manager123', test_dict.get('mysql')) self.assertEqual('trove.guestagent.datastore.experimental.' 'percona.manager.Manager', test_dict.get('percona')) self.assertEqual('trove.guestagent.datastore.experimental.redis.' 'manager.Manager', test_dict.get('redis')) self.assertEqual('trove.guestagent.datastore.experimental.' 'cassandra.manager.Manager', test_dict.get('cassandra')) self.assertEqual('trove.guestagent.datastore.experimental.' 'couchbase.manager.Manager', test_dict.get('couchbase')) self.assertEqual('trove.guestagent.datastore.experimental.mongodb.' 'manager.Manager', test_dict.get('mongodb')) self.assertEqual('trove.guestagent.datastore.experimental.couchdb.' 'manager.Manager', test_dict.get('couchdb')) self.assertEqual('trove.guestagent.datastore.experimental.vertica.' 'manager.Manager', test_dict.get('vertica')) self.assertEqual('trove.guestagent.datastore.experimental.db2.' 'manager.Manager', test_dict.get('db2')) self.assertEqual('trove.guestagent.datastore.experimental.mariadb.' 'manager.Manager', test_dict.get('mariadb')) def test_datastore_registry_with_blank_dict(self): datastore_registry_ext_test = dict() with patch.object(dbaas_sr, 'get_custom_managers', return_value=datastore_registry_ext_test): test_dict = dbaas_sr.datastore_registry() self.assertEqual('trove.guestagent.datastore.mysql.' 'manager.Manager', test_dict.get('mysql')) self.assertEqual('trove.guestagent.datastore.experimental.' 
                         'percona.manager.Manager',
                         test_dict.get('percona'))
        self.assertEqual('trove.guestagent.datastore.experimental.redis.'
                         'manager.Manager', test_dict.get('redis'))
        self.assertEqual('trove.guestagent.datastore.experimental.'
                         'cassandra.manager.Manager',
                         test_dict.get('cassandra'))
        self.assertEqual('trove.guestagent.datastore.experimental.'
                         'couchbase.manager.Manager',
                         test_dict.get('couchbase'))
        self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
                         'manager.Manager', test_dict.get('mongodb'))
        self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
                         'manager.Manager', test_dict.get('couchdb'))
        self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
                         'manager.Manager', test_dict.get('vertica'))
        self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                         'manager.Manager', test_dict.get('db2'))
        self.assertEqual('trove.guestagent.datastore.experimental.mariadb.'
                         'manager.Manager', test_dict.get('mariadb'))


class KeepAliveConnectionTest(trove_testtools.TestCase):

    class OperationalError(Exception):

        def __init__(self, value):
            self.value = value
            self.args = [value]

        def __str__(self):
            return repr(self.value)

    def setUp(self):
        super(KeepAliveConnectionTest, self).setUp()
        self.orig_utils_execute_with_timeout = \
            mysql_common_service.utils.execute_with_timeout
        self.orig_LOG_err = dbaas.LOG

    def tearDown(self):
        super(KeepAliveConnectionTest, self).tearDown()
        mysql_common_service.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout
        dbaas.LOG = self.orig_LOG_err

    def test_checkout_type_error(self):
        dbapi_con = Mock()
        dbapi_con.ping = Mock(side_effect=TypeError("Type Error"))

        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(TypeError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())

    def test_checkout_disconnection_error(self):
        dbapi_con = Mock()
        dbapi_con.OperationalError = self.OperationalError
        dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013))

        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(sqlalchemy.exc.DisconnectionError,
                          self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())

    def test_checkout_operation_error(self):
        dbapi_con = Mock()
        dbapi_con.OperationalError = self.OperationalError
        dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234))

        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(self.OperationalError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())


class BaseDbStatusTest(trove_testtools.TestCase):

    def setUp(self):
        super(BaseDbStatusTest, self).setUp()
        util.init_db()
        self.orig_dbaas_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        dbaas.CONF.guest_id = self.FAKE_ID
        patcher_log = patch.object(base_datastore_service, 'LOG')
        patcher_context = patch.object(trove_context, 'TroveContext')
        patcher_api = patch.object(conductor_api, 'API')
        patcher_log.start()
        patcher_context.start()
        patcher_api.start()
        self.addCleanup(patcher_log.stop)
        self.addCleanup(patcher_context.stop)
        self.addCleanup(patcher_api.stop)

    def tearDown(self):
        time.sleep = self.orig_dbaas_time_sleep
        time.time = self.orig_time_time
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
        super(BaseDbStatusTest, self).tearDown()

    @patch.object(operating_system, 'write_file')
    def test_begin_install(self, mock_write_file):
        base_db_status = BaseDbStatus()

        base_db_status.begin_install()

        self.assertEqual(rd_instance.ServiceStatuses.BUILDING,
                         base_db_status.status)

    def
test_begin_restart(self): base_db_status = BaseDbStatus() base_db_status.restart_mode = False base_db_status.begin_restart() self.assertTrue(base_db_status.restart_mode) def test_end_restart(self): base_db_status = BaseDbStatus() base_db_status._get_actual_db_status = Mock( return_value=rd_instance.ServiceStatuses.SHUTDOWN) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) base_db_status.end_restart() self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, base_db_status.status) self.assertFalse(base_db_status.restart_mode) def test_is_installed(self): base_db_status = BaseDbStatus() with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.assertTrue(base_db_status.is_installed) def test_is_installed_failed(self): base_db_status = BaseDbStatus() with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=False) self.assertFalse(base_db_status.is_installed) def test_is_restarting(self): base_db_status = BaseDbStatus() base_db_status.restart_mode = True self.assertTrue(base_db_status._is_restarting) def test_is_running(self): base_db_status = BaseDbStatus() base_db_status.status = rd_instance.ServiceStatuses.RUNNING self.assertTrue(base_db_status.is_running) def test_is_running_not(self): base_db_status = BaseDbStatus() base_db_status.status = rd_instance.ServiceStatuses.SHUTDOWN self.assertFalse(base_db_status.is_running) def test_wait_for_real_status_to_change_to(self): base_db_status = BaseDbStatus() base_db_status._get_actual_db_status = Mock( return_value=rd_instance.ServiceStatuses.RUNNING) time.sleep = Mock() time.time = Mock(side_effect=faketime) self.assertTrue(base_db_status. wait_for_real_status_to_change_to (rd_instance.ServiceStatuses.RUNNING, 10)) def test_wait_for_real_status_to_change_to_timeout(self): base_db_status = BaseDbStatus() base_db_status._get_actual_db_status = Mock( return_value=rd_instance.ServiceStatuses.RUNNING) time.sleep = Mock() time.time = Mock(side_effect=faketime) self.assertFalse(base_db_status. 
wait_for_real_status_to_change_to (rd_instance.ServiceStatuses.SHUTDOWN, 10)) def _test_set_status(self, initial_status, new_status, expected_status, install_done=False, force=False): base_db_status = BaseDbStatus() base_db_status.status = initial_status with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=install_done) base_db_status.set_status(new_status, force=force) self.assertEqual(expected_status, base_db_status.status) def test_set_status_force_heartbeat(self): self._test_set_status(rd_instance.ServiceStatuses.BUILDING, rd_instance.ServiceStatuses.RUNNING, rd_instance.ServiceStatuses.RUNNING, force=True) def test_set_status_skip_heartbeat_with_building(self): self._test_set_status(rd_instance.ServiceStatuses.BUILDING, rd_instance.ServiceStatuses.RUNNING, rd_instance.ServiceStatuses.BUILDING) def test_set_status_skip_heartbeat_with_new(self): self._test_set_status(rd_instance.ServiceStatuses.NEW, rd_instance.ServiceStatuses.RUNNING, rd_instance.ServiceStatuses.NEW) def test_set_status_to_failed(self): self._test_set_status(rd_instance.ServiceStatuses.BUILDING, rd_instance.ServiceStatuses.FAILED, rd_instance.ServiceStatuses.FAILED, force=True) def test_set_status_to_build_pending(self): self._test_set_status(rd_instance.ServiceStatuses.BUILDING, rd_instance.ServiceStatuses.INSTANCE_READY, rd_instance.ServiceStatuses.INSTANCE_READY, force=True) def test_set_status_to_shutdown(self): self._test_set_status(rd_instance.ServiceStatuses.RUNNING, rd_instance.ServiceStatuses.SHUTDOWN, rd_instance.ServiceStatuses.SHUTDOWN, install_done=True) def test_wait_for_database_service_status(self): status = BaseDbStatus() expected_status = rd_instance.ServiceStatuses.RUNNING timeout = 10 update_db = False # Test a successful call. with patch.multiple( status, wait_for_real_status_to_change_to=Mock(return_value=True), cleanup_stalled_db_services=DEFAULT): self.assertTrue( status._wait_for_database_service_status( expected_status, timeout, update_db)) status.wait_for_real_status_to_change_to.assert_called_once_with( expected_status, timeout, update_db) self.assertFalse(status.cleanup_stalled_db_services.called) # Test a failing call. with patch.multiple( status, wait_for_real_status_to_change_to=Mock(return_value=False), cleanup_stalled_db_services=DEFAULT): self.assertFalse( status._wait_for_database_service_status( expected_status, timeout, update_db)) status.wait_for_real_status_to_change_to.assert_called_once_with( expected_status, timeout, update_db) status.cleanup_stalled_db_services.assert_called_once_with() # Test a failing call with an error raised from the cleanup code. # No exception should propagate out of the cleanup block. with patch.multiple( status, wait_for_real_status_to_change_to=Mock(return_value=False), cleanup_stalled_db_services=Mock( side_effect=Exception("Error in cleanup."))): self.assertFalse( status._wait_for_database_service_status( expected_status, timeout, update_db)) status.wait_for_real_status_to_change_to.assert_called_once_with( expected_status, timeout, update_db) status.cleanup_stalled_db_services.assert_called_once_with() def test_start_db_service(self): status = BaseDbStatus() service_candidates = ['name1', 'name2'] # Test a successful call with setting auto-start enabled. 
with patch.object( status, '_wait_for_database_service_status', return_value=True) as service_call: with patch.multiple(operating_system, start_service=DEFAULT, enable_service_on_boot=DEFAULT) as os_cmd: status.start_db_service( service_candidates, 10, enable_on_boot=True) service_call.assert_called_once_with( rd_instance.ServiceStatuses.RUNNING, 10, False) os_cmd['start_service'].assert_called_once_with( service_candidates) os_cmd['enable_service_on_boot'].assert_called_once_with( service_candidates) # Test a successful call without auto-start. with patch.object( status, '_wait_for_database_service_status', return_value=True) as service_call: with patch.multiple(operating_system, start_service=DEFAULT, enable_service_on_boot=DEFAULT) as os_cmd: status.start_db_service( service_candidates, 10, enable_on_boot=False) service_call.assert_called_once_with( rd_instance.ServiceStatuses.RUNNING, 10, False) os_cmd['start_service'].assert_called_once_with( service_candidates) self.assertFalse(os_cmd['enable_service_on_boot'].called) # Test a failing call. # The auto-start setting should not get updated if the service call # fails. with patch.object( status, '_wait_for_database_service_status', return_value=False) as service_call: with patch.multiple(operating_system, start_service=DEFAULT, enable_service_on_boot=DEFAULT) as os_cmd: self.assertRaisesRegexp( RuntimeError, "Database failed to start.", status.start_db_service, service_candidates, 10, enable_on_boot=True) os_cmd['start_service'].assert_called_once_with( service_candidates) self.assertFalse(os_cmd['enable_service_on_boot'].called) def test_stop_db_service(self): status = BaseDbStatus() service_candidates = ['name1', 'name2'] # Test a successful call with setting auto-start disabled. with patch.object( status, '_wait_for_database_service_status', return_value=True) as service_call: with patch.multiple(operating_system, stop_service=DEFAULT, disable_service_on_boot=DEFAULT) as os_cmd: status.stop_db_service( service_candidates, 10, disable_on_boot=True) service_call.assert_called_once_with( rd_instance.ServiceStatuses.SHUTDOWN, 10, False) os_cmd['stop_service'].assert_called_once_with( service_candidates) os_cmd['disable_service_on_boot'].assert_called_once_with( service_candidates) # Test a successful call without auto-start. with patch.object( status, '_wait_for_database_service_status', return_value=True) as service_call: with patch.multiple(operating_system, stop_service=DEFAULT, disable_service_on_boot=DEFAULT) as os_cmd: status.stop_db_service( service_candidates, 10, disable_on_boot=False) service_call.assert_called_once_with( rd_instance.ServiceStatuses.SHUTDOWN, 10, False) os_cmd['stop_service'].assert_called_once_with( service_candidates) self.assertFalse(os_cmd['disable_service_on_boot'].called) # Test a failing call. # The auto-start setting should not get updated if the service call # fails. with patch.object( status, '_wait_for_database_service_status', return_value=False) as service_call: with patch.multiple(operating_system, stop_service=DEFAULT, disable_service_on_boot=DEFAULT) as os_cmd: self.assertRaisesRegexp( RuntimeError, "Database failed to stop.", status.stop_db_service, service_candidates, 10, disable_on_boot=True) os_cmd['stop_service'].assert_called_once_with( service_candidates) self.assertFalse(os_cmd['disable_service_on_boot'].called) def test_restart_db_service(self): status = BaseDbStatus() service_candidates = ['name1', 'name2'] # Test the restart flow (stop followed by start). 
# Assert that the auto-start setting does not get changed and the # Trove instance status updates are suppressed during restart. with patch.multiple( status, start_db_service=DEFAULT, stop_db_service=DEFAULT, begin_restart=DEFAULT, end_restart=DEFAULT): status.restart_db_service(service_candidates, 10) status.begin_restart.assert_called_once_with() status.stop_db_service.assert_called_once_with( service_candidates, 10, disable_on_boot=False, update_db=False) status.start_db_service.assert_called_once_with( service_candidates, 10, enable_on_boot=False, update_db=False) status.end_restart.assert_called_once_with() # Test a failing call. # Assert the status heartbeat gets re-enabled. with patch.multiple( status, start_db_service=Mock( side_effect=Exception("Error in database start.")), stop_db_service=DEFAULT, begin_restart=DEFAULT, end_restart=DEFAULT): self.assertRaisesRegexp( RuntimeError, "Database restart failed.", status.restart_db_service, service_candidates, 10) status.begin_restart.assert_called_once_with() status.end_restart.assert_called_once_with() class MySqlAppStatusTest(trove_testtools.TestCase): def setUp(self): super(MySqlAppStatusTest, self).setUp() util.init_db() self.orig_utils_execute_with_timeout = \ mysql_common_service.utils.execute_with_timeout self.orig_load_mysqld_options = \ mysql_common_service.load_mysqld_options self.orig_mysql_common_service_os_path_exists = \ mysql_common_service.os.path.exists self.orig_dbaas_time_sleep = time.sleep self.orig_time_time = time.time self.FAKE_ID = str(uuid4()) InstanceServiceStatus.create(instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) dbaas.CONF.guest_id = self.FAKE_ID def tearDown(self): mysql_common_service.utils.execute_with_timeout = \ self.orig_utils_execute_with_timeout mysql_common_service.load_mysqld_options = \ self.orig_load_mysqld_options mysql_common_service.os.path.exists = \ self.orig_mysql_common_service_os_path_exists time.sleep = self.orig_dbaas_time_sleep time.time = self.orig_time_time InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() dbaas.CONF.guest_id = None super(MySqlAppStatusTest, self).tearDown() def test_get_actual_db_status(self): mysql_common_service.utils.execute_with_timeout = \ Mock(return_value=(None, None)) self.mySqlAppStatus = MySqlAppStatus.get() status = self.mySqlAppStatus._get_actual_db_status() self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status) @patch.object(utils, 'execute_with_timeout', side_effect=ProcessExecutionError()) @patch.object(os.path, 'exists', return_value=True) @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_get_actual_db_status_error_crashed(self, mock_logging, mock_exists, mock_execute): mysql_common_service.load_mysqld_options = Mock(return_value={}) self.mySqlAppStatus = MySqlAppStatus.get() status = self.mySqlAppStatus._get_actual_db_status() self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status) @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_get_actual_db_status_error_shutdown(self, *args): mocked = Mock(side_effect=ProcessExecutionError()) mysql_common_service.utils.execute_with_timeout = mocked mysql_common_service.load_mysqld_options = Mock(return_value={}) mysql_common_service.os.path.exists = Mock(return_value=False) self.mySqlAppStatus = MySqlAppStatus.get() status = self.mySqlAppStatus._get_actual_db_status() self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status) @patch('trove.guestagent.datastore.mysql_common.service.LOG') def 
test_get_actual_db_status_error_blocked(self, *args): mysql_common_service.utils.execute_with_timeout = MagicMock( side_effect=[ProcessExecutionError(), ("some output", None)]) mysql_common_service.load_mysqld_options = Mock() mysql_common_service.os.path.exists = Mock(return_value=True) self.mySqlAppStatus = MySqlAppStatus.get() status = self.mySqlAppStatus._get_actual_db_status() self.assertEqual(rd_instance.ServiceStatuses.BLOCKED, status) class TestRedisApp(BaseAppTest.AppTestCase): def setUp(self): super(TestRedisApp, self).setUp(str(uuid4())) self.orig_os_path_eu = os.path.expanduser os.path.expanduser = Mock(return_value='/tmp/.file') with patch.object(RedisApp, '_build_admin_client'): with patch.object(ImportOverrideStrategy, '_initialize_import_directory'): self.redis = RedisApp(state_change_wait_time=0) self.redis.status = FakeAppStatus( self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.orig_os_path_isfile = os.path.isfile self.orig_utils_execute_with_timeout = utils.execute_with_timeout utils.execute_with_timeout = Mock() @property def app(self): return self.redis @property def appStatus(self): return self.redis.status @property def expected_state_change_timeout(self): return self.redis.state_change_wait_time @property def expected_service_candidates(self): return RedisSystem.SERVICE_CANDIDATES def tearDown(self): os.path.isfile = self.orig_os_path_isfile os.path.expanduser = self.orig_os_path_eu utils.execute_with_timeout = self.orig_utils_execute_with_timeout super(TestRedisApp, self).tearDown() def test_install_if_needed_installed(self): with patch.object(pkg.Package, 'pkg_is_installed', return_value=True): with patch.object(RedisApp, '_install_redis', return_value=None): self.app.install_if_needed('bar') pkg.Package.pkg_is_installed.assert_any_call('bar') self.assertEqual(0, RedisApp._install_redis.call_count) def test_install_if_needed_not_installed(self): with patch.object(pkg.Package, 'pkg_is_installed', return_value=False): with patch.object(RedisApp, '_install_redis', return_value=None): self.app.install_if_needed('asdf') pkg.Package.pkg_is_installed.assert_any_call('asdf') RedisApp._install_redis.assert_any_call('asdf') def test_install_redis(self): with patch.object(utils, 'execute_with_timeout', return_value=('0', '')): with patch.object(pkg.Package, 'pkg_install', return_value=None): with patch.object(RedisApp, 'start_db', return_value=None): self.app._install_redis('redis') pkg.Package.pkg_install.assert_any_call('redis', {}, 1200) RedisApp.start_db.assert_any_call() self.assertTrue(utils.execute_with_timeout.called) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test_service_cleanup(self, exec_mock): rservice.RedisAppStatus(Mock()).cleanup_stalled_db_services() exec_mock.assert_called_once_with('pkill', '-9', 'redis-server', run_as_root=True, root_helper='sudo') class CassandraDBAppTest(BaseAppTest.AppTestCase): @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def setUp(self, mock_logging, _): super(CassandraDBAppTest, self).setUp(str(uuid4())) self.sleep = time.sleep self.orig_time_time = time.time self.pkg_version = cass_service.packager.pkg_version self.pkg = cass_service.packager util.init_db() self.cassandra = cass_service.CassandraApp() self.cassandra.status = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.orig_unlink = os.unlink @property def app(self): return self.cassandra @property def appStatus(self): return 
self.cassandra.status @property def expected_state_change_timeout(self): return self.cassandra.state_change_wait_time @property def expected_service_candidates(self): return self.cassandra.service_candidates def tearDown(self): time.sleep = self.sleep time.time = self.orig_time_time cass_service.packager.pkg_version = self.pkg_version cass_service.packager = self.pkg super(CassandraDBAppTest, self).tearDown() def assert_reported_status(self, expected_status): service_status = InstanceServiceStatus.find_by( instance_id=self.FAKE_ID) self.assertEqual(expected_status, service_status.status) @patch.object(utils, 'execute_with_timeout') def test_service_cleanup(self, exec_mock): cass_service.CassandraAppStatus(Mock()).cleanup_stalled_db_services() exec_mock.assert_called_once_with(self.cassandra.CASSANDRA_KILL_CMD, shell=True) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_install(self, _): self.cassandra._install_db = Mock() self.pkg.pkg_is_installed = Mock(return_value=False) self.cassandra.install_if_needed(['cassandra']) self.assertTrue(self.cassandra._install_db.called) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_install_install_error(self, _): self.cassandra.start_db = Mock() self.cassandra.stop_db = Mock() self.pkg.pkg_is_installed = Mock(return_value=False) self.cassandra._install_db = Mock( side_effect=pkg.PkgPackageStateError("Install error")) self.assertRaises(pkg.PkgPackageStateError, self.cassandra.install_if_needed, ['cassandra=1.2.10']) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) class CouchbaseAppTest(BaseAppTest.AppTestCase): def fake_couchbase_service_discovery(self, candidates): return { 'cmd_start': 'start', 'cmd_stop': 'stop', 'cmd_enable': 'enable', 'cmd_disable': 'disable' } def setUp(self): super(CouchbaseAppTest, self).setUp(str(uuid4())) self.orig_utils_execute_with_timeout = ( couchservice.utils.execute_with_timeout) self.orig_time_sleep = time.sleep self.orig_time_time = time.time time.sleep = Mock() time.time = Mock(side_effect=faketime) self.orig_service_discovery = operating_system.service_discovery self.orig_get_ip = netutils.get_my_ipv4 operating_system.service_discovery = ( self.fake_couchbase_service_discovery) netutils.get_my_ipv4 = Mock() status = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.couchbaseApp = couchservice.CouchbaseApp(status) dbaas.CONF.guest_id = self.FAKE_ID @property def app(self): return self.couchbaseApp @property def appStatus(self): return self.couchbaseApp.status @property def expected_state_change_timeout(self): return self.couchbaseApp.state_change_wait_time @property def expected_service_candidates(self): return couchservice.system.SERVICE_CANDIDATES @patch.object(utils, 'execute_with_timeout') def test_service_cleanup(self, exec_mock): couchservice.CouchbaseAppStatus().cleanup_stalled_db_services() exec_mock.assert_called_once_with(couchservice.system.cmd_kill) def tearDown(self): couchservice.utils.execute_with_timeout = ( self.orig_utils_execute_with_timeout) netutils.get_my_ipv4 = self.orig_get_ip operating_system.service_discovery = self.orig_service_discovery time.sleep = self.orig_time_sleep time.time = self.orig_time_time dbaas.CONF.guest_id = None super(CouchbaseAppTest, self).tearDown() def test_install_when_couchbase_installed(self): couchservice.packager.pkg_is_installed = Mock(return_value=True) couchservice.utils.execute_with_timeout = Mock() 
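        # pkg_is_installed is stubbed to return True here, so (assuming the
        # skip-if-installed behavior exercised in the Redis tests above)
        # install_if_needed() should not attempt an actual installation.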
self.couchbaseApp.install_if_needed(["package"]) self.assertTrue(couchservice.packager.pkg_is_installed.called) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) class CouchDBAppTest(BaseAppTest.AppTestCase): def fake_couchdb_service_discovery(self, candidates): return { 'cmd_start': 'start', 'cmd_stop': 'stop', 'cmd_enable': 'enable', 'cmd_disable': 'disable' } def setUp(self): super(CouchDBAppTest, self).setUp(str(uuid4())) self.orig_utils_execute_with_timeout = ( couchdb_service.utils.execute_with_timeout) self.orig_time_sleep = time.sleep self.orig_time_time = time.time time.sleep = Mock() time.time = Mock(side_effect=faketime) self.orig_service_discovery = operating_system.service_discovery self.orig_get_ip = netutils.get_my_ipv4 operating_system.service_discovery = ( self.fake_couchdb_service_discovery) netutils.get_my_ipv4 = Mock() util.init_db() status = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.couchdbApp = couchdb_service.CouchDBApp(status) dbaas.CONF.guest_id = self.FAKE_ID @property def app(self): return self.couchdbApp @property def appStatus(self): return self.couchdbApp.status @property def expected_state_change_timeout(self): return self.couchdbApp.state_change_wait_time @property def expected_service_candidates(self): return couchdb_service.system.SERVICE_CANDIDATES def tearDown(self): couchdb_service.utils.execute_with_timeout = ( self.orig_utils_execute_with_timeout) netutils.get_my_ipv4 = self.orig_get_ip operating_system.service_discovery = self.orig_service_discovery time.sleep = self.orig_time_sleep time.time = self.orig_time_time dbaas.CONF.guest_id = None super(CouchDBAppTest, self).tearDown() def test_install_when_couchdb_installed(self): couchdb_service.packager.pkg_is_installed = Mock(return_value=True) couchdb_service.utils.execute_with_timeout = Mock() self.couchdbApp.install_if_needed(["package"]) self.assertTrue(couchdb_service.packager.pkg_is_installed.called) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) class MongoDBAppTest(BaseAppTest.AppTestCase): def fake_mongodb_service_discovery(self, candidates): return { 'cmd_start': 'start', 'cmd_stop': 'stop', 'cmd_enable': 'enable', 'cmd_disable': 'disable' } @patch.object(ImportOverrideStrategy, '_initialize_import_directory') def setUp(self, _): super(MongoDBAppTest, self).setUp(str(uuid4())) self.orig_utils_execute_with_timeout = (mongo_service. 
                                                 utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.orig_packager = mongo_system.PACKAGER
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_os_unlink = os.unlink
        self.orig_os_path_eu = os.path.expanduser
        os.path.expanduser = Mock(return_value='/tmp/.file')

        operating_system.service_discovery = (
            self.fake_mongodb_service_discovery)
        util.init_db()

        self.mongoDbApp = mongo_service.MongoDBApp()
        self.mongoDbApp.status = FakeAppStatus(self.FAKE_ID,
                                               rd_instance.ServiceStatuses.NEW)
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        os.unlink = Mock()

    @property
    def app(self):
        return self.mongoDbApp

    @property
    def appStatus(self):
        return self.mongoDbApp.status

    @property
    def expected_state_change_timeout(self):
        return self.mongoDbApp.state_change_wait_time

    @property
    def expected_service_candidates(self):
        return mongo_system.MONGOD_SERVICE_CANDIDATES

    @patch.object(utils, 'execute_with_timeout')
    def test_service_cleanup(self, exec_mock):
        # cleanup_stalled_db_services() looks up the mongod PID via
        # system.FIND_PID and then issues system.MONGODB_KILL for that
        # PID; here we only exercise the call against the mocked executor.
        self.appStatus.cleanup_stalled_db_services()

    def tearDown(self):
        mongo_service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        time.sleep = self.orig_time_sleep
        time.time = self.orig_time_time
        mongo_system.PACKAGER = self.orig_packager
        operating_system.service_discovery = self.orig_service_discovery
        os.unlink = self.orig_os_unlink
        os.path.expanduser = self.orig_os_path_eu
        super(MongoDBAppTest, self).tearDown()

    def test_start_db_with_conf_changes_db_is_running(self):
        self.mongoDbApp.start_db = Mock()
        self.mongoDbApp.status.status = rd_instance.ServiceStatuses.RUNNING
        self.assertRaises(RuntimeError,
                          self.mongoDbApp.start_db_with_conf_changes,
                          Mock())

    def test_install_when_db_installed(self):
        packager_mock = MagicMock()
        packager_mock.pkg_is_installed = MagicMock(return_value=True)
        mongo_system.PACKAGER = packager_mock
        self.mongoDbApp.install_if_needed(['package'])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_install_when_db_not_installed(self):
        packager_mock = MagicMock()
        packager_mock.pkg_is_installed = MagicMock(return_value=False)
        mongo_system.PACKAGER = packager_mock
        self.mongoDbApp.install_if_needed(['package'])
        packager_mock.pkg_install.assert_any_call(ANY, {}, ANY)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)


class VerticaAppStatusTest(trove_testtools.TestCase):

    def setUp(self):
        super(VerticaAppStatusTest, self).setUp()
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)

    def tearDown(self):
        super(VerticaAppStatusTest, self).tearDown()
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()

    def test_get_actual_db_status(self):
        self.verticaAppStatus = VerticaAppStatus()
        with patch.object(vertica_system, 'shell_execute',
                          MagicMock(return_value=['db_srvr', None])):
            status = self.verticaAppStatus._get_actual_db_status()
            self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)

    def test_get_actual_db_status_shutdown(self):
        self.verticaAppStatus = VerticaAppStatus()
        with patch.object(vertica_system, 'shell_execute',
                          MagicMock(side_effect=[['', None],
                                                 ['db_srvr', None]])):
            status = self.verticaAppStatus._get_actual_db_status()
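            # Per the side_effect above, the first stubbed shell_execute
            # call reports no running database (empty output), so the
            # status check should resolve to SHUTDOWN.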
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_get_actual_db_status_error_crashed(self, *args): self.verticaAppStatus = VerticaAppStatus() with patch.object(vertica_system, 'shell_execute', MagicMock(side_effect=ProcessExecutionError('problem' ))): status = self.verticaAppStatus._get_actual_db_status() self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status) class VerticaAppTest(trove_testtools.TestCase): @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def setUp(self, *args, **kwargs): super(VerticaAppTest, self).setUp() self.FAKE_ID = 1000 self.appStatus = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.app = VerticaApp(self.appStatus) self.setread = VolumeDevice.set_readahead_size self.Popen = subprocess.Popen vertica_system_patcher = patch.multiple( vertica_system, shell_execute=MagicMock(return_value=('', '')), exec_vsql_command=MagicMock(return_value=('', ''))) self.addCleanup(vertica_system_patcher.stop) vertica_system_patcher.start() VolumeDevice.set_readahead_size = Mock() subprocess.Popen = Mock() self.test_config = ConfigParser.ConfigParser() self.test_config.add_section('credentials') self.test_config.set('credentials', 'dbadmin_password', 'some_password') def tearDown(self): self.app = None VolumeDevice.set_readahead_size = self.setread subprocess.Popen = self.Popen super(VerticaAppTest, self).tearDown() def test_enable_root_is_root_not_enabled(self): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(self.app, 'is_root_enabled', return_value=False): with patch.object(vertica_system, 'exec_vsql_command', MagicMock(side_effect=[['', ''], ['', ''], ['', '']])): self.app.enable_root('root_password') create_user_arguments = ( vertica_system.exec_vsql_command.call_args_list[0]) expected_create_user_cmd = ( vertica_system.CREATE_USER % ('root', 'root_password')) create_user_arguments.assert_called_with( 'some_password', expected_create_user_cmd) grant_role_arguments = ( vertica_system.exec_vsql_command.call_args_list[1]) expected_grant_role_cmd = ( vertica_system.GRANT_TO_USER % ('pseudosuperuser', 'root')) grant_role_arguments.assert_called_with( 'some_password', expected_grant_role_cmd) enable_user_arguments = ( vertica_system.exec_vsql_command.call_args_list[2]) expected_enable_user_cmd = ( vertica_system.ENABLE_FOR_USER % ('root', 'pseudosuperuser' )) enable_user_arguments.assert_called_with( 'some_password', expected_enable_user_cmd) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_enable_root_is_root_not_enabled_failed(self, *args): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(self.app, 'is_root_enabled', return_value=False): with patch.object(vertica_system, 'exec_vsql_command', MagicMock(side_effect=[ ['', vertica_system.VSqlError( 'ERROR 123: Test' )]])): self.assertRaises(RuntimeError, self.app.enable_root, 'root_password') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_enable_root_is_root_enabled(self, *args): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(self.app, 'is_root_enabled', return_value=True): with patch.object(vertica_system, 'exec_vsql_command', MagicMock(side_effect=[['', '']])): self.app.enable_root('root_password') 
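                    # Root is reported as already enabled, so (per the
                    # single stubbed vsql call above) enable_root should
                    # only issue an ALTER USER ... PASSWORD statement,
                    # which is verified below.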
alter_user_password_arguments = ( vertica_system.exec_vsql_command.call_args_list[0]) expected_alter_user_cmd = ( vertica_system.ALTER_USER_PASSWORD % ('root', 'root_password' )) alter_user_password_arguments.assert_called_with( 'some_password', expected_alter_user_cmd) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_enable_root_is_root_enabled_failed(self, *arg): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(self.app, 'is_root_enabled', return_value=True): with patch.object(vertica_system, 'exec_vsql_command', MagicMock(side_effect=[ ['', vertica_system.VSqlError( 'ERROR 123: Test' )]])): self.assertRaises(RuntimeError, self.app.enable_root, 'root_password') def test_is_root_enable(self): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(vertica_system, 'shell_execute', MagicMock(side_effect=[['', '']])): self.app.is_root_enabled() user_exists_args = ( vertica_system.shell_execute.call_args_list[0]) expected_user_exists_cmd = vertica_system.USER_EXISTS % ( 'some_password', 'root') user_exists_args.assert_called_with(expected_user_exists_cmd, 'dbadmin') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_is_root_enable_failed(self, *args): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(vertica_system, 'shell_execute', MagicMock(side_effect=[ ['', ProcessExecutionError]])): self.assertRaises(RuntimeError, self.app.is_root_enabled) def test_install_if_needed_installed(self): with patch.object(pkg.Package, 'pkg_is_installed', return_value=True): with patch.object(pkg.Package, 'pkg_install', return_value=None): self.app.install_if_needed('vertica') pkg.Package.pkg_is_installed.assert_any_call('vertica') self.assertEqual(0, pkg.Package.pkg_install.call_count) def test_install_if_needed_not_installed(self): with patch.object(pkg.Package, 'pkg_is_installed', return_value=False): with patch.object(pkg.Package, 'pkg_install', return_value=None): self.app.install_if_needed('vertica') pkg.Package.pkg_is_installed.assert_any_call('vertica') self.assertEqual(1, pkg.Package.pkg_install.call_count) def test_prepare_for_install_vertica(self): self.app.prepare_for_install_vertica() arguments = vertica_system.shell_execute.call_args_list[0] self.assertEqual(1, VolumeDevice.set_readahead_size.call_count) expected_command = ( "VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin " "VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python" " -m vertica.local_coerce") arguments.assert_called_with(expected_command) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure_prepare_for_install_vertica(self, *args): with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('Error')): self.assertRaises(ProcessExecutionError, self.app.prepare_for_install_vertica) def test_install_vertica(self): with patch.object(self.app, 'write_config', return_value=None): self.app.install_vertica(members='10.0.0.2') arguments = vertica_system.shell_execute.call_args_list[0] expected_command = ( vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica')) arguments.assert_called_with(expected_command) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure_install_vertica(self, *args): with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('some exception')): self.assertRaisesRegexp(RuntimeError, 'install_vertica failed.', 
self.app.install_vertica, members='10.0.0.2') def test_create_db(self): with patch.object(self.app, 'read_config', return_value=self.test_config): self.app.create_db(members='10.0.0.2') arguments = vertica_system.shell_execute.call_args_list[0] expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr', '/var/lib/vertica', '/var/lib/vertica', 'some_password')) arguments.assert_called_with(expected_command, 'dbadmin') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure_create_db(self, *args): with patch.object(self.app, 'read_config', side_effect=RuntimeError('Error')): self.assertRaisesRegexp(RuntimeError, 'Vertica database create failed.', self.app.create_db) # Because of an exception in read_config there was no shell execution. self.assertEqual(0, vertica_system.shell_execute.call_count) def test_vertica_write_config(self): temp_file_handle = tempfile.NamedTemporaryFile(delete=False) mock_mkstemp = MagicMock(return_value=(temp_file_handle)) mock_unlink = Mock(return_value=0) self.app.write_config(config=self.test_config, temp_function=mock_mkstemp, unlink_function=mock_unlink) arguments = vertica_system.shell_execute.call_args_list[0] expected_command = ( ("install -o root -g root -m 644 %(source)s %(target)s" ) % {'source': temp_file_handle.name, 'target': vertica_system.VERTICA_CONF}) arguments.assert_called_with(expected_command) self.assertEqual(1, mock_mkstemp.call_count) configuration_data = ConfigParser.ConfigParser() configuration_data.read(temp_file_handle.name) self.assertEqual( self.test_config.get('credentials', 'dbadmin_password'), configuration_data.get('credentials', 'dbadmin_password')) self.assertEqual(1, mock_unlink.call_count) # delete the temporary_config_file os.unlink(temp_file_handle.name) def test_vertica_error_in_write_config_verify_unlink(self): mock_unlink = Mock(return_value=0) temp_file_handle = tempfile.NamedTemporaryFile(delete=False) mock_mkstemp = MagicMock(return_value=temp_file_handle) with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('some exception')): self.assertRaises(ProcessExecutionError, self.app.write_config, config=self.test_config, temp_function=mock_mkstemp, unlink_function=mock_unlink) self.assertEqual(1, mock_unlink.call_count) # delete the temporary_config_file os.unlink(temp_file_handle.name) @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def test_restart(self, *args, **kwargs): mock_status = MagicMock() app = VerticaApp(mock_status) mock_status.begin_restart = MagicMock(return_value=None) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) with patch.object(VerticaApp, 'stop_db', return_value=None): with patch.object(VerticaApp, 'start_db', return_value=None): mock_status.end_restart = MagicMock( return_value=None) app.restart() mock_status.begin_restart.assert_any_call() VerticaApp.stop_db.assert_any_call() VerticaApp.start_db.assert_any_call() @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def test_start_db(self, *args, **kwargs): mock_status = MagicMock() type(mock_status)._is_restarting = PropertyMock(return_value=False) app = VerticaApp(mock_status) with patch.object(app, '_enable_db_on_boot', return_value=None): with patch.object(app, 'read_config', 
                              return_value=self.test_config):
                mock_status.end_restart = MagicMock(
                    return_value=None)
                app.start_db()
                agent_start, db_start = subprocess.Popen.call_args_list
                agent_expected_command = [
                    'sudo', 'su', '-', 'root', '-c',
                    (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')]
                db_expected_cmd = [
                    'sudo', 'su', '-', 'dbadmin', '-c',
                    (vertica_system.START_DB % ('db_srvr', 'some_password'))]
                self.assertTrue(mock_status.end_restart.called)
                agent_start.assert_called_with(agent_expected_command)
                db_start.assert_called_with(db_expected_cmd)

    def test_start_db_failure(self):
        with patch.object(self.app, '_enable_db_on_boot',
                          side_effect=RuntimeError()):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                self.assertRaises(RuntimeError, self.app.start_db)

    def test_stop_db(self):
        type(self.appStatus)._is_restarting = PropertyMock(return_value=False)
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    self.appStatus.wait_for_real_status_to_change_to = \
                        MagicMock(return_value=True)
                    self.appStatus.end_restart = MagicMock(
                        return_value=None)
                    self.app.stop_db()

                    self.assertEqual(
                        3, vertica_system.shell_execute.call_count)
                    # There are 3 shell executions:
                    # a) stop the vertica-agent service
                    # b) check the database status
                    # c) stop_db
                    # We verify that the 3rd command issued was stop_db.
                    arguments = vertica_system.shell_execute.call_args_list[2]
                    expected_cmd = (vertica_system.STOP_DB %
                                    ('db_srvr', 'some_password'))
                    self.assertTrue(self.appStatus.
                                    wait_for_real_status_to_change_to.called)
                    arguments.assert_called_with(expected_cmd, 'dbadmin')

    def test_stop_db_do_not_start_on_reboot(self):
        type(self.appStatus)._is_restarting = PropertyMock(return_value=True)
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    self.app.stop_db(do_not_start_on_reboot=True)

                    self.assertEqual(
                        3, vertica_system.shell_execute.call_count)
                    self.app._disable_db_on_boot.assert_any_call()

    def test_stop_db_database_not_running(self):
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                self.app.stop_db()
                # Since the database stop command is not executed,
                # only 2 shell calls are made.
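                # The two remaining calls are (a) stopping the vertica-agent
                # service and (b) the database status check; compare
                # test_stop_db above, where a third call issues STOP_DB.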
                self.assertEqual(
                    2, vertica_system.shell_execute.call_count)

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_stop_db_failure(self, *args):
        type(self.appStatus)._is_restarting = PropertyMock(return_value=False)
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    self.appStatus.wait_for_real_status_to_change_to = \
                        MagicMock(return_value=None)
                    self.appStatus.end_restart = MagicMock(
                        return_value=None)
                    self.assertRaises(RuntimeError, self.app.stop_db)

    def test_export_conf_to_members(self):
        self.app._export_conf_to_members(members=['member1', 'member2'])
        self.assertEqual(2, vertica_system.shell_execute.call_count)

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_fail__export_conf_to_members(self, *args):
        with patch.object(vertica_system, 'shell_execute',
                          side_effect=ProcessExecutionError('Error')):
            self.assertRaises(ProcessExecutionError,
                              self.app._export_conf_to_members,
                              ['member1', 'member2'])

    def test_authorize_public_keys(self):
        user = 'test_user'
        keys = ['test_key@machine1', 'test_key@machine2']
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            self.app.authorize_public_keys(user=user, public_keys=keys)
            self.assertEqual(2, vertica_system.shell_execute.call_count)
            vertica_system.shell_execute.assert_any_call(
                'cat ' + '/home/' + user + '/.ssh/authorized_keys')

    def test_authorize_public_keys_authorized_file_not_exists(self):
        user = 'test_user'
        keys = ['test_key@machine1', 'test_key@machine2']
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            with patch.object(
                    vertica_system, 'shell_execute',
                    MagicMock(side_effect=[ProcessExecutionError('Some Error'),
                                           ['', '']])):
                self.app.authorize_public_keys(user=user, public_keys=keys)
                self.assertEqual(2, vertica_system.shell_execute.call_count)
                vertica_system.shell_execute.assert_any_call(
                    'cat ' + '/home/' + user + '/.ssh/authorized_keys')

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_fail_authorize_public_keys(self, *args):
        user = 'test_user'
        keys = ['test_key@machine1', 'test_key@machine2']
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            with patch.object(
                    vertica_system, 'shell_execute',
                    MagicMock(side_effect=[ProcessExecutionError('Some Error'),
                                           ProcessExecutionError('Some Error')
                                           ])):
                self.assertRaises(ProcessExecutionError,
                                  self.app.authorize_public_keys, user, keys)

    def test_get_public_keys(self):
        user = 'test_user'
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            self.app.get_public_keys(user=user)
            self.assertEqual(2, vertica_system.shell_execute.call_count)
            vertica_system.shell_execute.assert_any_call(
                (vertica_system.SSH_KEY_GEN % ('/home/' + user)), user)
            vertica_system.shell_execute.assert_any_call(
                'cat ' + '/home/' + user + '/.ssh/id_rsa.pub')

    def test_get_public_keys_if_key_exists(self):
        user = 'test_user'
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            with patch.object(
                    vertica_system, 'shell_execute',
                    MagicMock(side_effect=[ProcessExecutionError('Some Error'),
                                           ['some_key', None]])):
                key = self.app.get_public_keys(user=user)
                self.assertEqual(2, vertica_system.shell_execute.call_count)
                self.assertEqual('some_key', key)
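    # A rough sketch of the flow the public-key tests above assume (not
    # the actual implementation; 'home' stands in for the expanded user
    # directory):
    #
    #     shell_execute(SSH_KEY_GEN % home, user)  # may fail when a key
    #                                              # pair already exists
    #     key, _ = shell_execute('cat ' + home + '/.ssh/id_rsa.pub')
    #     return key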
@patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_fail_get_public_keys(self, *args): user = 'test_user' with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): with patch.object( vertica_system, 'shell_execute', MagicMock(side_effect=[ProcessExecutionError('Some Error'), ProcessExecutionError('Some Error') ])): self.assertRaises(ProcessExecutionError, self.app.get_public_keys, user) def test_install_cluster(self): with patch.object(self.app, 'read_config', return_value=self.test_config): self.app.install_cluster(members=['member1', 'member2']) # Verifying the number of shell calls, # as command has already been tested in preceding tests self.assertEqual(5, vertica_system.shell_execute.call_count) def test__enable_db_on_boot(self): self.app._enable_db_on_boot() restart_policy, agent_enable = subprocess.Popen.call_args_list expected_restart_policy = [ 'sudo', 'su', '-', 'dbadmin', '-c', (vertica_system.SET_RESTART_POLICY % ('db_srvr', 'always'))] expected_agent_enable = [ 'sudo', 'su', '-', 'root', '-c', (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'enable')] self.assertEqual(2, subprocess.Popen.call_count) restart_policy.assert_called_with(expected_restart_policy) agent_enable.assert_called_with(expected_agent_enable) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure__enable_db_on_boot(self, *args): with patch.object(subprocess, 'Popen', side_effect=OSError): self.assertRaisesRegexp(RuntimeError, 'Could not enable db on boot.', self.app._enable_db_on_boot) def test__disable_db_on_boot(self): self.app._disable_db_on_boot() restart_policy, agent_disable = ( vertica_system.shell_execute.call_args_list) expected_restart_policy = ( vertica_system.SET_RESTART_POLICY % ('db_srvr', 'never')) expected_agent_disable = ( vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'disable') self.assertEqual(2, vertica_system.shell_execute.call_count) restart_policy.assert_called_with(expected_restart_policy, 'dbadmin') agent_disable.assert_called_with(expected_agent_disable, 'root') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure__disable_db_on_boot(self, *args): with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('Error')): self.assertRaisesRegexp(RuntimeError, 'Could not disable db on boot.', self.app._disable_db_on_boot) def test_read_config(self): with patch.object(ConfigParser, 'ConfigParser', return_value=self.test_config): test_config = self.app.read_config() self.assertEqual('some_password', test_config.get('credentials', 'dbadmin_password') ) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_fail_read_config(self, *args): with patch.object(ConfigParser.ConfigParser, 'read', side_effect=ConfigParser.Error()): self.assertRaises(RuntimeError, self.app.read_config) @patch.object(ConfigurationManager, 'save_configuration') def test_start_db_with_conf_changes(self, save_cfg): type(self.appStatus)._is_restarting = PropertyMock(return_value=False) type(self.appStatus).is_running = PropertyMock(return_value=False) with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(self.appStatus, 'end_restart') as end_restart: config = 'tst_cfg_contents' self.app.start_db_with_conf_changes(config) save_cfg.assert_called_once_with(config) end_restart.assert_any_call() class DB2AppTest(trove_testtools.TestCase): def setUp(self): super(DB2AppTest, self).setUp() self.orig_utils_execute_with_timeout = ( 
db2service.utils.execute_with_timeout) util.init_db() self.FAKE_ID = str(uuid4()) InstanceServiceStatus.create(instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) self.appStatus = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.db2App = db2service.DB2App(self.appStatus) dbaas.CONF.guest_id = self.FAKE_ID def tearDown(self): db2service.utils.execute_with_timeout = ( self.orig_utils_execute_with_timeout) InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() dbaas.CONF.guest_id = None self.db2App = None super(DB2AppTest, self).tearDown() def assert_reported_status(self, expected_status): service_status = InstanceServiceStatus.find_by( instance_id=self.FAKE_ID) self.assertEqual(expected_status, service_status.status) def test_stop_db(self): db2service.utils.execute_with_timeout = MagicMock(return_value=None) self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN) self.db2App.stop_db() self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_restart_server(self): self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) mock_status = MagicMock(return_value=None) app = db2service.DB2App(mock_status) mock_status.begin_restart = MagicMock(return_value=None) app.stop_db = MagicMock(return_value=None) app.start_db = MagicMock(return_value=None) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) app.restart() self.assertTrue(mock_status.begin_restart.called) self.assertTrue(app.stop_db.called) self.assertTrue(app.start_db.called) def test_start_db(self): db2service.utils.execute_with_timeout = MagicMock(return_value=None) self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) with patch.object(self.db2App, '_enable_db_on_boot', return_value=None): self.db2App.start_db() self.assert_reported_status(rd_instance.ServiceStatuses.NEW) class DB2AdminTest(trove_testtools.TestCase): def setUp(self): super(DB2AdminTest, self).setUp() self.db2Admin = db2service.DB2Admin() self.orig_utils_execute_with_timeout = ( db2service.utils.execute_with_timeout) def tearDown(self): db2service.utils.execute_with_timeout = ( self.orig_utils_execute_with_timeout) super(DB2AdminTest, self).tearDown() @patch('trove.guestagent.datastore.experimental.db2.service.LOG') def test_delete_database(self, *args): with patch.object( db2service, 'run_command', MagicMock( return_value=None, side_effect=ProcessExecutionError('Error'))): self.assertRaises(GuestError, self.db2Admin.delete_database, FAKE_DB) self.assertTrue(db2service.run_command.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 drop database testDB" self.assertEqual(expected, args[0], "Delete database queries are not the same") @patch('trove.guestagent.datastore.experimental.db2.service.LOG') def test_list_databases(self, *args): with patch.object(db2service, 'run_command', MagicMock( side_effect=ProcessExecutionError('Error'))): self.db2Admin.list_databases() self.assertTrue(db2service.run_command.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 list database directory " \ "| grep -B6 -i indirect | grep 'Database name' | " \ "sed 's/.*= //'" self.assertEqual(expected, args[0], "Delete database queries are not the same") def test_create_users(self): with patch.object(db2service, 'run_command', MagicMock( return_value=None)): db2service.utils.execute_with_timeout = MagicMock( return_value=None) self.db2Admin.create_user(FAKE_USER) 
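        # The assertions below unpack run_command.call_args_list to recover
        # the exact command string handed to the mock. Each entry behaves
        # like an (args, kwargs) pair, so `args, _ = m.call_args_list[0]`
        # yields the positional arguments of the first call. Illustrative
        # idiom (hypothetical values):
        #
        #     m = MagicMock()
        #     m('db2 connect to testDB', timeout=30)
        #     args, kwargs = m.call_args_list[0]
        #     assert args[0] == 'db2 connect to testDB'
        #     assert kwargs == {'timeout': 30}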
self.assertTrue(db2service.utils.execute_with_timeout.called) self.assertTrue(db2service.run_command.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 connect to testDB; " \ "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \ "ON DATABASE TO USER random; db2 connect reset" self.assertEqual( expected, args[0], "Granting database access queries are not the same") self.assertEqual(1, db2service.run_command.call_count) def test_delete_users_with_db(self): with patch.object(db2service, 'run_command', MagicMock(return_value=None)): with patch.object(db2service.DB2Admin, 'list_access', MagicMock(return_value=None)): utils.execute_with_timeout = MagicMock(return_value=None) self.db2Admin.delete_user(FAKE_USER[0]) self.assertTrue(db2service.run_command.called) self.assertTrue(db2service.utils.execute_with_timeout.called) self.assertFalse(db2service.DB2Admin.list_access.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 connect to testDB; " \ "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \ "ON DATABASE FROM USER random; db2 connect reset" self.assertEqual( expected, args[0], "Revoke database access queries are not the same") self.assertEqual(1, db2service.run_command.call_count) def test_delete_users_without_db(self): FAKE_USER.append( {"_name": "random2", "_password": "guesswhat", "_databases": []}) with patch.object(db2service, 'run_command', MagicMock(return_value=None)): with patch.object(db2service.DB2Admin, 'list_access', MagicMock(return_value=[FAKE_DB])): utils.execute_with_timeout = MagicMock(return_value=None) self.db2Admin.delete_user(FAKE_USER[1]) self.assertTrue(db2service.run_command.called) self.assertTrue(db2service.DB2Admin.list_access.called) self.assertTrue( db2service.utils.execute_with_timeout.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 connect to testDB; " \ "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \ "DATAACCESS ON DATABASE FROM USER random2; " \ "db2 connect reset" self.assertEqual( expected, args[0], "Revoke database access queries are not the same") self.assertEqual(1, db2service.run_command.call_count) def test_list_users(self): databases = [] databases.append(FAKE_DB) with patch.object(db2service, 'run_command', MagicMock( side_effect=ProcessExecutionError('Error'))): with patch.object(self.db2Admin, "list_databases", MagicMock(return_value=(databases, None))): self.db2Admin.list_users() self.assertTrue(db2service.run_command.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 +o connect to testDB; " \ "db2 -x select grantee, dataaccessauth " \ "from sysibm.sysdbauth; db2 connect reset" self.assertEqual(expected, args[0], "List database queries are not the same") def test_get_user(self): databases = [] databases.append(FAKE_DB) with patch.object(db2service, 'run_command', MagicMock( side_effect=ProcessExecutionError('Error'))): with patch.object(self.db2Admin, "list_databases", MagicMock(return_value=(databases, None))): self.db2Admin._get_user('random', None) self.assertTrue(db2service.run_command.called) args, _ = db2service.run_command.call_args_list[0] expected = "db2 +o connect to testDB; " \ "db2 -x select grantee, dataaccessauth " \ "from sysibm.sysdbauth; db2 connect reset" self.assertEqual(args[0], expected, "Delete database queries are not the same") class PXCAppTest(trove_testtools.TestCase): def setUp(self): super(PXCAppTest, self).setUp() self.orig_utils_execute_with_timeout = \ mysql_common_service.utils.execute_with_timeout 
self.orig_time_sleep = time.sleep self.orig_time_time = time.time self.orig_unlink = os.unlink self.orig_get_auth_password = \ mysql_common_service.BaseMySqlApp.get_auth_password self.FAKE_ID = str(uuid4()) InstanceServiceStatus.create(instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) self.appStatus = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.PXCApp = pxc_service.PXCApp(self.appStatus) mysql_service = patch.object( pxc_service.PXCApp, 'mysql_service', PropertyMock(return_value={ 'cmd_start': Mock(), 'cmd_stop': Mock(), 'cmd_enable': Mock(), 'cmd_disable': Mock(), 'cmd_bootstrap_galera_cluster': Mock(), 'bin': Mock() })) mysql_service.start() self.addCleanup(mysql_service.stop) time.sleep = Mock() time.time = Mock(side_effect=faketime) os.unlink = Mock() mysql_common_service.BaseMySqlApp.get_auth_password = Mock() self.mock_client = Mock() self.mock_execute = Mock() self.mock_client.__enter__ = Mock() self.mock_client.__exit__ = Mock() self.mock_client.__enter__.return_value.execute = self.mock_execute self.orig_configuration_manager = \ mysql_common_service.BaseMySqlApp.configuration_manager mysql_common_service.BaseMySqlApp.configuration_manager = Mock() self.orig_create_engine = sqlalchemy.create_engine def tearDown(self): self.PXCApp = None mysql_common_service.utils.execute_with_timeout = \ self.orig_utils_execute_with_timeout time.sleep = self.orig_time_sleep time.time = self.orig_time_time os.unlink = self.orig_unlink mysql_common_service.BaseMySqlApp.get_auth_password = \ self.orig_get_auth_password InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() mysql_common_service.BaseMySqlApp.configuration_manager = \ self.orig_configuration_manager sqlalchemy.create_engine = self.orig_create_engine super(PXCAppTest, self).tearDown() @patch.object(pxc_service.PXCApp, 'get_engine', return_value=MagicMock(name='get_engine')) def test__grant_cluster_replication_privilege(self, mock_engine): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } with patch.object(pxc_service.PXCApp, 'local_sql_client', return_value=self.mock_client): self.PXCApp._grant_cluster_replication_privilege(repl_user) args, _ = self.mock_execute.call_args_list[0] expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* " "TO `test-user`@`%` IDENTIFIED BY 'test-user-password';") self.assertEqual(expected, args[0].text, "Sql statements are not the same") @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test__bootstrap_cluster(self, mock_execute): pxc_service_cmds = self.PXCApp.mysql_service self.PXCApp._bootstrap_cluster(timeout=20) self.assertEqual(1, mock_execute.call_count) mock_execute.assert_called_with( pxc_service_cmds['cmd_bootstrap_galera_cluster'], shell=True, timeout=20) def test_install_cluster(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.PXCApp.configuration_manager.apply_system_override = apply_mock self.PXCApp.stop_db = Mock() self.PXCApp._grant_cluster_replication_privilege = Mock() self.PXCApp.wipe_ib_logfiles = Mock() self.PXCApp.start_mysql = Mock() self.PXCApp.install_cluster(repl_user, "something") self.assertEqual(1, self.PXCApp.stop_db.call_count) self.assertEqual( 1, self.PXCApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count) self.assertEqual(1, self.PXCApp.start_mysql.call_count) def test_install_cluster_with_bootstrap(self): repl_user = 
{ 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.PXCApp.configuration_manager.apply_system_override = apply_mock self.PXCApp.stop_db = Mock() self.PXCApp._grant_cluster_replication_privilege = Mock() self.PXCApp.wipe_ib_logfiles = Mock() self.PXCApp._bootstrap_cluster = Mock() self.PXCApp.install_cluster(repl_user, "something", bootstrap=True) self.assertEqual(1, self.PXCApp.stop_db.call_count) self.assertEqual( 1, self.PXCApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.PXCApp._bootstrap_cluster.call_count) class MariaDBAppTest(trove_testtools.TestCase): def setUp(self): super(MariaDBAppTest, self).setUp() self.orig_utils_execute_with_timeout = \ mysql_common_service.utils.execute_with_timeout self.orig_time_sleep = time.sleep self.orig_time_time = time.time self.orig_unlink = os.unlink self.orig_get_auth_password = \ mysql_common_service.BaseMySqlApp.get_auth_password self.FAKE_ID = str(uuid4()) InstanceServiceStatus.create(instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) self.appStatus = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.MariaDBApp = mariadb_service.MariaDBApp(self.appStatus) mysql_service = patch.object( mariadb_service.MariaDBApp, 'mysql_service', PropertyMock(return_value={ 'cmd_start': Mock(), 'cmd_stop': Mock(), 'cmd_enable': Mock(), 'cmd_disable': Mock(), 'cmd_bootstrap_galera_cluster': Mock(), 'bin': Mock() })) mysql_service.start() self.addCleanup(mysql_service.stop) time.sleep = Mock() time.time = Mock(side_effect=faketime) os.unlink = Mock() mysql_common_service.BaseMySqlApp.get_auth_password = Mock() self.mock_client = Mock() self.mock_execute = Mock() self.mock_client.__enter__ = Mock() self.mock_client.__exit__ = Mock() self.mock_client.__enter__.return_value.execute = self.mock_execute self.orig_configuration_manager = \ mysql_common_service.BaseMySqlApp.configuration_manager mysql_common_service.BaseMySqlApp.configuration_manager = Mock() self.orig_create_engine = sqlalchemy.create_engine def tearDown(self): self.MariaDBApp = None mysql_common_service.utils.execute_with_timeout = \ self.orig_utils_execute_with_timeout time.sleep = self.orig_time_sleep time.time = self.orig_time_time os.unlink = self.orig_unlink mysql_common_service.BaseMySqlApp.get_auth_password = \ self.orig_get_auth_password InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() mysql_common_service.BaseMySqlApp.configuration_manager = \ self.orig_configuration_manager sqlalchemy.create_engine = self.orig_create_engine super(MariaDBAppTest, self).tearDown() @patch.object(mariadb_service.MariaDBApp, 'get_engine', return_value=MagicMock(name='get_engine')) def test__grant_cluster_replication_privilege(self, mock_engine): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } with patch.object(mariadb_service.MariaDBApp, 'local_sql_client', return_value=self.mock_client): self.MariaDBApp._grant_cluster_replication_privilege(repl_user) args, _ = self.mock_execute.call_args_list[0] expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* " "TO `test-user`@`%` IDENTIFIED BY 'test-user-password';") self.assertEqual(expected, args[0].text, "Sql statements are not the same") @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test__bootstrap_cluster(self, mock_execute): mariadb_service_cmds = self.MariaDBApp.mysql_service 
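        # _bootstrap_cluster is expected to shell out exactly once with the
        # Galera bootstrap command; the assertions that follow pin both the
        # call count and the full (command, shell=True, timeout=...)
        # signature via mock_execute.assert_called_with.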
self.MariaDBApp._bootstrap_cluster(timeout=20) self.assertEqual(1, mock_execute.call_count) mock_execute.assert_called_with( mariadb_service_cmds['cmd_bootstrap_galera_cluster'], shell=True, timeout=20) def test_install_cluster(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.MariaDBApp.configuration_manager.apply_system_override = \ apply_mock self.MariaDBApp.stop_db = Mock() self.MariaDBApp._grant_cluster_replication_privilege = Mock() self.MariaDBApp.wipe_ib_logfiles = Mock() self.MariaDBApp.start_mysql = Mock() self.MariaDBApp.install_cluster(repl_user, "something") self.assertEqual(1, self.MariaDBApp.stop_db.call_count) self.assertEqual( 1, self.MariaDBApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.MariaDBApp.wipe_ib_logfiles.call_count) self.assertEqual(1, self.MariaDBApp.start_mysql.call_count) def test_install_cluster_with_bootstrap(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.MariaDBApp.configuration_manager.apply_system_override = \ apply_mock self.MariaDBApp.stop_db = Mock() self.MariaDBApp._grant_cluster_replication_privilege = Mock() self.MariaDBApp.wipe_ib_logfiles = Mock() self.MariaDBApp._bootstrap_cluster = Mock() self.MariaDBApp.install_cluster(repl_user, "something", bootstrap=True) self.assertEqual(1, self.MariaDBApp.stop_db.call_count) self.assertEqual( 1, self.MariaDBApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, self.MariaDBApp.wipe_ib_logfiles.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.MariaDBApp._bootstrap_cluster.call_count) class PostgresAppTest(BaseAppTest.AppTestCase): class FakePostgresApp(pg_manager.Manager): """Postgresql design is currently different than other datastores. It does not have an App class, only the Manager, so we fake one. The fake App just passes the calls onto the Postgres manager. """ def restart(self): super(PostgresAppTest.FakePostgresApp, self).restart(Mock()) def start_db(self): super(PostgresAppTest.FakePostgresApp, self).start_db(Mock()) def stop_db(self): super(PostgresAppTest.FakePostgresApp, self).stop_db(Mock()) @patch.object(pg_config.PgSqlConfig, '_find_config_file', return_value='') def setUp(self, _): super(PostgresAppTest, self).setUp(str(uuid4())) self.orig_time_sleep = time.sleep self.orig_time_time = time.time time.sleep = Mock() time.time = Mock(side_effect=faketime) status = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.pg_status_patcher = patch.object(pg_status.PgSqlAppStatus, 'get', return_value=status) self.addCleanup(self.pg_status_patcher.stop) self.pg_status_patcher.start() self.postgres = PostgresAppTest.FakePostgresApp() @property def app(self): return self.postgres @property def appStatus(self): return self.postgres.status @property def expected_state_change_timeout(self): return CONF.state_change_wait_time @property def expected_service_candidates(self): return self.postgres.SERVICE_CANDIDATES def tearDown(self): time.sleep = self.orig_time_sleep time.time = self.orig_time_time super(PostgresAppTest, self).tearDown() trove-5.0.0/trove/tests/unittests/guestagent/test_configuration.py0000664000567000056710000004707612701410316027016 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import getpass from mock import call from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import patch import os import tempfile from trove.common.stream_codecs import IniCodec from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.tests.unittests import trove_testtools class TestConfigurationManager(trove_testtools.TestCase): @patch.multiple('trove.guestagent.common.operating_system', read_file=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def test_read_write_configuration(self, read_file, write_file, chown, chmod): sample_path = Mock() sample_owner = Mock() sample_group = Mock() sample_codec = MagicMock() sample_requires_root = Mock() sample_strategy = MagicMock() sample_strategy.configure = Mock() sample_strategy.parse_updates = Mock(return_value={}) manager = ConfigurationManager( sample_path, sample_owner, sample_group, sample_codec, requires_root=sample_requires_root, override_strategy=sample_strategy) manager.parse_configuration() read_file.assert_called_with(sample_path, codec=sample_codec, as_root=sample_requires_root) with patch.object(manager, 'parse_configuration', return_value={'key1': 'v1', 'key2': 'v2'}): self.assertEqual('v1', manager.get_value('key1')) self.assertIsNone(manager.get_value('key3')) sample_contents = Mock() manager.save_configuration(sample_contents) write_file.assert_called_with( sample_path, sample_contents, as_root=sample_requires_root) chown.assert_called_with(sample_path, sample_owner, sample_group, as_root=sample_requires_root) chmod.assert_called_with( sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) sample_data = {} manager.apply_system_override(sample_data) manager.apply_user_override(sample_data) manager.apply_system_override(sample_data, change_id='sys1') manager.apply_user_override(sample_data, change_id='usr1') sample_strategy.apply.has_calls([ call(manager.SYSTEM_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), call(manager.SYSTEM_GROUP, 'sys1', sample_data), call(manager.USER_GROUP, 'usr1', sample_data) ]) class TestConfigurationOverrideStrategy(trove_testtools.TestCase): def setUp(self): trove_testtools.TestCase.setUp(self) self._temp_files_paths = [] self.chmod_patch = patch.object( operating_system, 'chmod', MagicMock(return_value=None)) self.chmod_patch_mock = self.chmod_patch.start() self.addCleanup(self.chmod_patch.stop) def tearDown(self): trove_testtools.TestCase.tearDown(self) # Remove temporary files in the LIFO order. while self._temp_files_paths: try: os.remove(self._temp_files_paths.pop()) except Exception: pass # Do not fail in cleanup. 
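    # The ImportOverrideStrategy assertions below depend on the revision
    # file naming convention '<group>-<NNN>-<change_id>.<ext>' implemented
    # by import_path_builder further down; for example, the second system
    # override with change id 'id2' is expected at 'system-002-id2.ext'.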
def _create_temp_dir(self): path = tempfile.mkdtemp() self._temp_files_paths.append(path) return path def test_import_override_strategy(self): # Data structures representing overrides. # ('change id', 'values', 'expected import index', # 'expected final import data') # Distinct IDs within each group mean that there is one file for each # override. user_overrides_v1 = ('id1', {'Section_1': {'name': 'sqrt(2)', 'value': '1.4142'}}, 1, {'Section_1': {'name': 'sqrt(2)', 'value': '1.4142'}} ) user_overrides_v2 = ('id2', {'Section_1': {'is_number': 'False'}}, 2, {'Section_1': {'is_number': 'False'}} ) system_overrides_v1 = ('id1', {'Section_1': {'name': 'e', 'value': '2.7183'}}, 1, {'Section_1': {'name': 'e', 'value': '2.7183'}} ) system_overrides_v2 = ('id2', {'Section_2': {'is_number': 'True'}}, 2, {'Section_2': {'is_number': 'True'}} ) self._test_import_override_strategy( [system_overrides_v1, system_overrides_v2], [user_overrides_v1, user_overrides_v2], True) # Same IDs within a group mean that the overrides get written into a # single file. user_overrides_v1 = ('id1', {'Section_1': {'name': 'sqrt(2)', 'value': '1.4142'}}, 1, {'Section_1': {'name': 'sqrt(2)', 'is_number': 'False', 'value': '1.4142'}} ) user_overrides_v2 = ('id1', {'Section_1': {'is_number': 'False'}}, 1, {'Section_1': {'name': 'sqrt(2)', 'is_number': 'False', 'value': '1.4142'}} ) system_overrides_v1 = ('id1', {'Section_1': {'name': 'e', 'value': '2.7183'}}, 1, {'Section_1': {'name': 'e', 'value': '2.7183'}, 'Section_2': {'is_number': 'True'}} ) system_overrides_v2 = ('id1', {'Section_2': {'is_number': 'True'}}, 1, {'Section_1': {'name': 'e', 'value': '2.7183'}, 'Section_2': {'is_number': 'True'}} ) self._test_import_override_strategy( [system_overrides_v1, system_overrides_v2], [user_overrides_v1, user_overrides_v2], False) @patch.multiple(operating_system, chmod=Mock(), chown=Mock()) def _test_import_override_strategy( self, system_overrides, user_overrides, test_multi_rev): base_config_contents = {'Section_1': {'name': 'pi', 'is_number': 'True', 'value': '3.1415'} } codec = IniCodec() current_user = getpass.getuser() revision_dir = self._create_temp_dir() with tempfile.NamedTemporaryFile() as base_config: # Write initial config contents. operating_system.write_file( base_config.name, base_config_contents, codec) strategy = ImportOverrideStrategy(revision_dir, 'ext') strategy.configure( base_config.name, current_user, current_user, codec, False) self._assert_import_override_strategy( strategy, system_overrides, user_overrides, test_multi_rev) def _assert_import_override_strategy( self, strategy, system_overrides, user_overrides, test_multi_rev): def import_path_builder( root, group_name, change_id, file_index, file_ext): return os.path.join( root, '%s-%03d-%s.%s' % (group_name, file_index, change_id, file_ext)) # Apply and remove overrides sequentially. ########################################## # Apply the overrides and verify the files as they are created. self._apply_import_overrides( strategy, 'system', system_overrides, import_path_builder) self._apply_import_overrides( strategy, 'user', user_overrides, import_path_builder) # Verify the files again after applying all overrides. self._assert_import_overrides( strategy, 'system', system_overrides, import_path_builder) self._assert_import_overrides( strategy, 'user', user_overrides, import_path_builder) # Remove the overrides and verify the files are gone. 
self._remove_import_overrides( strategy, 'user', user_overrides, import_path_builder) self._remove_import_overrides( strategy, 'system', user_overrides, import_path_builder) # Remove a whole group. ########################################## # Apply overrides first. self._apply_import_overrides( strategy, 'system', system_overrides, import_path_builder) self._apply_import_overrides( strategy, 'user', user_overrides, import_path_builder) # Remove all user overrides and verify the files are gone. self._remove_import_overrides( strategy, 'user', None, import_path_builder) # Assert that the system files are still there intact. self._assert_import_overrides( strategy, 'system', system_overrides, import_path_builder) # Remove all system overrides and verify the files are gone. self._remove_import_overrides( strategy, 'system', None, import_path_builder) if test_multi_rev: # Remove at the end (only if we have multiple revision files). ########################################## # Apply overrides first. self._apply_import_overrides( strategy, 'system', system_overrides, import_path_builder) self._apply_import_overrides( strategy, 'user', user_overrides, import_path_builder) # Remove the last user and system overrides. self._remove_import_overrides( strategy, 'user', [user_overrides[-1]], import_path_builder) self._remove_import_overrides( strategy, 'system', [system_overrides[-1]], import_path_builder) # Assert that the first overrides are still there intact. self._assert_import_overrides( strategy, 'user', [user_overrides[0]], import_path_builder) self._assert_import_overrides( strategy, 'system', [system_overrides[0]], import_path_builder) # Re-apply all overrides. self._apply_import_overrides( strategy, 'system', system_overrides, import_path_builder) self._apply_import_overrides( strategy, 'user', user_overrides, import_path_builder) # This should overwrite the existing files and resume counting from # their indices. self._assert_import_overrides( strategy, 'user', user_overrides, import_path_builder) self._assert_import_overrides( strategy, 'system', system_overrides, import_path_builder) def _apply_import_overrides( self, strategy, group_name, overrides, path_builder): # Apply the overrides and immediately check the file and its contents. for change_id, contents, index, _ in overrides: strategy.apply(group_name, change_id, contents) expected_path = path_builder( strategy._revision_dir, group_name, change_id, index, strategy._revision_ext) self._assert_file_exists(expected_path, True) def _remove_import_overrides( self, strategy, group_name, overrides, path_builder): if overrides: # Remove the overrides and immediately check the file was removed. for change_id, _, index, _ in overrides: strategy.remove(group_name, change_id) expected_path = path_builder( strategy._revision_dir, group_name, change_id, index, strategy._revision_ext) self._assert_file_exists(expected_path, False) else: # Remove the entire group. strategy.remove(group_name) found = operating_system.list_files_in_directory( strategy._revision_dir, pattern='^%s-.+$' % group_name) self.assertEqual(set(), found, "Some import files from group '%s' " "were not removed." 
% group_name) def _assert_import_overrides( self, strategy, group_name, overrides, path_builder): # Check all override files and their contents, for change_id, _, index, expected in overrides: expected_path = path_builder( strategy._revision_dir, group_name, change_id, index, strategy._revision_ext) self._assert_file_exists(expected_path, True) # Assert that the file contents. imported = operating_system.read_file( expected_path, codec=strategy._codec) self.assertEqual(expected, imported) def _assert_file_exists(self, file_path, exists): if exists: self.assertTrue(os.path.exists(file_path), "Revision import '%s' does not exist." % file_path) else: self.assertFalse(os.path.exists(file_path), "Revision import '%s' was not removed." % file_path) def test_get_value(self): revision_dir = self._create_temp_dir() self._assert_get_value(ImportOverrideStrategy(revision_dir, 'ext')) self._assert_get_value(OneFileOverrideStrategy(revision_dir)) @patch.multiple(operating_system, chmod=Mock(), chown=Mock()) def _assert_get_value(self, override_strategy): base_config_contents = {'Section_1': {'name': 'pi', 'is_number': 'True', 'value': '3.1415'} } config_overrides_v1a = {'Section_1': {'name': 'sqrt(2)', 'value': '1.4142'} } config_overrides_v2 = {'Section_1': {'name': 'e', 'value': '2.7183'}, 'Section_2': {'foo': 'bar'} } config_overrides_v1b = {'Section_1': {'name': 'sqrt(4)', 'value': '2.0'} } codec = IniCodec() current_user = getpass.getuser() with tempfile.NamedTemporaryFile() as base_config: # Write initial config contents. operating_system.write_file( base_config.name, base_config_contents, codec) manager = ConfigurationManager( base_config.name, current_user, current_user, codec, requires_root=False, override_strategy=override_strategy) # Test default value. self.assertIsNone(manager.get_value('Section_2')) self.assertEqual('foo', manager.get_value('Section_2', 'foo')) # Test value before applying overrides. self.assertEqual('pi', manager.get_value('Section_1')['name']) self.assertEqual('3.1415', manager.get_value('Section_1')['value']) # Test value after applying overrides. manager.apply_user_override(config_overrides_v1a, change_id='id1') self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name']) self.assertEqual('1.4142', manager.get_value('Section_1')['value']) manager.apply_user_override(config_overrides_v2, change_id='id2') self.assertEqual('e', manager.get_value('Section_1')['name']) self.assertEqual('2.7183', manager.get_value('Section_1')['value']) self.assertEqual('bar', manager.get_value('Section_2')['foo']) # Editing change 'id1' become visible only after removing # change 'id2', which overrides 'id1'. manager.apply_user_override(config_overrides_v1b, change_id='id1') self.assertEqual('e', manager.get_value('Section_1')['name']) self.assertEqual('2.7183', manager.get_value('Section_1')['value']) # Test value after removing overrides. # The edited values from change 'id1' should be visible after # removing 'id2'. manager.remove_user_override(change_id='id2') self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name']) self.assertEqual('2.0', manager.get_value('Section_1')['value']) # Back to the base. manager.remove_user_override(change_id='id1') self.assertEqual('pi', manager.get_value('Section_1')['name']) self.assertEqual('3.1415', manager.get_value('Section_1')['value']) self.assertIsNone(manager.get_value('Section_2')) # Test system overrides. 
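            # System overrides take precedence over user overrides applied
            # under the same change id; the assertions that follow verify
            # that the user values only become visible again once the
            # system change is removed.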
manager.apply_system_override( config_overrides_v1b, change_id='id1') self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name']) self.assertEqual('2.0', manager.get_value('Section_1')['value']) # The system values should take precedence over the user # override. manager.apply_user_override( config_overrides_v1a, change_id='id1') self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name']) self.assertEqual('2.0', manager.get_value('Section_1')['value']) # The user values should become visible only after removing the # system change. manager.remove_system_override(change_id='id1') self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name']) self.assertEqual('1.4142', manager.get_value('Section_1')['value']) # Back to the base. manager.remove_user_override(change_id='id1') self.assertEqual('pi', manager.get_value('Section_1')['name']) self.assertEqual('3.1415', manager.get_value('Section_1')['value']) self.assertIsNone(manager.get_value('Section_2')) trove-5.0.0/trove/tests/unittests/guestagent/test_service.py0000664000567000056710000000214712701410316025575 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock from mock import patch from trove.guestagent import service from trove.tests.unittests import trove_testtools class ServiceTest(trove_testtools.TestCase): def setUp(self): super(ServiceTest, self).setUp() def tearDown(self): super(ServiceTest, self).tearDown() @patch.object(service.API, '_instance_router') def test_app_factory(self, instance_router_mock): service.app_factory(Mock) self.assertEqual(1, instance_router_mock.call_count) trove-5.0.0/trove/tests/unittests/guestagent/test_query.py0000664000567000056710000003616312701410316025307 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
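# These tests exercise the sql_query statement builders (Query, Grant,
# Revoke, CreateDatabase, DropDatabase, CreateUser, UpdateUser, DropUser),
# which render MySQL statements through str(). A representative round
# trip, taken from the assertions below:
#
#     cd = sql_query.CreateDatabase('foo')
#     str(cd)  # -> "CREATE DATABASE IF NOT EXISTS `foo`;"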
from trove.guestagent.common import sql_query from trove.tests.unittests import trove_testtools class QueryTestBase(trove_testtools.TestCase): def setUp(self): super(QueryTestBase, self).setUp() def tearDown(self): super(QueryTestBase, self).tearDown() class QueryTest(QueryTestBase): def setUp(self): super(QueryTest, self).setUp() def tearDown(self): super(QueryTest, self).tearDown() def test_columns(self): myQuery = sql_query.Query(columns=None) self.assertEqual("SELECT *", myQuery._columns) def test_columns_2(self): columns = ["col_A", "col_B"] myQuery = sql_query.Query(columns=columns) self.assertEqual("SELECT col_A, col_B", myQuery._columns) def test_tables(self): tables = ['table_A', 'table_B'] myQuery = sql_query.Query(tables=tables) self.assertEqual("FROM table_A, table_B", myQuery._tables) def test_where(self): myQuery = sql_query.Query(where=None) self.assertEqual("", myQuery._where) def test_where_2(self): conditions = ['cond_A', 'cond_B'] myQuery = sql_query.Query(where=conditions) self.assertEqual("WHERE cond_A AND cond_B", myQuery._where) def test_order(self): myQuery = sql_query.Query(order=None) self.assertEqual('', myQuery._order) def test_order_2(self): orders = ['deleted_at', 'updated_at'] myQuery = sql_query.Query(order=orders) self.assertEqual('ORDER BY deleted_at, updated_at', myQuery._order) def test_group_by(self): myQuery = sql_query.Query(group=None) self.assertEqual('', myQuery._group_by) def test_group_by_2(self): groups = ['deleted=1'] myQuery = sql_query.Query(group=groups) self.assertEqual('GROUP BY deleted=1', myQuery._group_by) def test_limit(self): myQuery = sql_query.Query(limit=None) self.assertEqual('', myQuery._limit) def test_limit_2(self): limit_count = 20 myQuery = sql_query.Query(limit=limit_count) self.assertEqual('LIMIT 20', myQuery._limit) class GrantTest(QueryTestBase): def setUp(self): super(GrantTest, self).setUp() def tearDown(self): super(GrantTest, self).tearDown() def test_grant_no_arg_constr(self): grant = sql_query.Grant() self.assertIsNotNone(grant) self.assertEqual("GRANT USAGE ON *.* " "TO ``@`%`;", str(grant)) def test_grant_all_with_grant_option(self): permissions = ['ALL'] user_name = 'root' user_password = 'password123' host = 'localhost' # grant_option defaults to True grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password, grant_option=True) self.assertEqual("GRANT ALL PRIVILEGES ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY 'password123' " "WITH GRANT OPTION;", str(grant)) def test_grant_all_with_explicit_grant_option(self): permissions = ['ALL', 'GRANT OPTION'] user_name = 'root' user_password = 'password123' host = 'localhost' grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password, grant_option=True) self.assertEqual("GRANT ALL PRIVILEGES ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY 'password123' " "WITH GRANT OPTION;", str(grant)) def test_grant_specify_permissions(self): permissions = ['ALTER ROUTINE', 'CREATE', 'ALTER', 'CREATE ROUTINE', 'CREATE TEMPORARY TABLES', 'CREATE VIEW', 'CREATE USER', 'DELETE', 'DROP', 'EVENT', 'EXECUTE', 'INDEX', 'INSERT', 'LOCK TABLES', 'PROCESS', 'REFERENCES', 'SELECT', 'SHOW DATABASES', 'SHOW VIEW', 'TRIGGER', 'UPDATE', 'USAGE'] user_name = 'root' user_password = 'password123' host = 'localhost' grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password) self.assertEqual("GRANT ALTER, " "ALTER ROUTINE, " "CREATE, " "CREATE ROUTINE, " "CREATE TEMPORARY TABLES, " "CREATE 
USER, " "CREATE VIEW, " "DELETE, " "DROP, " "EVENT, " "EXECUTE, " "INDEX, " "INSERT, " "LOCK TABLES, " "PROCESS, " "REFERENCES, " "SELECT, " "SHOW DATABASES, " "SHOW VIEW, " "TRIGGER, " "UPDATE, " "USAGE ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY " "'password123';", str(grant)) def test_grant_specify_duplicate_permissions(self): permissions = ['ALTER ROUTINE', 'CREATE', 'CREATE', 'DROP', 'DELETE', 'DELETE', 'ALTER', 'CREATE ROUTINE', 'CREATE TEMPORARY TABLES', 'CREATE VIEW', 'CREATE USER', 'DELETE', 'DROP', 'EVENT', 'EXECUTE', 'INDEX', 'INSERT', 'LOCK TABLES', 'PROCESS', 'REFERENCES', 'SELECT', 'SHOW DATABASES', 'SHOW VIEW', 'TRIGGER', 'UPDATE', 'USAGE'] user_name = 'root' user_password = 'password123' host = 'localhost' grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password) self.assertEqual("GRANT ALTER, " "ALTER ROUTINE, " "CREATE, " "CREATE ROUTINE, " "CREATE TEMPORARY TABLES, " "CREATE USER, " "CREATE VIEW, " "DELETE, " "DROP, " "EVENT, " "EXECUTE, " "INDEX, " "INSERT, " "LOCK TABLES, " "PROCESS, " "REFERENCES, " "SELECT, " "SHOW DATABASES, " "SHOW VIEW, " "TRIGGER, " "UPDATE, " "USAGE ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY " "'password123';", str(grant)) class RevokeTest(QueryTestBase): def setUp(self): super(RevokeTest, self).setUp() def tearDown(self): super(RevokeTest, self).tearDown() def test_defaults(self): r = sql_query.Revoke() # Technically, this isn't valid for MySQL. self.assertEqual("REVOKE ALL ON *.* FROM ``@`%`;", str(r)) def test_permissions(self): r = sql_query.Revoke() r.user = 'x' r.permissions = ['CREATE', 'DELETE', 'DROP'] self.assertEqual("REVOKE CREATE, DELETE, DROP ON *.* FROM `x`@`%`;", str(r)) def test_database(self): r = sql_query.Revoke() r.user = 'x' r.database = 'foo' self.assertEqual("REVOKE ALL ON `foo`.* FROM `x`@`%`;", str(r)) def test_table(self): r = sql_query.Revoke() r.user = 'x' r.database = 'foo' r.table = 'bar' self.assertEqual("REVOKE ALL ON `foo`.'bar' FROM `x`@`%`;", str(r)) def test_user(self): r = sql_query.Revoke() r.user = 'x' self.assertEqual("REVOKE ALL ON *.* FROM `x`@`%`;", str(r)) def test_user_host(self): r = sql_query.Revoke() r.user = 'x' r.host = 'y' self.assertEqual("REVOKE ALL ON *.* FROM `x`@`y`;", str(r)) class CreateDatabaseTest(QueryTestBase): def setUp(self): super(CreateDatabaseTest, self).setUp() def tearDown(self): super(CreateDatabaseTest, self).tearDown() def test_defaults(self): cd = sql_query.CreateDatabase('foo') self.assertEqual("CREATE DATABASE IF NOT EXISTS `foo`;", str(cd)) def test_charset(self): cd = sql_query.CreateDatabase('foo') cd.charset = "foo" self.assertEqual(("CREATE DATABASE IF NOT EXISTS `foo` " "CHARACTER SET = 'foo';"), str(cd)) def test_collate(self): cd = sql_query.CreateDatabase('foo') cd.collate = "bar" self.assertEqual(("CREATE DATABASE IF NOT EXISTS `foo` " "COLLATE = 'bar';"), str(cd)) class DropDatabaseTest(QueryTestBase): def setUp(self): super(DropDatabaseTest, self).setUp() def tearDown(self): super(DropDatabaseTest, self).tearDown() def test_defaults(self): dd = sql_query.DropDatabase('foo') self.assertEqual("DROP DATABASE `foo`;", str(dd)) class CreateUserTest(QueryTestBase): def setUp(self): super(CreateUserTest, self).setUp() def tearDown(self): super(CreateUserTest, self).tearDown() def test_defaults(self): username = 'root' hostname = 'localhost' password = 'password123' cu = sql_query.CreateUser(user=username, host=hostname, clear=password) self.assertEqual("CREATE USER :user@:host " "IDENTIFIED BY 'password123';", 
str(cu)) class UpdateUserTest(QueryTestBase): def setUp(self): super(UpdateUserTest, self).setUp() def tearDown(self): super(UpdateUserTest, self).tearDown() def test_rename_user(self): username = 'root' hostname = 'localhost' new_user = 'root123' uu = sql_query.UpdateUser(user=username, host=hostname, new_user=new_user) self.assertEqual("UPDATE mysql.user SET User='root123' " "WHERE User = 'root' " "AND Host = 'localhost';", str(uu)) def test_change_password(self): username = 'root' hostname = 'localhost' new_password = 'password123' uu = sql_query.UpdateUser(user=username, host=hostname, clear=new_password) self.assertEqual("UPDATE mysql.user SET " "Password=PASSWORD('password123') " "WHERE User = 'root' " "AND Host = 'localhost';", str(uu)) def test_change_host(self): username = 'root' hostname = 'localhost' new_host = '%' uu = sql_query.UpdateUser(user=username, host=hostname, new_host=new_host) self.assertEqual("UPDATE mysql.user SET Host='%' " "WHERE User = 'root' " "AND Host = 'localhost';", str(uu)) def test_change_password_and_username(self): username = 'root' hostname = 'localhost' new_user = 'root123' new_password = 'password123' uu = sql_query.UpdateUser(user=username, host=hostname, clear=new_password, new_user=new_user) self.assertEqual("UPDATE mysql.user SET User='root123', " "Password=PASSWORD('password123') " "WHERE User = 'root' " "AND Host = 'localhost';", str(uu)) def test_change_username_password_hostname(self): username = 'root' hostname = 'localhost' new_user = 'root123' new_password = 'password123' new_host = '%' uu = sql_query.UpdateUser(user=username, host=hostname, clear=new_password, new_user=new_user, new_host=new_host) self.assertEqual("UPDATE mysql.user SET User='root123', " "Host='%', " "Password=PASSWORD('password123') " "WHERE User = 'root' " "AND Host = 'localhost';", str(uu)) def test_change_username_and_hostname(self): username = 'root' hostname = 'localhost' new_user = 'root123' new_host = '%' uu = sql_query.UpdateUser(user=username, host=hostname, new_host=new_host, new_user=new_user) self.assertEqual("UPDATE mysql.user SET User='root123', " "Host='%' " "WHERE User = 'root' " "AND Host = 'localhost';", str(uu)) class DropUserTest(QueryTestBase): def setUp(self): super(DropUserTest, self).setUp() def tearDown(self): super(DropUserTest, self).tearDown() def test_defaults(self): username = 'root' hostname = 'localhost' du = sql_query.DropUser(user=username, host=hostname) self.assertEqual("DROP USER `root`@`localhost`;", str(du)) trove-5.0.0/trove/tests/unittests/guestagent/test_manager.py0000664000567000056710000005140112701410316025544 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
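# ManagerTest drives a MockManager subclass whose app, status and
# configuration_manager properties are plain MagicMocks, so the control
# flow of the base manager.Manager (prepare, guest log actions, ...) can
# be asserted without a real datastore. The pattern, in miniature:
#
#     class FakeManager(manager.Manager):
#         def __init__(self):
#             super(FakeManager, self).__init__('mysql')
#             self._app = MagicMock()
#
#         @property
#         def app(self):
#             return self._app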
import getpass import os from mock import ANY from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import patch from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from trove.common.context import TroveContext from trove.common import exception from trove.guestagent.common import operating_system from trove.guestagent.datastore import manager from trove.guestagent import guest_log from trove import rpc from trove.tests.unittests import trove_testtools class MockManager(manager.Manager): def __init__(self): super(MockManager, self).__init__('mysql') self._app = MagicMock() self._status = MagicMock() self._configuration_manager = MagicMock() @property def app(self): return self._app @property def status(self): return self._status @property def configuration_manager(self): return self._configuration_manager def prepare(self, *args): args[0].notification = MagicMock() with patch.object(rpc, 'get_client'): return super(MockManager, self).prepare(*args) class ManagerTest(trove_testtools.TestCase): def setUp(self): super(ManagerTest, self).setUp() self.chmod_patch = patch.object(operating_system, 'chmod') self.chmod_mock = self.chmod_patch.start() self.addCleanup(self.chmod_patch.stop) self.manager = MockManager() self.context = TroveContext() self.log_name_sys = 'guest' self.log_name_user = 'general' self.prefix = 'log_prefix' self.container = 'log_container' self.size = 1024 self.published = 128 self.guest_log_user = guest_log.GuestLog( self.context, self.log_name_user, guest_log.LogType.USER, None, '/tmp/gen.log', True) self.guest_log_sys = guest_log.GuestLog( self.context, self.log_name_sys, guest_log.LogType.SYS, None, '/tmp/guest.log', True) for gl in [self.guest_log_user, self.guest_log_sys]: gl._container_name = self.container gl._refresh_details = MagicMock() gl._log_rotated = MagicMock(return_value=False) gl._publish_to_container = MagicMock() gl._delete_log_components = MagicMock() gl._object_prefix = MagicMock(return_value=self.prefix) gl._size = self.size gl._published_size = self.published self.manager._guest_log_cache = { self.log_name_user: self.guest_log_user, self.log_name_sys: self.guest_log_sys} self.expected_details_user = { 'status': 'Disabled', 'prefix': self.prefix, 'container': self.container, 'name': self.log_name_user, 'published': self.published, 'metafile': self.prefix + '_metafile', 'type': 'USER', 'pending': self.size - self.published} self.expected_details_sys = dict(self.expected_details_user) self.expected_details_sys['type'] = 'SYS' self.expected_details_sys['status'] = 'Enabled' self.expected_details_sys['name'] = self.log_name_sys def tearDown(self): super(ManagerTest, self).tearDown() def test_update_status(self): self.manager.update_status(self.context) self.manager.status.update.assert_any_call() def test_guest_log_list(self): log_list = self.manager.guest_log_list(self.context) expected = [self.expected_details_sys, self.expected_details_user] assert_equal(self._flatten_list_of_dicts(expected), self._flatten_list_of_dicts(log_list), "Wrong list: %s (Expected: %s)" % ( self._flatten_list_of_dicts(log_list), self._flatten_list_of_dicts(expected))) def _flatten_list_of_dicts(self, lod): value = sorted("".join("%s%s" % (k, d[k]) for k in sorted(d.keys())) for d in lod) return "".join(sorted(value)) def test_guest_log_action_enable_disable(self): self.assertRaisesRegexp(exception.BadRequest, "Cannot enable and disable", self.manager.guest_log_action, self.context, self.log_name_sys, True, True, 
False, False) def test_guest_log_action_enable_sys(self): self.assertRaisesRegexp(exception.BadRequest, "Cannot enable a SYSTEM log", self.manager.guest_log_action, self.context, self.log_name_sys, True, False, False, False) def test_guest_log_action_disable_sys(self): self.assertRaisesRegexp(exception.BadRequest, "Cannot disable a SYSTEM log", self.manager.guest_log_action, self.context, self.log_name_sys, False, True, False, False) def test_guest_log_action_publish_sys(self): with patch.object(os.path, 'isfile', return_value=True): log_details = self.manager.guest_log_action(self.context, self.log_name_sys, False, False, True, False) assert_equal(log_details, self.expected_details_sys, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_sys)) assert_equal( 1, self.guest_log_sys._publish_to_container.call_count) def test_guest_log_action_discard_sys(self): log_details = self.manager.guest_log_action(self.context, self.log_name_sys, False, False, False, True) assert_equal(log_details, self.expected_details_sys, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_sys)) assert_equal( 1, self.guest_log_sys._delete_log_components.call_count) def test_guest_log_action_enable_user(self): with patch.object(manager.Manager, 'guest_log_enable', return_value=False) as mock_enable: log_details = self.manager.guest_log_action(self.context, self.log_name_user, True, False, False, False) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, mock_enable.call_count) def test_guest_log_action_disable_user(self): with patch.object(manager.Manager, 'guest_log_enable', return_value=False) as mock_enable: self.guest_log_user._enabled = True log_details = self.manager.guest_log_action(self.context, self.log_name_user, False, True, False, False) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, mock_enable.call_count) def test_guest_log_action_publish_user(self): with patch.object(manager.Manager, 'guest_log_enable', return_value=False) as mock_enable: with patch.object(os.path, 'isfile', return_value=True): log_details = self.manager.guest_log_action(self.context, self.log_name_user, False, False, True, False) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, mock_enable.call_count) def test_guest_log_action_discard_user(self): log_details = self.manager.guest_log_action(self.context, self.log_name_user, False, False, False, True) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, self.guest_log_user._delete_log_components.call_count) def test_set_guest_log_status_disabled(self): data = [ {'orig': guest_log.LogStatus.Enabled, 'new': guest_log.LogStatus.Disabled, 'expect': guest_log.LogStatus.Disabled}, {'orig': guest_log.LogStatus.Restart_Required, 'new': guest_log.LogStatus.Enabled, 'expect': guest_log.LogStatus.Restart_Required}, {'orig': guest_log.LogStatus.Restart_Required, 'new': guest_log.LogStatus.Restart_Completed, 'expect': guest_log.LogStatus.Restart_Completed}, {'orig': guest_log.LogStatus.Published, 'new': guest_log.LogStatus.Partial, 'expect': guest_log.LogStatus.Partial}, ] for datum in data: self.assert_guest_log_status(datum['orig'], datum['new'], datum['expect']) def 
assert_guest_log_status(self, original_status, new_status, expected_final_status): gl_cache = self.manager.guest_log_cache gl_cache[self.log_name_sys]._status = original_status self.manager.set_guest_log_status(new_status, self.log_name_sys) assert_equal(gl_cache[self.log_name_sys].status, expected_final_status, "Unexpected status for '%s': %s' (Expected %s)" % (self.log_name_sys, gl_cache[self.log_name_sys].status, expected_final_status)) def test_build_log_file_name(self): current_owner = getpass.getuser() with patch.multiple(operating_system, exists=MagicMock(return_value=False), write_file=DEFAULT, create_directory=DEFAULT, chown=DEFAULT, chmod=DEFAULT) as os_mocks: log_file = self.manager.build_log_file_name(self.log_name_sys, current_owner) expected_filename = '%s/%s/%s-%s.log' % ( self.manager.GUEST_LOG_BASE_DIR, self.manager.GUEST_LOG_DATASTORE_DIRNAME, self.manager.manager, self.log_name_sys) expected_call_counts = {'exists': 1, 'write_file': 1, 'create_directory': 2, 'chown': 1, 'chmod': 1} self.assert_build_log_file_name(expected_filename, log_file, os_mocks, expected_call_counts) def assert_build_log_file_name(self, expected_filename, filename, mocks, call_counts): assert_equal(expected_filename, filename, "Unexpected filename: %s (expected %s)" % (filename, expected_filename)) for key in mocks.keys(): assert_true( mocks[key].call_count == call_counts[key], "%s called %d time(s)" % (key, mocks[key].call_count)) def test_build_log_file_name_with_dir(self): current_owner = getpass.getuser() log_dir = '/tmp' with patch.multiple(operating_system, exists=MagicMock(return_value=False), write_file=DEFAULT, create_directory=DEFAULT, chown=DEFAULT, chmod=DEFAULT) as os_mocks: log_file = self.manager.build_log_file_name(self.log_name_sys, current_owner, datastore_dir=log_dir) expected_filename = '%s/%s-%s.log' % ( log_dir, self.manager.manager, self.log_name_sys) expected_call_counts = {'exists': 1, 'write_file': 1, 'create_directory': 1, 'chown': 1, 'chmod': 1} self.assert_build_log_file_name(expected_filename, log_file, os_mocks, expected_call_counts) def test_validate_log_file(self): file_name = '/tmp/non-existent-file' current_owner = getpass.getuser() with patch.multiple(operating_system, exists=MagicMock(return_value=False), write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) as os_mocks: log_file = self.manager.validate_log_file(file_name, current_owner) assert_equal(file_name, log_file, "Unexpected filename") for key in os_mocks.keys(): assert_true(os_mocks[key].call_count == 1, "%s not called" % key) def test_prepare_single(self): self.run_prepare_test(cluster_config=None) def test_prepare_single_no_users(self): self.run_prepare_test(cluster_config=None, users=None) def test_prepare_single_no_databases(self): self.run_prepare_test(cluster_config=None, databases=None) def test_prepare_single_no_root_password(self): self.run_prepare_test(cluster_config=None, root_password=None) def test_prepare_cluster(self): self.run_prepare_test() def run_prepare_test(self, packages=Mock(), databases=Mock(), memory_mb=Mock(), users=Mock(), device_path=Mock(), mount_point=Mock(), backup_info=Mock(), config_contents=Mock(), root_password=Mock(), overrides=Mock(), cluster_config=Mock(), snapshot=Mock()): self._assert_prepare(self.context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) def _assert_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, 
config_contents, root_password, overrides, cluster_config, snapshot): is_error_expected = False is_post_process_expected = cluster_config is not None with patch.multiple(self.manager, do_prepare=DEFAULT, post_prepare=DEFAULT, apply_overrides_on_prepare=DEFAULT, enable_root_on_prepare=DEFAULT, create_database=DEFAULT, create_user=DEFAULT): self.manager.prepare( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) self.manager.status.begin_install.assert_called_once_with() self.manager.do_prepare.assert_called_once_with( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) self.manager.apply_overrides_on_prepare.assert_called_once_with( context, overrides) self.manager.status.end_install( error_occurred=is_error_expected, post_processing=is_post_process_expected) self.manager.post_prepare.assert_called_once_with( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) if not is_post_process_expected: if databases: self.manager.create_database.assert_called_once_with( context, databases) else: self.assertEqual( 0, self.manager.create_database.call_count) if users: self.manager.create_user.assert_called_once_with( context, users) else: self.assertEqual(0, self.manager.create_user.call_count) if not backup_info and root_password: (self.manager.enable_root_on_prepare. assert_called_once_with(context, root_password)) else: self.assertEqual( 0, self.manager.enable_root_on_prepare.call_count) else: self.assertEqual(0, self.manager.create_database.call_count) self.assertEqual(0, self.manager.create_user.call_count) self.assertEqual( 0, self.manager.enable_root_on_prepare.call_count) def test_apply_overrides_on_prepare(self): overrides = Mock() with patch.multiple(self.manager, update_overrides=DEFAULT, restart=DEFAULT): self.manager.apply_overrides_on_prepare(self.context, overrides) self.manager.update_overrides.assert_called_once_with( self.context, overrides) self.manager.restart.assert_called_once_with(self.context) @patch('trove.guestagent.datastore.manager.LOG') def test_apply_overrides_on_prepare_failure(self, mock_logging): packages = Mock() databases = Mock() memory_mb = Mock() users = Mock() device_path = Mock() mount_point = Mock() backup_info = Mock() config_contents = Mock() root_password = Mock() overrides = Mock() cluster_config = Mock() snapshot = Mock() expected_failure = Exception("Error in 'apply_overrides_on_prepare'.") with patch.multiple( self.manager, do_prepare=DEFAULT, apply_overrides_on_prepare=MagicMock( side_effect=expected_failure )): self.assertRaisesRegexp( Exception, expected_failure.message, self.manager.prepare, self.context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) self.manager.status.begin_install.assert_called_once_with() self.manager.status.end_install( error_occurred=True, post_processing=ANY) trove-5.0.0/trove/tests/unittests/guestagent/test_models.py0000664000567000056710000000525112701410316025417 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 

trove-5.0.0/trove/tests/unittests/guestagent/test_models.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime

from mock import Mock, MagicMock, patch

from trove.common import utils
from trove.db import models as dbmodels
from trove.db.sqlalchemy import api as dbapi
from trove.guestagent import models
from trove.tests.unittests import trove_testtools


class AgentHeartBeatTest(trove_testtools.TestCase):

    def setUp(self):
        super(AgentHeartBeatTest, self).setUp()
        self.origin_get_db_api = dbmodels.get_db_api
        self.origin_utcnow = utils.utcnow
        self.origin_db_api_save = dbapi.save
        self.origin_is_valid = dbmodels.DatabaseModelBase.is_valid
        self.origin_generate_uuid = utils.generate_uuid

    def tearDown(self):
        super(AgentHeartBeatTest, self).tearDown()
        dbmodels.get_db_api = self.origin_get_db_api
        utils.utcnow = self.origin_utcnow
        dbapi.save = self.origin_db_api_save
        dbmodels.DatabaseModelBase.is_valid = self.origin_is_valid
        utils.generate_uuid = self.origin_generate_uuid

    def test_create(self):
        utils.generate_uuid = Mock()
        dbapi.save = MagicMock(
            return_value=dbmodels.DatabaseModelBase)
        dbmodels.DatabaseModelBase.is_valid = Mock(return_value=True)
        models.AgentHeartBeat.create()
        self.assertEqual(1, utils.generate_uuid.call_count)
        self.assertEqual(3, dbmodels.DatabaseModelBase.is_valid.call_count)

    @patch('trove.db.models.DatabaseModelBase')
    def test_save(self, dmb_mock):
        utils.utcnow = Mock()
        dbmodels.get_db_api = MagicMock(
            return_value=dbmodels.DatabaseModelBase)
        dbapi.save = Mock()
        dbmodels.DatabaseModelBase.is_valid = Mock(return_value=True)
        self.heartBeat = models.AgentHeartBeat()
        self.heartBeat.save()
        self.assertEqual(1, utils.utcnow.call_count)

    def test_is_active(self):
        models.AGENT_HEARTBEAT = 10000000000
        mock = models.AgentHeartBeat()
        models.AgentHeartBeat.__setitem__(mock, 'updated_at', datetime.now())
        self.assertTrue(models.AgentHeartBeat.is_active(mock))
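
# Illustrative sketch only (an assumption, not Trove's implementation): the
# test above sets models.AGENT_HEARTBEAT to a huge number of seconds so a
# heartbeat stamped "now" always counts as active. The freshness rule being
# exercised presumably reduces to something like this:
def _example_is_active(updated_at, window_seconds=10000000000):
    """Return True if the heartbeat is younger than the allowed window."""
    age = datetime.now() - updated_at
    return age.total_seconds() < window_seconds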

trove-5.0.0/trove/tests/unittests/guestagent/test_redis_manager.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import DEFAULT, MagicMock, patch

from trove.guestagent import backup
from trove.guestagent.common import configuration
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis import (
    service as redis_service)
from trove.guestagent.datastore.experimental.redis.manager import (
    Manager as RedisManager)
from trove.guestagent.volume import VolumeDevice
from trove.tests.unittests import trove_testtools


class RedisGuestAgentManagerTest(trove_testtools.TestCase):

    @patch.object(redis_service.RedisApp, '_build_admin_client')
    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    def setUp(self, *args, **kwargs):
        super(RedisGuestAgentManagerTest, self).setUp()
        self.patch_ope = patch('os.path.expanduser')
        self.mock_ope = self.patch_ope.start()
        self.addCleanup(self.patch_ope.stop)
        self.context = trove_testtools.TroveTestContext(self)
        self.replication_strategy = 'RedisSyncReplication'
        self.patch_rs = patch(
            'trove.guestagent.strategies.replication.get_strategy',
            return_value=self.replication_strategy)
        self.mock_rs = self.patch_rs.start()
        self.addCleanup(self.patch_rs.stop)
        self.manager = RedisManager()
        self.packages = 'redis-server'
        self.origin_RedisAppStatus = redis_service.RedisAppStatus
        self.origin_start_redis = redis_service.RedisApp.start_db
        self.origin_stop_redis = redis_service.RedisApp.stop_db
        self.origin_install_redis = redis_service.RedisApp._install_redis
        self.origin_install_if_needed = \
            redis_service.RedisApp.install_if_needed
        self.origin_format = VolumeDevice.format
        self.origin_mount = VolumeDevice.mount
        self.origin_mount_points = VolumeDevice.mount_points
        self.origin_restore = backup.restore
        self.patch_repl = patch(
            'trove.guestagent.strategies.replication.get_instance')
        self.mock_repl = self.patch_repl.start()
        self.addCleanup(self.patch_repl.stop)
        self.patch_gfvs = patch(
            'trove.guestagent.dbaas.get_filesystem_volume_stats')
        self.mock_gfvs_class = self.patch_gfvs.start()
        self.addCleanup(self.patch_gfvs.stop)

    def tearDown(self):
        super(RedisGuestAgentManagerTest, self).tearDown()
        redis_service.RedisAppStatus = self.origin_RedisAppStatus
        redis_service.RedisApp.stop_db = self.origin_stop_redis
        redis_service.RedisApp.start_db = self.origin_start_redis
        redis_service.RedisApp._install_redis = self.origin_install_redis
        redis_service.RedisApp.install_if_needed = \
            self.origin_install_if_needed
        VolumeDevice.format = self.origin_format
        VolumeDevice.mount = self.origin_mount
        VolumeDevice.mount_points = self.origin_mount_points
        backup.restore = self.origin_restore

    def test_update_status(self):
        mock_status = MagicMock()
        self.manager._app.status = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()

    def test_prepare_redis_not_installed(self):
        self._prepare_dynamic(is_redis_installed=False)

    def test_prepare_redis_with_snapshot(self):
        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': 1.0},
                    'config': None}
        self._prepare_dynamic(snapshot=snapshot)

    @patch.object(redis_service.RedisApp, 'get_working_dir',
                  MagicMock(return_value='/var/lib/redis'))
    def test_prepare_redis_from_backup(self):
        self._prepare_dynamic(backup_id='backup_id_123abc')

    @patch.multiple(redis_service.RedisApp,
                    apply_initial_guestagent_configuration=DEFAULT,
                    restart=DEFAULT,
                    install_if_needed=DEFAULT)
    @patch.object(operating_system, 'chown')
    @patch.object(configuration.ConfigurationManager, 'save_configuration')
    def _prepare_dynamic(self, save_configuration_mock, chown_mock,
                         apply_initial_guestagent_configuration, restart,
                         install_if_needed,
                         device_path='/dev/vdb', is_redis_installed=True,
                         backup_info=None, is_root_enabled=False,
                         mount_point='/var/lib/redis', backup_id=None,
                         snapshot=None):

        backup_info = None
        if backup_id is not None:
            backup_info = {'id': backup_id,
                           'location': 'fake-location',
                           'type': 'RedisBackup',
                           'checksum': 'fake-checksum',
                           }

        # covering all outcomes is starting to cause trouble here
        mock_status = MagicMock()
        self.manager._app.status = mock_status
        self.manager._build_admin_client = MagicMock(return_value=MagicMock())
        redis_service.RedisApp.stop_db = MagicMock(return_value=None)
        redis_service.RedisApp.start_db = MagicMock(return_value=None)
        mock_status.begin_install = MagicMock(return_value=None)
        VolumeDevice.format = MagicMock(return_value=None)
        VolumeDevice.mount = MagicMock(return_value=None)
        VolumeDevice.mount_points = MagicMock(return_value=[])
        backup.restore = MagicMock(return_value=None)
        mock_replication = MagicMock()
        mock_replication.enable_as_slave = MagicMock()
        self.mock_repl.return_value = mock_replication

        self.manager.prepare(self.context, self.packages, None, '2048',
                             None, device_path=device_path,
                             mount_point=mount_point,
                             backup_info=backup_info,
                             overrides=None,
                             cluster_config=None,
                             snapshot=snapshot)

        mock_status.begin_install.assert_any_call()
        VolumeDevice.format.assert_any_call()
        install_if_needed.assert_any_call(self.packages)
        save_configuration_mock.assert_any_call(None)
        apply_initial_guestagent_configuration.assert_called_once_with()
        chown_mock.assert_any_call(mount_point, 'redis', 'redis',
                                   as_root=True)
        if backup_info:
            backup.restore.assert_called_once_with(self.context,
                                                   backup_info,
                                                   '/var/lib/redis')
        else:
            redis_service.RedisApp.restart.assert_any_call()
        if snapshot:
            self.assertEqual(1, mock_replication.enable_as_slave.call_count)
        else:
            self.assertEqual(0, mock_replication.enable_as_slave.call_count)

    @patch.object(redis_service.RedisApp, 'restart')
    def test_restart(self, redis_mock):
        self.manager.restart(self.context)
        redis_mock.assert_any_call()

    @patch.object(redis_service.RedisApp, 'stop_db')
    def test_stop_db(self, redis_mock):
        self.manager.stop_db(self.context)
        redis_mock.assert_any_call(do_not_start_on_reboot=False)

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.object(backup, 'backup')
    @patch.object(configuration.ConfigurationManager, 'parse_configuration',
                  MagicMock(return_value={'dir': '/var/lib/redis',
                                          'dbfilename': 'dump.rdb'}))
    @patch.object(operating_system, 'chown')
    @patch.object(operating_system, 'create_directory')
    def test_create_backup(self, *mocks):
        backup.backup = MagicMock(return_value=None)
        RedisManager().create_backup(self.context, 'backup_id_123')
        backup.backup.assert_any_call(self.context, 'backup_id_123')

    def test_backup_required_for_replication(self):
        mock_replication = MagicMock()
        mock_replication.backup_required_for_replication = MagicMock()
        self.mock_repl.return_value = mock_replication
        self.manager.backup_required_for_replication(self.context)
        self.assertEqual(
            1, mock_replication.backup_required_for_replication.call_count)

    def test_attach_replica(self):
        mock_replication = MagicMock()
        mock_replication.enable_as_slave = MagicMock()
        self.mock_repl.return_value = mock_replication
        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': 1.0}}
        self.manager.attach_replica(self.context, snapshot, None)
        self.assertEqual(1, mock_replication.enable_as_slave.call_count)
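
    # Illustrative sketch (hypothetical helper, not called by the suite):
    # the replication tests in this class all pass around the same snapshot
    # payload; building it in one place shows its expected shape.
    @staticmethod
    def _example_build_snapshot(strategy, dataset_size=1.0, config=None):
        """Assemble the snapshot dict the replication tests pass around."""
        return {'replication_strategy': strategy,
                'dataset': {'dataset_size': dataset_size},
                'config': config}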
    def test_detach_replica(self):
        mock_replication = MagicMock()
        mock_replication.detach_slave = MagicMock()
        self.mock_repl.return_value = mock_replication
        self.manager.detach_replica(self.context)
        self.assertEqual(1, mock_replication.detach_slave.call_count)

    def test_enable_as_master(self):
        mock_replication = MagicMock()
        mock_replication.enable_as_master = MagicMock()
        self.mock_repl.return_value = mock_replication
        self.manager.enable_as_master(self.context, None)
        self.assertEqual(1, mock_replication.enable_as_master.call_count)

    def test_demote_replication_master(self):
        mock_replication = MagicMock()
        mock_replication.demote_master = MagicMock()
        self.mock_repl.return_value = mock_replication
        self.manager.demote_replication_master(self.context)
        self.assertEqual(1, mock_replication.demote_master.call_count)

    @patch.object(redis_service.RedisApp, 'make_read_only')
    def test_make_read_only(self, redis_mock):
        self.manager.make_read_only(self.context, 'ON')
        redis_mock.assert_any_call('ON')

    def test_cleanup_source_on_replica_detach(self):
        mock_replication = MagicMock()
        mock_replication.cleanup_source_on_replica_detach = MagicMock()
        self.mock_repl.return_value = mock_replication
        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': '1.0'}}
        self.manager.cleanup_source_on_replica_detach(self.context, snapshot)
        self.assertEqual(
            1, mock_replication.cleanup_source_on_replica_detach.call_count)

    def test_get_replication_snapshot(self):
        snapshot_id = None
        log_position = None
        master_ref = 'my_master'
        used_size = 1.0
        total_size = 2.0

        mock_replication = MagicMock()
        mock_replication.enable_as_master = MagicMock()
        mock_replication.snapshot_for_replication = MagicMock(
            return_value=(snapshot_id, log_position))
        mock_replication.get_master_ref = MagicMock(
            return_value=master_ref)
        self.mock_repl.return_value = mock_replication
        self.mock_gfvs_class.return_value = (
            {'used': used_size, 'total': total_size})

        expected_replication_snapshot = {
            'dataset': {
                'datastore_manager': self.manager.manager,
                'dataset_size': used_size,
                'volume_size': total_size,
                'snapshot_id': snapshot_id
            },
            'replication_strategy': self.replication_strategy,
            'master': master_ref,
            'log_position': log_position
        }

        snapshot_info = None
        replica_source_config = None
        replication_snapshot = (
            self.manager.get_replication_snapshot(self.context,
                                                  snapshot_info,
                                                  replica_source_config))
        self.assertEqual(expected_replication_snapshot, replication_snapshot)
        self.assertEqual(1, mock_replication.enable_as_master.call_count)
        self.assertEqual(
            1, mock_replication.snapshot_for_replication.call_count)
        self.assertEqual(1, mock_replication.get_master_ref.call_count)

    def test_get_replica_context(self):
        master_ref = {
            'host': '1.2.3.4',
            'port': 3306
        }
        expected_info = {
            'master': master_ref,
        }
        mock_replication = MagicMock()
        mock_replication.get_replica_context = MagicMock(
            return_value=expected_info)
        self.mock_repl.return_value = mock_replication
        replica_info = self.manager.get_replica_context(self.context)
        self.assertEqual(1, mock_replication.get_replica_context.call_count)
        self.assertEqual(expected_info, replica_info)

    def test_get_last_txn(self):
        expected_host = '10.0.0.2'
        self.manager._get_master_host = MagicMock(return_value=expected_host)
        expected_txn_id = 199
        repl_info = {'role': 'master',
                     'master_repl_offset': expected_txn_id}
        self.manager._get_repl_info = MagicMock(return_value=repl_info)
        (host, txn_id) = self.manager.get_last_txn(self.context)
        self.manager._get_master_host.assert_any_call()
        self.manager._get_repl_info.assert_any_call()
        self.assertEqual(expected_host, host)
        self.assertEqual(expected_txn_id, txn_id)

    def test_get_latest_txn_id(self):
        expected_txn_id = 199
        repl_info = {'role': 'master',
                     'master_repl_offset': expected_txn_id}
        self.manager._get_repl_info = MagicMock(return_value=repl_info)
        latest_txn_id = self.manager.get_latest_txn_id(self.context)
        self.assertEqual(expected_txn_id, latest_txn_id)
        self.manager._get_repl_info.assert_any_call()

    def test_wait_for_txn(self):
        expected_txn_id = 199
        repl_info = {'role': 'master',
                     'master_repl_offset': expected_txn_id}
        self.manager._get_repl_info = MagicMock(return_value=repl_info)
        self.manager.wait_for_txn(self.context, expected_txn_id)
        self.manager._get_repl_info.assert_any_call()
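
# Illustrative sketch (an assumption about what the mocked _get_repl_info
# returns above): Redis exposes replication state through the INFO command,
# and the "transaction id" these tests assert is simply the
# master_repl_offset field of that dict.
def _example_latest_txn_id(repl_info):
    """Mirror get_latest_txn_id for an INFO-style replication dict."""
    return repl_info['master_repl_offset']

# e.g. _example_latest_txn_id({'role': 'master', 'master_repl_offset': 199})
# returns 199.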

trove-5.0.0/trove/tests/unittests/guestagent/test_vertica_api.py

# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from eventlet import Timeout
import mock

import trove.common.context as context
from trove.common import exception
from trove.common.rpc.version import RPC_API_VERSION
from trove.common.strategies.cluster.experimental.vertica.guestagent import (
    VerticaGuestAgentAPI)
from trove import rpc
from trove.tests.unittests import trove_testtools


def _mock_call(cmd, timeout, version=None, user=None,
               public_keys=None, members=None):
    # Fail unless cmd is one of: get_public_keys, authorize_public_keys,
    # install_cluster or cluster_complete.
    if cmd in ('get_public_keys', 'authorize_public_keys',
               'install_cluster', 'cluster_complete'):
        return True
    else:
        raise BaseException("Test Failed")


class ApiTest(trove_testtools.TestCase):

    @mock.patch.object(rpc, 'get_client')
    def setUp(self, *args):
        super(ApiTest, self).setUp()
        self.context = context.TroveContext()
        self.guest = VerticaGuestAgentAPI(self.context, 0)
        self.guest._call = _mock_call
        self.api = VerticaGuestAgentAPI(self.context, "instance-id-x23d2d")
        self._mock_rpc_client()

    def test_get_routing_key(self):
        self.assertEqual('guestagent.instance-id-x23d2d',
                         self.api._get_routing_key())

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_cast_exception(self, mock_logging):
        self.call_context.cast.side_effect = IOError('host down')
        self.assertRaises(exception.GuestError, self.api.create_user,
                          'test_user')

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_call_exception(self, mock_logging):
        self.call_context.call.side_effect = IOError('host_down')
        self.assertRaises(exception.GuestError, self.api.list_users)

    def test_api_call_timeout(self):
        self.call_context.call.side_effect = Timeout()
        self.assertRaises(exception.GuestTimeout, self.api.restart)

    def _verify_rpc_prepare_before_call(self):
        self.api.client.prepare.assert_called_once_with(
            version=RPC_API_VERSION, timeout=mock.ANY)

    def _verify_rpc_prepare_before_cast(self):
        self.api.client.prepare.assert_called_once_with(
            version=RPC_API_VERSION)

    def _verify_cast(self, *args, **kwargs):
        self.call_context.cast.assert_called_once_with(self.context, *args,
                                                       **kwargs)

    def _verify_call(self, *args, **kwargs):
        self.call_context.call.assert_called_once_with(self.context, *args,
                                                       **kwargs)

    def _mock_rpc_client(self):
        self.call_context = mock.Mock()
        self.api.client.prepare = mock.Mock(return_value=self.call_context)
        self.call_context.call = mock.Mock()
        self.call_context.cast = mock.Mock()

    def test_get_public_keys(self):
        exp_resp = 'some_key'
        self.call_context.call.return_value = exp_resp

        resp = self.api.get_public_keys(user='dummy')

        self._verify_rpc_prepare_before_call()
        self._verify_call('get_public_keys', user='dummy')
        self.assertEqual(exp_resp, resp)

    def test_authorize_public_keys(self):
        exp_resp = None
        self.call_context.call.return_value = exp_resp

        resp = self.api.authorize_public_keys(user='dummy',
                                              public_keys='some_key')

        self._verify_rpc_prepare_before_call()
        self._verify_call('authorize_public_keys', user='dummy',
                          public_keys='some_key')
        self.assertEqual(exp_resp, resp)

    def test_install_cluster(self):
        exp_resp = None
        self.call_context.call.return_value = exp_resp

        resp = self.api.install_cluster(members=['10.0.0.1', '10.0.0.2'])

        self._verify_rpc_prepare_before_call()
        self._verify_call('install_cluster', members=['10.0.0.1', '10.0.0.2'])
        self.assertEqual(exp_resp, resp)

    def test_cluster_complete(self):
        exp_resp = None
        self.call_context.call.return_value = exp_resp

        resp = self.api.cluster_complete()

        self._verify_rpc_prepare_before_call()
        self._verify_call('cluster_complete')
        self.assertEqual(exp_resp, resp)
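
# Illustrative sketch (hypothetical wrapper, not Trove's API): the tests
# above distinguish an oslo.messaging 'call' (synchronous, returns a value,
# prepared with a timeout) from a 'cast' (fire-and-forget, no timeout). A
# guest API method built on a prepared client follows this pattern:
def _example_rpc_call(client, ctxt, method, **kwargs):
    """Synchronous RPC: prepare with a version and timeout, then call."""
    cctxt = client.prepare(version=RPC_API_VERSION, timeout=10)
    return cctxt.call(ctxt, method, **kwargs)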

trove-5.0.0/trove/tests/unittests/guestagent/test_mongodb_manager.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import pymongo

import trove.common.utils as utils
import trove.guestagent.backup as backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
import trove.guestagent.datastore.experimental.mongodb.manager as manager
import trove.guestagent.datastore.experimental.mongodb.service as service
import trove.guestagent.db.models as models
import trove.guestagent.volume as volume
import trove.tests.unittests.trove_testtools as trove_testtools


class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):

    @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    def setUp(self, _):
        super(GuestAgentMongoDBManagerTest, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)
        self.manager = manager.Manager()

        self.execute_with_timeout_patch = mock.patch.object(
            utils, 'execute_with_timeout', return_value=('0', ''))
        self.addCleanup(self.execute_with_timeout_patch.stop)
        self.execute_with_timeout_patch.start()

        self.pymongo_patch = mock.patch.object(pymongo, 'MongoClient')
        self.addCleanup(self.pymongo_patch.stop)
        self.pymongo_patch.start()

        self.mount_point = '/var/lib/mongodb'

    def tearDown(self):
        super(GuestAgentMongoDBManagerTest, self).tearDown()

    def test_update_status(self):
        self.manager.app.status = mock.MagicMock()
        self.manager.update_status(self.context)
        self.manager.app.status.update.assert_any_call()

    def _prepare_method(self, packages=['packages'], databases=None,
                        memory_mb='2048', users=None, device_path=None,
                        mount_point=None, backup_info=None,
                        config_contents=None, root_password=None,
                        overrides=None, cluster_config=None):
        """self.manager.app must be correctly mocked before calling."""

        self.manager.app.status = mock.Mock()

        self.manager.prepare(self.context, packages,
                             databases, memory_mb, users,
                             device_path=device_path,
                             mount_point=mount_point,
                             backup_info=backup_info,
                             config_contents=config_contents,
                             root_password=root_password,
                             overrides=overrides,
                             cluster_config=cluster_config)

        self.manager.app.status.begin_install.assert_any_call()
        self.manager.app.install_if_needed.assert_called_with(packages)
        self.manager.app.stop_db.assert_any_call()
        self.manager.app.clear_storage.assert_any_call()
        (self.manager.app.apply_initial_guestagent_configuration.
         assert_called_once_with(cluster_config, self.mount_point))

    @mock.patch.object(volume, 'VolumeDevice')
    @mock.patch('os.path.exists')
    def test_prepare_for_volume(self, exists, mocked_volume):
        device_path = '/dev/vdb'

        self.manager.app = mock.Mock()

        self._prepare_method(device_path=device_path)

        mocked_volume().unmount_device.assert_called_with(device_path)
        mocked_volume().format.assert_any_call()
        mocked_volume().migrate_data.assert_called_with(self.mount_point)
        mocked_volume().mount.assert_called_with(self.mount_point)

    def test_secure(self):
        self.manager.app = mock.Mock()

        mock_secure = mock.Mock()
        self.manager.app.secure = mock_secure

        self._prepare_method()

        mock_secure.assert_called_with()

    @mock.patch.object(backup, 'restore')
    @mock.patch.object(service.MongoDBAdmin, 'is_root_enabled')
    def test_prepare_from_backup(self, mocked_root_check, mocked_restore):
        self.manager.app = mock.Mock()

        backup_info = {'id': 'backup_id_123abc',
                       'location': 'fake-location',
                       'type': 'MongoDBDump',
                       'checksum': 'fake-checksum'}

        self._prepare_method(backup_info=backup_info)

        mocked_restore.assert_called_with(self.context, backup_info,
                                          '/var/lib/mongodb')
        mocked_root_check.assert_any_call()

    def test_prepare_with_databases(self):
        self.manager.app = mock.Mock()

        database = mock.Mock()
        mock_create_databases = mock.Mock()
        self.manager.create_database = mock_create_databases

        self._prepare_method(databases=[database])

        mock_create_databases.assert_called_with(self.context, [database])

    def test_prepare_with_users(self):
        self.manager.app = mock.Mock()

        user = mock.Mock()
        mock_create_users = mock.Mock()
        self.manager.create_user = mock_create_users

        self._prepare_method(users=[user])

        mock_create_users.assert_called_with(self.context, [user])

    @mock.patch.object(service.MongoDBAdmin, 'enable_root')
    def test_provide_root_password(self, mocked_enable_root):
        self.manager.app = mock.Mock()

        self._prepare_method(root_password='test_password')

        mocked_enable_root.assert_called_with('test_password')

    # This is used in the test_*_user tests below
    _serialized_user = {
        '_name': 'testdb.testuser', '_password': None,
        '_roles': [{'db': 'testdb', 'role': 'testrole'}],
        '_username': 'testuser', '_databases': [],
        '_host': None,
        '_database': {'_name': 'testdb',
                      '_character_set': None,
                      '_collate': None}}

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record')
    def test_create_user(self, mocked_get_user, mocked_admin_user,
                         mocked_client):
        user = self._serialized_user.copy()
        user['_password'] = 'testpassword'
        users = [user]

        client = mocked_client().__enter__()['testdb']
        mocked_get_user.return_value = None

        self.manager.create_user(self.context, users)

        client.add_user.assert_called_with('testuser',
                                           password='testpassword',
                                           roles=[{'db': 'testdb',
                                                   'role': 'testrole'}])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_delete_user(self, mocked_admin_user, mocked_client):
        client = mocked_client().__enter__()['testdb']

        self.manager.delete_user(self.context, self._serialized_user)

        client.remove_user.assert_called_with('testuser')
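
    # Illustrative sketch (hypothetical helper, not called by the suite):
    # the fixtures above name MongoDB users as '<database>.<username>';
    # splitting on the first dot recovers the parts the assertions rely on.
    @staticmethod
    def _example_split_user_name(name):
        """Split 'testdb.testuser' into ('testdb', 'testuser')."""
        database, _, username = name.partition('.')
        return database, username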
    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record')
    def test_get_user(self, mocked_get_user, mocked_admin_user,
                      mocked_client):
        mocked_find = mock.MagicMock(return_value={
            '_id': 'testdb.testuser',
            'user': 'testuser', 'db': 'testdb',
            'roles': [{'db': 'testdb', 'role': 'testrole'}]
        })
        client = mocked_client().__enter__().admin
        client.system.users.find_one = mocked_find

        result = self.manager.get_user(self.context, 'testdb.testuser', None)

        mocked_find.assert_called_with({'user': 'testuser', 'db': 'testdb'})
        self.assertEqual(self._serialized_user, result)

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_list_users(self, mocked_admin_user, mocked_client):
        # roles are NOT returned by list_users
        user1 = self._serialized_user.copy()
        user2 = self._serialized_user.copy()
        user2['_name'] = 'testdb.otheruser'
        user2['_username'] = 'otheruser'
        user2['_roles'] = [{'db': 'testdb2', 'role': 'readWrite'}]
        user2['_databases'] = [{'_name': 'testdb2',
                                '_character_set': None,
                                '_collate': None}]

        mocked_find = mock.MagicMock(return_value=[
            {
                '_id': 'admin.os_admin',
                'user': 'os_admin', 'db': 'admin',
                'roles': [{'db': 'admin', 'role': 'root'}]
            },
            {
                '_id': 'testdb.testuser',
                'user': 'testuser', 'db': 'testdb',
                'roles': [{'db': 'testdb', 'role': 'testrole'}]
            },
            {
                '_id': 'testdb.otheruser',
                'user': 'otheruser', 'db': 'testdb',
                'roles': [{'db': 'testdb2', 'role': 'readWrite'}]
            }
        ])

        client = mocked_client().__enter__().admin
        client.system.users.find = mocked_find

        users, next_marker = self.manager.list_users(self.context)

        self.assertIsNone(next_marker)
        self.assertEqual(sorted([user1, user2]), users)

    @mock.patch.object(service.MongoDBAdmin, 'create_validated_user')
    @mock.patch.object(utils, 'generate_random_password',
                       return_value='password')
    def test_enable_root(self, mock_gen_rand_pwd, mock_create_user):
        root_user = {'_name': 'admin.root',
                     '_username': 'root',
                     '_database': {'_name': 'admin',
                                   '_character_set': None,
                                   '_collate': None},
                     '_password': 'password',
                     '_roles': [{'db': 'admin', 'role': 'root'}],
                     '_databases': [],
                     '_host': None}

        result = self.manager.enable_root(self.context)

        self.assertTrue(mock_create_user.called)
        self.assertEqual(root_user, result)

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record',
                       return_value=models.MongoDBUser('testdb.testuser'))
    def test_grant_access(self, mocked_get_user,
                          mocked_admin_user, mocked_client):
        client = mocked_client().__enter__()['testdb']

        self.manager.grant_access(self.context, 'testdb.testuser',
                                  None, ['db1', 'db2', 'db3'])

        client.add_user.assert_called_with('testuser', roles=[
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db2', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record',
                       return_value=models.MongoDBUser('testdb.testuser'))
    def test_revoke_access(self, mocked_get_user,
                           mocked_admin_user, mocked_client):
        client = mocked_client().__enter__()['testdb']

        mocked_get_user.return_value.roles = [
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db2', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ]

        self.manager.revoke_access(self.context, 'testdb.testuser',
                                   None, 'db2')

        client.add_user.assert_called_with('testuser', roles=[
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record',
                       return_value=models.MongoDBUser('testdb.testuser'))
    def test_list_access(self, mocked_get_user,
                         mocked_admin_user, mocked_client):
        mocked_get_user.return_value.roles = [
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db2', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ]

        accessible_databases = self.manager.list_access(
            self.context, 'testdb.testuser', None)

        self.assertEqual(['db1', 'db2', 'db3'],
                         [db['_name'] for db in accessible_databases])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_create_databases(self, mocked_admin_user, mocked_client):
        schema = models.MongoDBSchema('testdb').serialize()
        db_client = mocked_client().__enter__()['testdb']

        self.manager.create_database(self.context, [schema])

        db_client['dummy'].insert.assert_called_with({'dummy': True})
        db_client.drop_collection.assert_called_with('dummy')

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_list_databases(self,  # mocked_ignored_dbs,
                            mocked_admin_user, mocked_client):
        # This list contains the special 'admin', 'local' and 'config' dbs;
        # the special dbs should be skipped in the output.
        # Pagination is tested by starting at 'db1', so 'db0' should not
        # be in the output. The limit is set to 2, meaning the result
        # should be 'db1' and 'db2'. The next_marker is the last database
        # returned, i.e. 'db2'.
        mocked_list = mock.MagicMock(
            return_value=['admin', 'local', 'config',
                          'db0', 'db1', 'db2', 'db3'])
        mocked_client().__enter__().database_names = mocked_list

        marker = models.MongoDBSchema('db1').serialize()
        dbs, next_marker = self.manager.list_databases(
            self.context, limit=2, marker=marker, include_marker=True)

        mocked_list.assert_any_call()
        self.assertEqual([models.MongoDBSchema('db1').serialize(),
                          models.MongoDBSchema('db2').serialize()],
                         dbs)
        self.assertEqual(models.MongoDBSchema('db2').serialize(), next_marker)

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_delete_database(self, mocked_admin_user, mocked_client):
        schema = models.MongoDBSchema('testdb').serialize()

        self.manager.delete_database(self.context, schema)

        mocked_client().__enter__().drop_database.assert_called_with('testdb')
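
# Illustrative sketch (hypothetical helper mirroring the pagination contract
# exercised in test_list_databases above): skip the special dbs, start at
# the marker (inclusive when include_marker=True), return at most 'limit'
# items, and report the last returned item as the next marker.
def _example_paginate(names, marker, limit, include_marker=True,
                      ignored=('admin', 'local', 'config')):
    visible = [n for n in sorted(names) if n not in ignored]
    start = visible.index(marker) + (0 if include_marker else 1)
    page = visible[start:start + limit]
    next_marker = page[-1] if page else None
    return page, next_marker

# e.g. _example_paginate(['admin', 'local', 'config', 'db0', 'db1', 'db2',
#                         'db3'], 'db1', 2) == (['db1', 'db2'], 'db2')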

trove-5.0.0/trove/tests/unittests/guestagent/test_mysql_manager.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from mock import DEFAULT
from mock import MagicMock
from mock import patch
from proboscis.asserts import assert_equal
from testtools.matchers import Is, Equals, Not

from trove.common.exception import InsufficientSpaceForReplica
from trove.common.exception import ProcessExecutionError
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.common import operating_system
# TODO(atomic77) The test cases should be made configurable
# to make it easier to test the various derived datastores.
from trove.guestagent.datastore.mysql.manager import Manager
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent import dbaas as base_dbaas
from trove.guestagent import pkg
from trove.guestagent import volume
from trove.guestagent.volume import VolumeDevice
from trove.tests.unittests import trove_testtools


class GuestAgentManagerTest(trove_testtools.TestCase):

    def setUp(self):
        super(GuestAgentManagerTest, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)
        self.replication_strategy = 'MysqlGTIDReplication'
        self.patch_rs = patch(
            'trove.guestagent.strategies.replication.get_strategy',
            return_value=self.replication_strategy)
        self.mock_rs = self.patch_rs.start()
        self.addCleanup(self.patch_rs.stop)
        self.manager = Manager()
        self.origin_MySqlAppStatus = dbaas.MySqlAppStatus.get
        self.origin_os_path_exists = os.path.exists
        self.origin_format = volume.VolumeDevice.format
        self.origin_migrate_data = volume.VolumeDevice.migrate_data
        self.origin_mount = volume.VolumeDevice.mount
        self.origin_unmount = volume.VolumeDevice.unmount
        self.origin_mount_points = volume.VolumeDevice.mount_points
        self.origin_stop_mysql = dbaas.MySqlApp.stop_db
        self.origin_start_mysql = dbaas.MySqlApp.start_mysql
        self.origin_update_overrides = dbaas.MySqlApp.update_overrides
        self.origin_install_if_needed = dbaas.MySqlApp.install_if_needed
        self.origin_secure = dbaas.MySqlApp.secure
        self.origin_secure_root = dbaas.MySqlApp.secure_root
        self.origin_pkg_is_installed = pkg.Package.pkg_is_installed
        self.origin_chown = operating_system.chown
        # set up common mock objects, etc. for replication testing
        self.patcher_gfvs = patch(
            'trove.guestagent.dbaas.get_filesystem_volume_stats')
        self.patcher_rs = patch(
            'trove.guestagent.strategies.replication.get_instance')
        self.mock_gfvs_class = self.patcher_gfvs.start()
        self.mock_rs_class = self.patcher_rs.start()

    def tearDown(self):
        super(GuestAgentManagerTest, self).tearDown()
        dbaas.MySqlAppStatus.get = self.origin_MySqlAppStatus
        os.path.exists = self.origin_os_path_exists
        volume.VolumeDevice.format = self.origin_format
        volume.VolumeDevice.migrate_data = self.origin_migrate_data
        volume.VolumeDevice.mount = self.origin_mount
        volume.VolumeDevice.unmount = self.origin_unmount
        volume.VolumeDevice.mount_points = self.origin_mount_points
        dbaas.MySqlApp.stop_db = self.origin_stop_mysql
        dbaas.MySqlApp.start_mysql = self.origin_start_mysql
        dbaas.MySqlApp.update_overrides = self.origin_update_overrides
        dbaas.MySqlApp.install_if_needed = self.origin_install_if_needed
        dbaas.MySqlApp.secure = self.origin_secure
        dbaas.MySqlApp.secure_root = self.origin_secure_root
        operating_system.chown = self.origin_chown
        pkg.Package.pkg_is_installed = self.origin_pkg_is_installed
        # teardown the replication mock objects
        self.patcher_gfvs.stop()
        self.patcher_rs.stop()

    def test_update_status(self):
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.update_status(self.context)
        dbaas.MySqlAppStatus.get.assert_any_call()
        mock_status.update.assert_any_call()

    @patch.object(dbaas.MySqlAdmin, 'create_database')
    def test_create_database(self, create_db_mock):
        self.manager.create_database(self.context, ['db1'])
        create_db_mock.assert_any_call(['db1'])

    @patch.object(dbaas.MySqlAdmin, 'create_user')
    def test_create_user(self, create_user_mock):
        self.manager.create_user(self.context, ['user1'])
        create_user_mock.assert_any_call(['user1'])

    @patch.object(dbaas.MySqlAdmin, 'delete_database')
    def test_delete_database(self, delete_database_mock):
        databases = ['db1']
        self.manager.delete_database(self.context, databases)
        delete_database_mock.assert_any_call(databases)

    @patch.object(dbaas.MySqlAdmin, 'delete_user')
    def test_delete_user(self, delete_user_mock):
        user = ['user1']
        self.manager.delete_user(self.context, user)
        delete_user_mock.assert_any_call(user)

    @patch.object(dbaas.MySqlAdmin, 'grant_access')
    def test_grant_access(self, grant_access_mock):
        username = "test_user"
        hostname = "test_host"
        databases = ["test_database"]
        self.manager.grant_access(self.context, username, hostname, databases)
        grant_access_mock.assert_any_call(username, hostname, databases)

    @patch.object(dbaas.MySqlAdmin, 'list_databases',
                  return_value=['database1'])
    def test_list_databases(self, list_databases_mock):
        databases = self.manager.list_databases(self.context)
        self.assertThat(databases, Not(Is(None)))
        self.assertThat(databases, Equals(list_databases_mock.return_value))
        list_databases_mock.assert_any_call(None, None, False)

    @patch.object(dbaas.MySqlAdmin, 'list_users', return_value=['user1'])
    def test_list_users(self, list_users_mock):
        users = self.manager.list_users(self.context)
        self.assertThat(users, Equals(list_users_mock.return_value))
        dbaas.MySqlAdmin.list_users.assert_any_call(None, None, False)

    @patch.object(dbaas.MySqlAdmin, 'get_user', return_value=['user1'])
    def test_get_users(self, get_user_mock):
        username = ['user1']
        hostname = ['host']
        users = self.manager.get_user(self.context, username, hostname)
        self.assertThat(users, Equals(get_user_mock.return_value))
        get_user_mock.assert_any_call(username, hostname)

    @patch.object(dbaas.MySqlAdmin, 'enable_root',
                  return_value='user_id_stuff')
    def test_enable_root(self, enable_root_mock):
        user_id = self.manager.enable_root(self.context)
        self.assertThat(user_id, Is(enable_root_mock.return_value))
        enable_root_mock.assert_any_call()

    @patch.object(dbaas.MySqlAdmin, 'disable_root')
    def test_disable_root(self, disable_root_mock):
        self.manager.disable_root(self.context)
        disable_root_mock.assert_any_call()

    @patch.object(dbaas.MySqlAdmin, 'is_root_enabled', return_value=True)
    def test_is_root_enabled(self, is_root_enabled_mock):
        is_enabled = self.manager.is_root_enabled(self.context)
        self.assertThat(is_enabled, Is(is_root_enabled_mock.return_value))
        is_root_enabled_mock.assert_any_call()

    @patch.object(backup, 'backup')
    def test_create_backup(self, backup_mock):
        # entry point
        Manager().create_backup(self.context, 'backup_id_123')
        # assertions
        backup_mock.assert_any_call(self.context, 'backup_id_123')

    def test_prepare_device_path_true(self):
        self._prepare_dynamic()

    def test_prepare_device_path_false(self):
        self._prepare_dynamic(device_path=None)

    def test_prepare_device_path_mounted(self):
        self._prepare_dynamic(is_mounted=True)

    def test_prepare_mysql_not_installed(self):
        self._prepare_dynamic(is_mysql_installed=False)

    def test_prepare_mysql_from_backup(self):
        self._prepare_dynamic(backup_id='backup_id_123abc')

    def test_prepare_mysql_from_backup_with_root(self):
        self._prepare_dynamic(backup_id='backup_id_123abc',
                              is_root_enabled=True)

    def test_prepare_mysql_with_root_password(self):
        self._prepare_dynamic(root_password='some_password')

    def test_prepare_mysql_with_users_and_databases(self):
        self._prepare_dynamic(databases=['db1'], users=['user1'])

    def test_prepare_mysql_with_snapshot(self):
        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': 1.0},
                    'config': None}
        total_size = snapshot['dataset']['dataset_size'] + 1
        self.mock_gfvs_class.return_value = {'total': total_size}
        self._prepare_dynamic(snapshot=snapshot)

    @patch.multiple(dbaas.MySqlAdmin,
                    create_user=DEFAULT,
                    create_database=DEFAULT,
                    enable_root=DEFAULT)
    @patch.object(backup, 'restore')
    def _prepare_dynamic(self, restore_mock, create_user, create_database,
                         enable_root,
                         device_path='/dev/vdb', is_mysql_installed=True,
                         backup_id=None, is_root_enabled=False,
                         root_password=None, overrides=None, is_mounted=False,
                         databases=None, users=None, snapshot=None):
        # covering all outcomes is starting to cause trouble here
        COUNT = 1 if device_path else 0
        backup_info = None
        if backup_id is not None:
            backup_info = {'id': backup_id,
                           'location': 'fake-location',
                           'type': 'InnoBackupEx',
                           'checksum': 'fake-checksum',
                           }

        # TODO(juice): this should stub an instance of the MySqlAppStatus
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        mock_status.begin_install = MagicMock(return_value=None)
        VolumeDevice.format = MagicMock(return_value=None)
        VolumeDevice.migrate_data = MagicMock(return_value=None)
        VolumeDevice.mount = MagicMock(return_value=None)
        mount_points = []
        if is_mounted:
            mount_points = ['/mnt']
        VolumeDevice.mount_points = MagicMock(return_value=mount_points)
        VolumeDevice.unmount = MagicMock(return_value=None)
        set_data_dir_patcher = patch.object(dbaas.MySqlApp, 'set_data_dir',
                                            return_value='/var/lib/mysql')
        self.addCleanup(set_data_dir_patcher.stop)
        set_data_dir_patcher.start()
        dbaas.MySqlApp.stop_db = MagicMock(return_value=None)
        dbaas.MySqlApp.start_mysql = MagicMock(return_value=None)
        dbaas.MySqlApp.update_overrides = MagicMock(return_value=None)
        dbaas.MySqlApp.install_if_needed = MagicMock(return_value=None)
        dbaas.MySqlApp.secure = MagicMock(return_value=None)
        dbaas.MySqlApp.secure_root = MagicMock(return_value=None)
        pkg.Package.pkg_is_installed = MagicMock(
            return_value=is_mysql_installed)
        operating_system.chown = MagicMock(return_value=None)
        os.path.exists = MagicMock(return_value=True)
        mock_replication = MagicMock()
        mock_replication.enable_as_slave = MagicMock()
        self.mock_rs_class.return_value = mock_replication

        with patch.object(dbaas.MySqlAdmin, 'is_root_enabled',
                          return_value=is_root_enabled):
            self.manager.prepare(context=self.context,
                                 packages=None,
                                 memory_mb='2048',
                                 databases=databases,
                                 users=users,
                                 device_path=device_path,
                                 mount_point='/var/lib/mysql',
                                 backup_info=backup_info,
                                 root_password=root_password,
                                 overrides=overrides,
                                 cluster_config=None,
                                 snapshot=snapshot)

        # verification/assertion
        mock_status.begin_install.assert_any_call()
        self.assertEqual(COUNT, VolumeDevice.format.call_count)
        self.assertEqual(COUNT, VolumeDevice.migrate_data.call_count)
        self.assertEqual(COUNT, VolumeDevice.mount_points.call_count)
        self.assertEqual(COUNT, dbaas.MySqlApp.stop_db.call_count)
        if is_mounted:
            self.assertEqual(1, VolumeDevice.unmount.call_count)
        else:
            self.assertEqual(0, VolumeDevice.unmount.call_count)
        if backup_info:
            restore_mock.assert_any_call(self.context,
                                         backup_info,
                                         '/var/lib/mysql/data')
        dbaas.MySqlApp.install_if_needed.assert_any_call(None)
        # We don't need to make sure the exact contents are there
        dbaas.MySqlApp.secure.assert_any_call(None)
        dbaas.MySqlApp.secure_root.assert_any_call(
            secure_remote_root=not is_root_enabled)
        if root_password:
            dbaas.MySqlAdmin.enable_root.assert_any_call(root_password)
        if databases:
            dbaas.MySqlAdmin.create_database.assert_any_call(databases)
        else:
            self.assertFalse(dbaas.MySqlAdmin.create_database.called)
        if users:
            dbaas.MySqlAdmin.create_user.assert_any_call(users)
        else:
            self.assertFalse(dbaas.MySqlAdmin.create_user.called)
        if snapshot:
            self.assertEqual(1, mock_replication.enable_as_slave.call_count)
        else:
            self.assertEqual(0, mock_replication.enable_as_slave.call_count)
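
    # Illustrative sketch (hypothetical helper mirroring the expected dict
    # in test_get_replication_snapshot below): the snapshot payload merges
    # filesystem stats with what the replication strategy reports.
    @staticmethod
    def _example_replication_snapshot(manager, strategy, master_ref,
                                      snapshot_id, log_position, fs_stats):
        return {
            'dataset': {
                'datastore_manager': manager,
                'dataset_size': fs_stats['used'],
                'volume_size': fs_stats['total'],
                'snapshot_id': snapshot_id,
            },
            'replication_strategy': strategy,
            'master': master_ref,
            'log_position': log_position,
        }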
    def test_get_replication_snapshot(self):
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)

        snapshot_id = 'my_snapshot_id'
        log_position = 123456789
        master_ref = 'my_master'
        used_size = 1.0
        total_size = 2.0

        mock_replication = MagicMock()
        mock_replication.enable_as_master = MagicMock()
        mock_replication.snapshot_for_replication = MagicMock(
            return_value=(snapshot_id, log_position))
        mock_replication.get_master_ref = MagicMock(
            return_value=master_ref)
        self.mock_rs_class.return_value = mock_replication
        self.mock_gfvs_class.return_value = (
            {'used': used_size, 'total': total_size})

        expected_replication_snapshot = {
            'dataset': {
                'datastore_manager': self.manager.manager,
                'dataset_size': used_size,
                'volume_size': total_size,
                'snapshot_id': snapshot_id
            },
            'replication_strategy': self.replication_strategy,
            'master': master_ref,
            'log_position': log_position
        }

        snapshot_info = None
        replica_source_config = None
        # entry point
        replication_snapshot = (
            self.manager.get_replication_snapshot(self.context,
                                                  snapshot_info,
                                                  replica_source_config))
        # assertions
        self.assertEqual(expected_replication_snapshot, replication_snapshot)
        self.assertEqual(1, mock_replication.enable_as_master.call_count)
        self.assertEqual(
            1, mock_replication.snapshot_for_replication.call_count)
        self.assertEqual(1, mock_replication.get_master_ref.call_count)

    def test_attach_replication_slave_valid(self):
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)

        total_size = 2.0
        dataset_size = 1.0

        mock_replication = MagicMock()
        mock_replication.enable_as_slave = MagicMock()
        self.mock_rs_class.return_value = mock_replication
        self.mock_gfvs_class.return_value = {'total': total_size}

        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': dataset_size}}

        # entry point
        self.manager.attach_replica(self.context, snapshot, None)
        # assertions
        self.assertEqual(1, mock_replication.enable_as_slave.call_count)

    @patch('trove.guestagent.datastore.mysql_common.manager.LOG')
    def test_attach_replication_slave_invalid(self, *args):
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)

        total_size = 2.0
        dataset_size = 3.0

        mock_replication = MagicMock()
        mock_replication.enable_as_slave = MagicMock()
        self.mock_rs_class.return_value = mock_replication
        self.mock_gfvs_class.return_value = {'total': total_size}

        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': dataset_size}}

        # entry point
        self.assertRaises(InsufficientSpaceForReplica,
                          self.manager.attach_replica,
                          self.context, snapshot, None)
        # assertions
        self.assertEqual(0, mock_replication.enable_as_slave.call_count)

    def test_detach_replica(self):
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)

        mock_replication = MagicMock()
        mock_replication.detach_slave = MagicMock()
        self.mock_rs_class.return_value = mock_replication

        # entry point
        self.manager.detach_replica(self.context)
        # assertions
        self.assertEqual(1, mock_replication.detach_slave.call_count)

    def test_demote_replication_master(self):
        mock_status = MagicMock()
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)

        mock_replication = MagicMock()
        mock_replication.demote_master = MagicMock()
        self.mock_rs_class.return_value = mock_replication

        # entry point
        self.manager.demote_replication_master(self.context)
        # assertions
        self.assertEqual(1, mock_replication.demote_master.call_count)

    def test_get_master_UUID(self):
        app = dbaas.MySqlApp(None)

        def test_case(slave_status, expected_value):
            with patch.object(dbaas.MySqlApp, '_get_slave_status',
                              return_value=slave_status):
                assert_equal(app._get_master_UUID(), expected_value)

        test_case({'Master_UUID': '2a5b-2064-32fb'}, '2a5b-2064-32fb')
        test_case({'Master_UUID': ''}, None)
        test_case({}, None)

    def test_get_last_txn(self):

        def test_case(gtid_list, expected_value):
            with patch.object(dbaas.MySqlApp, '_get_gtid_executed',
                              return_value=gtid_list):
                txn = self.manager.get_last_txn(self.context)
                assert_equal(txn, expected_value)

        with patch.object(dbaas.MySqlApp, '_get_slave_status',
                          return_value={'Master_UUID': '2a5b-2064-32fb'}):
            test_case('2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1))
            test_case('2a5b-2064-32fb:1-5', ('2a5b-2064-32fb', 5))
            test_case('2a5b-2064-32fb:1,4b4-23:5', ('2a5b-2064-32fb', 1))
            test_case('4b4-23:5,2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1))
            test_case('4b-23:5,2a5b-2064-32fb:1,25:3-4', ('2a5b-2064-32fb', 1))
            test_case('4b4-23:1-5,2a5b-2064-32fb:1-10', ('2a5b-2064-32fb', 10))

        with patch.object(dbaas.MySqlApp, '_get_slave_status',
                          return_value={'Master_UUID': ''}):
            test_case('2a5b-2064-32fb:1', (None, 0))

        with patch.object(dbaas.MySqlApp, '_get_slave_status',
                          return_value={}):
            test_case('2a5b-2064-32fb:1', (None, 0))

    def test_rpc_ping(self):
        self.assertTrue(self.manager.rpc_ping(self.context))

    @patch.object(dbaas.MySqlAdmin, 'change_passwords')
    def test_change_passwords(self, change_passwords_mock):
        self.manager.change_passwords(
            self.context, [{'name': 'test_user', 'password': 'testpwd'}])
        change_passwords_mock.assert_any_call(
            [{'name': 'test_user', 'password': 'testpwd'}])

    @patch.object(dbaas.MySqlAdmin, 'update_attributes')
    def test_update_attributes(self, update_attr_mock):
        self.manager.update_attributes(self.context, 'test_user', '%',
                                       {'password': 'testpwd'})
        update_attr_mock.assert_any_call('test_user', '%',
                                         {'password': 'testpwd'})

    @patch.object(dbaas.MySqlApp, 'reset_configuration')
    def test_reset_configuration(self, reset_config_mock):
        dbaas.MySqlAppStatus.get = MagicMock(return_value=MagicMock())
        configuration = {'config_contents': 'some junk'}
        self.manager.reset_configuration(self.context, configuration)
        dbaas.MySqlAppStatus.get.assert_any_call()
        reset_config_mock.assert_any_call({'config_contents': 'some junk'})

    @patch.object(dbaas.MySqlAdmin, 'revoke_access')
    def test_revoke_access(self, revoke_access_mock):
        self.manager.revoke_access(self.context, 'test_user', '%', 'test_db')
        revoke_access_mock.assert_any_call('test_user', '%', 'test_db')

    @patch.object(dbaas.MySqlAdmin, 'list_access', return_value=['database1'])
    def test_list_access(self, list_access_mock):
        access = self.manager.list_access(self.context, 'test_user', '%')
        self.assertEqual(list_access_mock.return_value, access)
        list_access_mock.assert_any_call('test_user', '%')

    @patch.object(dbaas.MySqlApp, 'restart')
    def test_restart(self, restart_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.restart(self.context)
        dbaas.MySqlAppStatus.get.assert_any_call()
        restart_mock.assert_any_call()

    @patch.object(dbaas.MySqlApp, 'start_db_with_conf_changes')
    def test_start_db_with_conf_changes(self, start_db_mock):
        mock_status = MagicMock()
        configuration = {'config_contents': 'some junk'}
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.start_db_with_conf_changes(self.context, configuration)
        dbaas.MySqlAppStatus.get.assert_any_call()
        start_db_mock.assert_any_call({'config_contents': 'some junk'})

    def test_stop_db(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        dbaas.MySqlApp.stop_db = MagicMock(return_value=None)
        self.manager.stop_db(self.context)
        dbaas.MySqlAppStatus.get.assert_any_call()
        dbaas.MySqlApp.stop_db.assert_any_call(do_not_start_on_reboot=False)

    def test_get_filesystem_stats(self):
        with patch.object(base_dbaas, 'get_filesystem_volume_stats'):
            self.manager.get_filesystem_stats(self.context, '/var/lib/mysql')
            base_dbaas.get_filesystem_volume_stats.assert_any_call(
                '/var/lib/mysql')

    def test_mount_volume(self):
        with patch.object(volume.VolumeDevice, 'mount', return_value=None):
            self.manager.mount_volume(self.context,
                                      device_path='/dev/vdb',
                                      mount_point='/var/lib/mysql')
            test_mount = volume.VolumeDevice.mount.call_args_list[0]
            test_mount.assert_called_with('/var/lib/mysql', False)

    def test_unmount_volume(self):
        with patch.object(volume.VolumeDevice, 'unmount', return_value=None):
            self.manager.unmount_volume(self.context, device_path='/dev/vdb')
            test_unmount = volume.VolumeDevice.unmount.call_args_list[0]
            test_unmount.assert_called_with('/var/lib/mysql')

    def test_resize_fs(self):
        with patch.object(volume.VolumeDevice, 'resize_fs',
                          return_value=None):
            self.manager.resize_fs(self.context, device_path='/dev/vdb')
            test_resize_fs = volume.VolumeDevice.resize_fs.call_args_list[0]
            test_resize_fs.assert_called_with('/var/lib/mysql')

    @patch.object(dbaas.MySqlApp, 'remove_overrides')
    def test_update_overrides(self, remove_config_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        dbaas.MySqlApp.update_overrides = MagicMock(return_value=None)
        self.manager.update_overrides(self.context, 'something_overrides')
        dbaas.MySqlAppStatus.get.assert_any_call()
        remove_config_mock.assert_not_called()
        dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides')

    @patch.object(dbaas.MySqlApp, 'remove_overrides')
    def test_update_overrides_with_remove(self, remove_overrides_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        dbaas.MySqlApp.update_overrides = MagicMock(return_value=None)
        self.manager.update_overrides(self.context, 'something_overrides',
                                      True)
        dbaas.MySqlAppStatus.get.assert_any_call()
        remove_overrides_mock.assert_any_call()
        dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides')

    @patch.object(dbaas.MySqlApp, 'apply_overrides')
    def test_apply_overrides(self, apply_overrides_mock):
        mock_status = MagicMock()
        override = {'some_key': 'some value'}
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.apply_overrides(self.context, override)
        dbaas.MySqlAppStatus.get.assert_any_call()
        apply_overrides_mock.assert_any_call({'some_key': 'some value'})

    @patch.object(dbaas.MySqlApp, 'get_txn_count', return_value=9879)
    def test_get_txn_count(self, get_txn_count_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        txn_count = self.manager.get_txn_count(self.context)
        self.assertEqual(get_txn_count_mock.return_value, txn_count)
        dbaas.MySqlAppStatus.get.assert_any_call()
        get_txn_count_mock.assert_any_call()

    @patch.object(dbaas.MySqlApp, 'get_latest_txn_id',
                  return_value='2a5b-2064-32fb:1')
    def test_get_latest_txn_id(self, get_latest_txn_id_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        latest_txn_id = self.manager.get_latest_txn_id(self.context)
        self.assertEqual(get_latest_txn_id_mock.return_value, latest_txn_id)
        dbaas.MySqlAppStatus.get.assert_any_call()
        get_latest_txn_id_mock.assert_any_call()

    @patch.object(dbaas.MySqlApp, 'wait_for_txn')
    def test_wait_for_txn(self, wait_for_txn_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.wait_for_txn(self.context, '4b4-23:5,2a5b-2064-32fb:1')
        dbaas.MySqlAppStatus.get.assert_any_call()
        wait_for_txn_mock.assert_any_call('4b4-23:5,2a5b-2064-32fb:1')

    @patch.object(dbaas.MySqlApp, 'make_read_only')
    def test_make_read_only(self, make_read_only_mock):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.make_read_only(self.context, 'ON')
        dbaas.MySqlAppStatus.get.assert_any_call()
        make_read_only_mock.assert_any_call('ON')

    def test_cleanup_source_on_replica_detach(self):
        mock_replication = MagicMock()
        mock_replication.cleanup_source_on_replica_detach = MagicMock()
        self.mock_rs_class.return_value = mock_replication
        snapshot = {'replication_strategy': self.replication_strategy,
                    'dataset': {'dataset_size': '1.0'}}

        # entry point
        self.manager.cleanup_source_on_replica_detach(self.context, snapshot)
        # assertions
        self.assertEqual(
            1, mock_replication.cleanup_source_on_replica_detach.call_count)

    def test_get_replica_context(self):
        replication_user = {
            'name': 'repl_user',
            'password': 'repl_pwd'
        }
        master_ref = {
            'host': '1.2.3.4',
            'port': 3306
        }
        rep_info = {
            'master': master_ref,
            'log_position': {
                'replication_user': replication_user
            }
        }
        mock_replication = MagicMock()
        mock_replication.get_replica_context = MagicMock(
            return_value=rep_info)
        self.mock_rs_class.return_value = mock_replication

        # entry point
        replica_info = self.manager.get_replica_context(self.context)
        # assertions
        self.assertEqual(1, mock_replication.get_replica_context.call_count)
        self.assertEqual(rep_info, replica_info)

    def test_enable_as_master(self):
        mock_replication = MagicMock()
        mock_replication.enable_as_master = MagicMock()
        self.mock_rs_class.return_value = mock_replication

        # entry point
        self.manager.enable_as_master(self.context, None)
        # assertions
        self.assertEqual(1, mock_replication.enable_as_master.call_count)

    @patch('trove.guestagent.datastore.mysql_common.manager.LOG')
    def test__perform_restore(self, *args):
        backup_info = {'id': 'backup_id_123abc',
                       'location': 'fake-location',
                       'type': 'InnoBackupEx',
                       'checksum': 'fake-checksum',
                       }
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
        with patch.object(backup, 'restore',
                          side_effect=ProcessExecutionError):
            self.assertRaises(ProcessExecutionError,
                              self.manager._perform_restore, backup_info,
                              self.context, '/var/lib/mysql', app)
            app.status.set_status.assert_called_with(
                rd_instance.ServiceStatuses.FAILED)
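
# Illustrative sketch (hypothetical helper, mirroring the cases exercised in
# test_get_last_txn above): a gtid_executed string is a comma-separated list
# of '<uuid>:<range>' entries, and the expected value is the highest
# transaction number of the entry whose UUID matches the current master.
def _example_last_txn(gtid_executed, master_uuid):
    for entry in gtid_executed.split(','):
        uuid, _, ranges = entry.partition(':')
        if uuid == master_uuid:
            # '1-5' -> 5, '1' -> 1; with multiple ranges, keep the last one.
            last_range = ranges.split(':')[-1]
            return int(last_range.split('-')[-1])
    return 0

# e.g. _example_last_txn('4b4-23:1-5,2a5b-2064-32fb:1-10', '2a5b-2064-32fb')
# returns 10, matching the test expectations.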

trove-5.0.0/trove/tests/unittests/guestagent/test_backups.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from mock import ANY, DEFAULT, Mock, patch, PropertyMock
from testtools.testcase import ExpectedException

from trove.common import exception
from trove.common import utils
from trove.guestagent.common import configuration
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.cassandra import (
    service as cass_service
)
from trove.guestagent.strategies.backup import base as backupBase
from trove.guestagent.strategies.backup.mysql_impl import MySqlApp
from trove.guestagent.strategies.restore import base as restoreBase
from trove.guestagent.strategies.restore.mysql_impl import MySQLRestoreMixin
from trove.tests.unittests import trove_testtools

BACKUP_XTRA_CLS = ("trove.guestagent.strategies.backup."
                   "mysql_impl.InnoBackupEx")
RESTORE_XTRA_CLS = ("trove.guestagent.strategies.restore."
                    "mysql_impl.InnoBackupEx")
BACKUP_XTRA_INCR_CLS = ("trove.guestagent.strategies.backup."
                        "mysql_impl.InnoBackupExIncremental")
RESTORE_XTRA_INCR_CLS = ("trove.guestagent.strategies.restore."
                         "mysql_impl.InnoBackupExIncremental")
BACKUP_SQLDUMP_CLS = ("trove.guestagent.strategies.backup."
                      "mysql_impl.MySQLDump")
RESTORE_SQLDUMP_CLS = ("trove.guestagent.strategies.restore."
                       "mysql_impl.MySQLDump")
BACKUP_CBBACKUP_CLS = ("trove.guestagent.strategies.backup."
                       "experimental.couchbase_impl.CbBackup")
RESTORE_CBBACKUP_CLS = ("trove.guestagent.strategies.restore."
                        "experimental.couchbase_impl.CbBackup")
BACKUP_MONGODUMP_CLS = ("trove.guestagent.strategies.backup."
                        "experimental.mongo_impl.MongoDump")
RESTORE_MONGODUMP_CLS = ("trove.guestagent.strategies.restore."
                         "experimental.mongo_impl.MongoDump")
BACKUP_REDIS_CLS = ("trove.guestagent.strategies.backup."
                    "experimental.redis_impl.RedisBackup")
RESTORE_REDIS_CLS = ("trove.guestagent.strategies.restore."
                     "experimental.redis_impl.RedisBackup")
BACKUP_NODETOOLSNAPSHOT_CLS = ("trove.guestagent.strategies.backup."
                               "experimental.cassandra_impl.NodetoolSnapshot")
RESTORE_NODETOOLSNAPSHOT_CLS = ("trove.guestagent.strategies.restore."
                                "experimental.cassandra_impl"
                                ".NodetoolSnapshot")
BACKUP_DB2_CLS = ("trove.guestagent.strategies.backup."
                  "experimental.db2_impl.DB2Backup")
RESTORE_DB2_CLS = ("trove.guestagent.strategies.restore."
                   "experimental.db2_impl.DB2Backup")
BACKUP_COUCHDB_BACKUP_CLS = ("trove.guestagent.strategies.backup."
                             "experimental.couchdb_impl.CouchDBBackup")
RESTORE_COUCHDB_BACKUP_CLS = ("trove.guestagent.strategies.restore."
                              "experimental.couchdb_impl.CouchDBBackup")

PIPE = " | "
ZIP = "gzip"
UNZIP = "gzip -d -c"
ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
                   " /var/lib/mysql/data 2>/tmp/innobackupex.log")
XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
                    ' --incremental --incremental-lsn=%(lsn)s'
                    ' %(extra_opts)s /var/lib/mysql/data'
                    ' 2>/tmp/innobackupex.log')
SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
                      "--opt --password=password -u os_admin"
                      " 2>/tmp/mysqldump.log")
SQLDUMP_BACKUP = SQLDUMP_BACKUP_RAW % {'extra_opts': ''}
SQLDUMP_BACKUP_EXTRA_OPTS = (SQLDUMP_BACKUP_RAW %
                             {'extra_opts': '--events --routines --triggers'})
XTRA_RESTORE_RAW = "sudo xbstream -x -C %(restore_location)s"
XTRA_RESTORE = XTRA_RESTORE_RAW % {'restore_location': '/var/lib/mysql/data'}
XTRA_INCR_PREPARE = ("sudo innobackupex"
                     " --defaults-file=/var/lib/mysql/data/backup-my.cnf"
                     " --ibbackup=xtrabackup"
                     " --apply-log"
                     " --redo-only"
                     " /var/lib/mysql/data"
                     " %(incr)s"
                     " 2>/tmp/innoprepare.log")
SQLDUMP_RESTORE = "sudo mysql"
PREPARE = ("sudo innobackupex"
           " --defaults-file=/var/lib/mysql/data/backup-my.cnf"
           " --ibbackup=xtrabackup"
           " --apply-log"
           " /var/lib/mysql/data"
           " 2>/tmp/innoprepare.log")
CRYPTO_KEY = "default_aes_cbc_key"

CBBACKUP_CMD = "tar cpPf - /tmp/backups"
CBBACKUP_RESTORE = "sudo tar xpPf -"

MONGODUMP_CMD = "sudo tar cPf - /var/lib/mongodb/dump"
MONGODUMP_RESTORE = "sudo tar xPf -"

REDISBACKUP_CMD = "sudo cat /var/lib/redis/dump.rdb"
REDISBACKUP_RESTORE = "tee /var/lib/redis/dump.rdb"

DB2BACKUP_CMD = "sudo tar cPf - /home/db2inst1/db2inst1/backup"
DB2BACKUP_RESTORE = "sudo tar xPf -"

COUCHDB_BACKUP_CMD = "sudo tar cpPf - /var/lib/couchdb"
COUCHDB_RESTORE_CMD = "sudo tar xPf -"
'BaseDbStatus.prepare_completed') self.mock_pc = self.patch_pc.start() self.mock_pc.__get__ = Mock(return_value=True) self.addCleanup(self.patch_pc.stop) self.get_auth_pwd_patch = patch.object( MySqlApp, 'get_auth_password', mock.Mock(return_value='password')) self.get_auth_pwd_mock = self.get_auth_pwd_patch.start() self.addCleanup(self.get_auth_pwd_patch.stop) self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout') self.exec_timeout_mock = self.exec_timeout_patch.start() self.addCleanup(self.exec_timeout_patch.stop) self.get_data_dir_patch = patch.object( MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') self.get_datadir_mock = self.get_data_dir_patch.start() self.addCleanup(self.get_data_dir_patch.stop) backupBase.BackupRunner.is_zipped = True backupBase.BackupRunner.is_encrypted = True restoreBase.RestoreRunner.is_zipped = True restoreBase.RestoreRunner.is_encrypted = True def tearDown(self): super(GuestAgentBackupTest, self).tearDown() def test_backup_decrypted_xtrabackup_command(self): backupBase.BackupRunner.is_encrypted = False RunnerClass = utils.import_class(BACKUP_XTRA_CLS) bkup = RunnerClass(12345, extra_opts="") self.assertEqual(XTRA_BACKUP + PIPE + ZIP, bkup.command) self.assertEqual("12345.xbstream.gz", bkup.manifest) def test_backup_decrypted_xtrabackup_with_extra_opts_command(self): backupBase.BackupRunner.is_encrypted = False RunnerClass = utils.import_class(BACKUP_XTRA_CLS) bkup = RunnerClass(12345, extra_opts="--no-lock") self.assertEqual(XTRA_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command) self.assertEqual("12345.xbstream.gz", bkup.manifest) def test_backup_encrypted_xtrabackup_command(self): backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_XTRA_CLS) bkup = RunnerClass(12345, extra_opts="") self.assertEqual(XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT, bkup.command) self.assertEqual("12345.xbstream.gz.enc", bkup.manifest) def test_backup_xtrabackup_incremental(self): backupBase.BackupRunner.is_encrypted = False RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS) opts = {'lsn': '54321', 'extra_opts': ''} expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP bkup = RunnerClass(12345, extra_opts="", lsn="54321") self.assertEqual(expected, bkup.command) self.assertEqual("12345.xbstream.gz", bkup.manifest) def test_backup_xtrabackup_incremental_with_extra_opts_command(self): backupBase.BackupRunner.is_encrypted = False RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS) opts = {'lsn': '54321', 'extra_opts': '--no-lock'} expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP bkup = RunnerClass(12345, extra_opts="--no-lock", lsn="54321") self.assertEqual(expected, bkup.command) self.assertEqual("12345.xbstream.gz", bkup.manifest) def test_backup_xtrabackup_incremental_encrypted(self): backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS) opts = {'lsn': '54321', 'extra_opts': ''} expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP + PIPE + ENCRYPT bkup = RunnerClass(12345, extra_opts="", lsn="54321") self.assertEqual(expected, bkup.command) self.assertEqual("12345.xbstream.gz.enc", bkup.manifest) def test_backup_decrypted_mysqldump_command(self): backupBase.BackupRunner.is_encrypted = False RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS) bkup = RunnerClass(12345, extra_opts="") self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP, bkup.command) self.assertEqual("12345.gz", bkup.manifest) def test_backup_decrypted_mysqldump_with_extra_opts_command(self): 
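        # extra_opts is spliced verbatim into the mysqldump command template,
        # so the expected command is the EXTRA_OPTS variant built above.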
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
        bkup = RunnerClass(12345, extra_opts="--events --routines --triggers")
        self.assertEqual(SQLDUMP_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command)
        self.assertEqual("12345.gz", bkup.manifest)

    def test_backup_encrypted_mysqldump_command(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
        bkup = RunnerClass(12345, user="user",
                           password="password", extra_opts="")
        self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP + PIPE + ENCRYPT,
                         bkup.command)
        self.assertEqual("12345.gz.enc", bkup.manifest)

    def test_restore_decrypted_xtrabackup_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_XTRA_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        self.assertEqual(UNZIP + PIPE + XTRA_RESTORE, restr.restore_cmd)
        self.assertEqual(PREPARE, restr.prepare_cmd)

    def test_restore_encrypted_xtrabackup_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_XTRA_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE,
                         restr.restore_cmd)
        self.assertEqual(PREPARE, restr.prepare_cmd)

    def test_restore_xtrabackup_incremental_prepare_command(self):
        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        # Final prepare command (same as normal xtrabackup)
        self.assertEqual(PREPARE, restr.prepare_cmd)
        # Incremental backup prepare command
        expected = XTRA_INCR_PREPARE % {'incr': '--incremental-dir=/foo/bar/'}
        observed = restr._incremental_prepare_cmd('/foo/bar/')
        self.assertEqual(expected, observed)
        # Full backup prepare command
        expected = XTRA_INCR_PREPARE % {'incr': ''}
        observed = restr._incremental_prepare_cmd(None)
        self.assertEqual(expected, observed)

    def test_restore_decrypted_xtrabackup_incremental_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        # Full restore command
        expected = UNZIP + PIPE + XTRA_RESTORE
        self.assertEqual(expected, restr.restore_cmd)
        # Incremental backup restore command
        opts = {'restore_location': '/foo/bar/'}
        expected = UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
        observed = restr._incremental_restore_cmd('/foo/bar/')
        self.assertEqual(expected, observed)

    def test_restore_encrypted_xtrabackup_incremental_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        # Full restore command
        expected = DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE
        self.assertEqual(expected, restr.restore_cmd)
        # Incremental backup restore command
        opts = {'restore_location': '/foo/bar/'}
        expected = DECRYPT + PIPE + UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
        observed = restr._incremental_restore_cmd('/foo/bar/')
        self.assertEqual(expected, observed)

    def test_restore_decrypted_mysqldump_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename",
checksum="md5") self.assertEqual(UNZIP + PIPE + SQLDUMP_RESTORE, restr.restore_cmd) def test_restore_encrypted_mysqldump_command(self): restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS) restr = RunnerClass(None, restore_location="/var/lib/mysql/data", location="filename", checksum="md5") self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + SQLDUMP_RESTORE, restr.restore_cmd) def test_backup_encrypted_cbbackup_command(self): backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS) utils.execute_with_timeout = mock.Mock(return_value=None) bkp = RunnerClass(12345) self.assertIsNotNone(bkp) self.assertEqual( CBBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) self.assertIn("gz.enc", bkp.manifest) def test_backup_not_encrypted_cbbackup_command(self): backupBase.BackupRunner.is_encrypted = False backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS) utils.execute_with_timeout = mock.Mock(return_value=None) bkp = RunnerClass(12345) self.assertIsNotNone(bkp) self.assertEqual(CBBACKUP_CMD + PIPE + ZIP, bkp.command) self.assertIn("gz", bkp.manifest) def test_restore_decrypted_cbbackup_command(self): restoreBase.RestoreRunner.is_encrypted = False RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS) restr = RunnerClass(None, restore_location="/tmp", location="filename", checksum="md5") self.assertEqual(UNZIP + PIPE + CBBACKUP_RESTORE, restr.restore_cmd) def test_restore_encrypted_cbbackup_command(self): restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS) restr = RunnerClass(None, restore_location="/tmp", location="filename", checksum="md5") self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + CBBACKUP_RESTORE, restr.restore_cmd) @patch.multiple('trove.guestagent.common.operating_system', chmod=DEFAULT, remove=DEFAULT) def test_reset_root_password_on_mysql_restore(self, chmod, remove): with patch.object(MySQLRestoreMixin, '_start_mysqld_safe_with_init_file', return_value=True): inst = MySQLRestoreMixin() inst.reset_root_password() chmod.assert_called_once_with( ANY, operating_system.FileMode.ADD_READ_ALL, as_root=True) # Make sure the temporary error log got deleted as root # (see bug/1423759). 
remove.assert_called_once_with(ANY, force=True, as_root=True) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def test_backup_encrypted_mongodump_command(self, _): backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS) utils.execute_with_timeout = mock.Mock(return_value=None) bkp = RunnerClass(12345) self.assertIsNotNone(bkp) self.assertEqual( MONGODUMP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) self.assertIn("gz.enc", bkp.manifest) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def test_backup_not_encrypted_mongodump_command(self, _): backupBase.BackupRunner.is_encrypted = False backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS) utils.execute_with_timeout = mock.Mock(return_value=None) bkp = RunnerClass(12345) self.assertIsNotNone(bkp) self.assertEqual(MONGODUMP_CMD + PIPE + ZIP, bkp.command) self.assertIn("gz", bkp.manifest) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def test_restore_decrypted_mongodump_command(self, _): restoreBase.RestoreRunner.is_encrypted = False RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS) restr = RunnerClass(None, restore_location="/tmp", location="filename", checksum="md5") self.assertEqual(restr.restore_cmd, UNZIP + PIPE + MONGODUMP_RESTORE) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def test_restore_encrypted_mongodump_command(self, _): restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS) restr = RunnerClass(None, restore_location="/tmp", location="filename", checksum="md5") self.assertEqual(restr.restore_cmd, DECRYPT + PIPE + UNZIP + PIPE + MONGODUMP_RESTORE) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) @patch.object(configuration.ConfigurationManager, 'parse_configuration', mock.Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) def test_backup_encrypted_redisbackup_command(self, *mocks): backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_REDIS_CLS) bkp = RunnerClass(12345) self.assertIsNotNone(bkp) self.assertEqual( REDISBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) self.assertIn("gz.enc", bkp.manifest) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) @patch.object(configuration.ConfigurationManager, 'parse_configuration', mock.Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) def test_backup_not_encrypted_redisbackup_command(self, *mocks): backupBase.BackupRunner.is_encrypted = False backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_REDIS_CLS) bkp = RunnerClass(12345) self.assertIsNotNone(bkp) self.assertEqual(REDISBACKUP_CMD + PIPE + ZIP, bkp.command) self.assertIn("gz", bkp.manifest) @patch.object(configuration.ConfigurationManager, 'parse_configuration', mock.Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) @patch.object(operating_system, 'chown') @patch.object(operating_system, 'create_directory') def test_restore_decrypted_redisbackup_command(self, *mocks): restoreBase.RestoreRunner.is_encrypted = False RunnerClass = utils.import_class(RESTORE_REDIS_CLS) restr = RunnerClass(None, restore_location="/tmp", location="filename", checksum="md5") self.assertEqual(restr.restore_cmd, UNZIP + PIPE + REDISBACKUP_RESTORE) @patch.object(configuration.ConfigurationManager, 
                  'parse_configuration',
                  mock.Mock(return_value={'dir': '/var/lib/redis',
                                          'dbfilename': 'dump.rdb'}))
    @patch.object(operating_system, 'chown')
    @patch.object(operating_system, 'create_directory')
    def test_restore_encrypted_redisbackup_command(self, *mocks):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_REDIS_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename",
                            checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         DECRYPT + PIPE + UNZIP + PIPE + REDISBACKUP_RESTORE)

    @patch.object(utils, 'execute_with_timeout')
    def test_backup_encrypted_db2backup_command(self, *mock):
        backupBase.BackupRunner.is_encrypted = True
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_DB2_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(
            DB2BACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    @patch.object(utils, 'execute_with_timeout')
    def test_backup_not_encrypted_db2backup_command(self, *mock):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_DB2_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(DB2BACKUP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    def test_restore_decrypted_db2backup_command(self):
        restoreBase.RestoreRunner.is_zipped = True
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_DB2_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename",
                            checksum="md5")
        self.assertEqual(restr.restore_cmd, UNZIP + PIPE + DB2BACKUP_RESTORE)

    def test_restore_encrypted_db2backup_command(self):
        restoreBase.RestoreRunner.is_zipped = True
        restoreBase.RestoreRunner.is_encrypted = True
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_DB2_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename",
                            checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         DECRYPT + PIPE + UNZIP + PIPE + DB2BACKUP_RESTORE)

    def test_backup_encrypted_couchdbbackup_command(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(
            COUCHDB_BACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    def test_backup_not_encrypted_couchdbbackup_command(self):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(COUCHDB_BACKUP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    def test_restore_decrypted_couchdbbackup_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_COUCHDB_BACKUP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/couchdb",
                            location="filename",
                            checksum="md5")
        self.assertEqual(UNZIP + PIPE + COUCHDB_RESTORE_CMD,
                         restr.restore_cmd)

    def test_restore_encrypted_couchdbbackup_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_COUCHDB_BACKUP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/couchdb",
                            location="filename",
                            checksum="md5")
        self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + COUCHDB_RESTORE_CMD,
                         restr.restore_cmd)


class
CassandraBackupTest(trove_testtools.TestCase): _BASE_BACKUP_CMD = ('sudo tar --transform="s#snapshots/%s/##" -cpPf - ' '-C "%s" "%s"') _BASE_RESTORE_CMD = 'sudo tar -xpPf - -C "%(restore_location)s"' _DATA_DIR = 'data_dir' _SNAPSHOT_NAME = 'snapshot_name' _SNAPSHOT_FILES = {'foo.db', 'bar.db'} _RESTORE_LOCATION = {'restore_location': '/var/lib/cassandra'} def setUp(self): super(CassandraBackupTest, self).setUp() self.app_status_patcher = patch( 'trove.guestagent.datastore.experimental.cassandra.service.' 'CassandraAppStatus') self.addCleanup(self.app_status_patcher.stop) self.app_status_patcher.start() self.get_data_dirs_patcher = patch.object( cass_service.CassandraApp, 'cassandra_data_dir', new_callable=PropertyMock) self.addCleanup(self.get_data_dirs_patcher.stop) data_dir_mock = self.get_data_dirs_patcher.start() data_dir_mock.return_value = self._DATA_DIR self.os_list_patcher = patch.object( operating_system, 'list_files_in_directory', return_value=self._SNAPSHOT_FILES) self.addCleanup(self.os_list_patcher.stop) self.os_list_patcher.start() def tearDown(self): super(CassandraBackupTest, self).tearDown() @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_backup_encrypted_zipped_nodetoolsnapshot_command(self, _): bkp = self._build_backup_runner(True, True) bkp._run_pre_backup() self.assertIsNotNone(bkp) self.assertEqual(self._BASE_BACKUP_CMD % ( self._SNAPSHOT_NAME, self._DATA_DIR, '" "'.join(self._SNAPSHOT_FILES) ) + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) self.assertIn(".gz.enc", bkp.manifest) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_backup_not_encrypted_not_zipped_nodetoolsnapshot_command(self, _): bkp = self._build_backup_runner(False, False) bkp._run_pre_backup() self.assertIsNotNone(bkp) self.assertEqual(self._BASE_BACKUP_CMD % ( self._SNAPSHOT_NAME, self._DATA_DIR, '" "'.join(self._SNAPSHOT_FILES) ), bkp.command) self.assertNotIn(".gz.enc", bkp.manifest) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_backup_not_encrypted_but_zipped_nodetoolsnapshot_command(self, _): bkp = self._build_backup_runner(False, True) bkp._run_pre_backup() self.assertIsNotNone(bkp) self.assertEqual(self._BASE_BACKUP_CMD % ( self._SNAPSHOT_NAME, self._DATA_DIR, '" "'.join(self._SNAPSHOT_FILES) ) + PIPE + ZIP, bkp.command) self.assertIn(".gz", bkp.manifest) self.assertNotIn(".enc", bkp.manifest) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_backup_encrypted_but_not_zipped_nodetoolsnapshot_command(self, _): bkp = self._build_backup_runner(True, False) bkp._run_pre_backup() self.assertIsNotNone(bkp) self.assertEqual(self._BASE_BACKUP_CMD % ( self._SNAPSHOT_NAME, self._DATA_DIR, '" "'.join(self._SNAPSHOT_FILES) ) + PIPE + ENCRYPT, bkp.command) self.assertIn(".enc", bkp.manifest) self.assertNotIn(".gz", bkp.manifest) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_restore_encrypted_but_not_zipped_nodetoolsnapshot_command( self, mock_logging, _): restoreBase.RestoreRunner.is_zipped = False restoreBase.RestoreRunner.is_encrypted = True restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(RESTORE_NODETOOLSNAPSHOT_CLS) rstr = RunnerClass(None, restore_location=self._RESTORE_LOCATION, location="filename", checksum="md5") self.assertIsNotNone(rstr) self.assertEqual(self._BASE_RESTORE_CMD % self._RESTORE_LOCATION, rstr.base_restore_cmd % 
self._RESTORE_LOCATION) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def _build_backup_runner(self, is_encrypted, is_zipped, _): backupBase.BackupRunner.is_zipped = is_zipped backupBase.BackupRunner.is_encrypted = is_encrypted backupBase.BackupRunner.encrypt_key = CRYPTO_KEY RunnerClass = utils.import_class(BACKUP_NODETOOLSNAPSHOT_CLS) runner = RunnerClass(self._SNAPSHOT_NAME) runner._remove_snapshot = mock.MagicMock() runner._snapshot_all_keyspaces = mock.MagicMock() runner._find_in_subdirectories = mock.MagicMock( return_value=self._SNAPSHOT_FILES ) return runner class CouchbaseBackupTests(trove_testtools.TestCase): def setUp(self): super(CouchbaseBackupTests, self).setUp() self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout', return_value=('0', '')) self.exec_timeout_patch.start() self.backup_runner = utils.import_class(BACKUP_CBBACKUP_CLS) self.backup_runner_patch = patch.multiple( self.backup_runner, _run=DEFAULT, _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) def tearDown(self): super(CouchbaseBackupTests, self).tearDown() self.backup_runner_patch.stop() self.exec_timeout_patch.stop() def test_backup_success(self): backup_runner_mocks = self.backup_runner_patch.start() with self.backup_runner(12345): pass backup_runner_mocks['_run_pre_backup'].assert_called_once_with() backup_runner_mocks['_run'].assert_called_once_with() backup_runner_mocks['_run_post_backup'].assert_called_once_with() def test_backup_failed_due_to_run_backup(self): backup_runner_mocks = self.backup_runner_patch.start() backup_runner_mocks['_run'].configure_mock( side_effect=exception.TroveError('test') ) with ExpectedException(exception.TroveError, 'test'): with self.backup_runner(12345): pass backup_runner_mocks['_run_pre_backup'].assert_called_once_with() backup_runner_mocks['_run'].assert_called_once_with() self.assertEqual(0, backup_runner_mocks['_run_post_backup'].call_count) class CouchbaseRestoreTests(trove_testtools.TestCase): def setUp(self): super(CouchbaseRestoreTests, self).setUp() self.restore_runner = utils.import_class( RESTORE_CBBACKUP_CLS)( 'swift', location='http://some.where', checksum='True_checksum', restore_location='/tmp/somewhere') def tearDown(self): super(CouchbaseRestoreTests, self).tearDown() def test_restore_success(self): expected_content_length = 123 self.restore_runner._run_restore = mock.Mock( return_value=expected_content_length) self.restore_runner.pre_restore = mock.Mock() self.restore_runner.post_restore = mock.Mock() actual_content_length = self.restore_runner.restore() self.assertEqual( expected_content_length, actual_content_length) def test_restore_failed_due_to_pre_restore(self): self.restore_runner.post_restore = mock.Mock() self.restore_runner.pre_restore = mock.Mock( side_effect=exception.ProcessExecutionError('Error')) self.restore_runner._run_restore = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) def test_restore_failed_due_to_run_restore(self): self.restore_runner.pre_restore = mock.Mock() self.restore_runner._run_restore = mock.Mock( side_effect=exception.ProcessExecutionError('Error')) self.restore_runner.post_restore = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) class MongodbBackupTests(trove_testtools.TestCase): def setUp(self): super(MongodbBackupTests, self).setUp() self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout', return_value=('0', '')) self.exec_timeout_mock = self.exec_timeout_patch.start() 
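        # The patch is started here and stopped via addCleanup below, so it
        # stays active for every test in this class without needing a
        # matching stop() call in tearDown.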
self.addCleanup(self.exec_timeout_patch.stop) self.init_overrides_dir_patch = patch.object( ImportOverrideStrategy, '_initialize_import_directory') self.init_overrides_dir_mock = self.init_overrides_dir_patch.start() self.addCleanup(self.init_overrides_dir_patch.stop) self.backup_runner = utils.import_class(BACKUP_MONGODUMP_CLS) self.backup_runner_patch = patch.multiple( self.backup_runner, _run=DEFAULT, _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) self.backup_runner_mocks = self.backup_runner_patch.start() self.addCleanup(self.backup_runner_patch.stop) def tearDown(self): super(MongodbBackupTests, self).tearDown() def test_backup_success(self): with self.backup_runner(12345): pass self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() self.backup_runner_mocks['_run'].assert_called_once_with() self.backup_runner_mocks['_run_post_backup'].assert_called_once_with() def test_backup_failed_due_to_run_backup(self): self.backup_runner_mocks['_run'].configure_mock( side_effect=exception.TroveError('test') ) with ExpectedException(exception.TroveError, 'test'): with self.backup_runner(12345): pass self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() self.backup_runner_mocks['_run'].assert_called_once_with() self.assertEqual( 0, self.backup_runner_mocks['_run_post_backup'].call_count) class MongodbRestoreTests(trove_testtools.TestCase): @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def setUp(self, _): super(MongodbRestoreTests, self).setUp() self.patch_ope = patch('os.path.expanduser') self.mock_ope = self.patch_ope.start() self.addCleanup(self.patch_ope.stop) self.restore_runner = utils.import_class( RESTORE_MONGODUMP_CLS)('swift', location='http://some.where', checksum='True_checksum', restore_location='/var/lib/somewhere') def tearDown(self): super(MongodbRestoreTests, self).tearDown() def test_restore_success(self): expected_content_length = 123 self.restore_runner._run_restore = mock.Mock( return_value=expected_content_length) self.restore_runner.pre_restore = mock.Mock() self.restore_runner.post_restore = mock.Mock() actual_content_length = self.restore_runner.restore() self.assertEqual( expected_content_length, actual_content_length) def test_restore_failed_due_to_pre_restore(self): self.restore_runner.post_restore = mock.Mock() self.restore_runner.pre_restore = mock.Mock( side_effect=exception.ProcessExecutionError('Error')) self.restore_runner._run_restore = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) def test_restore_failed_due_to_run_restore(self): self.restore_runner.pre_restore = mock.Mock() self.restore_runner._run_restore = mock.Mock( side_effect=exception.ProcessExecutionError('Error')) self.restore_runner.post_restore = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) class RedisBackupTests(trove_testtools.TestCase): def setUp(self): super(RedisBackupTests, self).setUp() self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout', return_value=('0', '')) self.exec_timeout_patch.start() self.addCleanup(self.exec_timeout_patch.stop) self.conf_man_patch = patch.object( configuration.ConfigurationManager, 'parse_configuration', mock.Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) self.conf_man_patch.start() self.addCleanup(self.conf_man_patch.stop) self.backup_runner = utils.import_class(BACKUP_REDIS_CLS) self.backup_runner_patch = patch.multiple( self.backup_runner, _run=DEFAULT, 
_run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) self.backup_runner_mocks = self.backup_runner_patch.start() self.addCleanup(self.backup_runner_patch.stop) def tearDown(self): super(RedisBackupTests, self).tearDown() def test_backup_success(self): with self.backup_runner(12345): pass self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() self.backup_runner_mocks['_run'].assert_called_once_with() self.backup_runner_mocks['_run_post_backup'].assert_called_once_with() def test_backup_failed_due_to_run_backup(self): self.backup_runner_mocks['_run'].configure_mock( side_effect=exception.TroveError('test') ) with ExpectedException(exception.TroveError, 'test'): with self.backup_runner(12345): pass self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() self.backup_runner_mocks['_run'].assert_called_once_with() self.assertEqual( 0, self.backup_runner_mocks['_run_post_backup'].call_count) class RedisRestoreTests(trove_testtools.TestCase): def setUp(self): super(RedisRestoreTests, self).setUp() self.conf_man_patch = patch.object( configuration.ConfigurationManager, 'parse_configuration', mock.Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) self.conf_man_patch.start() self.addCleanup(self.conf_man_patch.stop) self.os_patch = patch.multiple(operating_system, chown=DEFAULT, create_directory=DEFAULT) self.os_patch.start() self.addCleanup(self.os_patch.stop) self.restore_runner = utils.import_class( RESTORE_REDIS_CLS)('swift', location='http://some.where', checksum='True_checksum', restore_location='/var/lib/somewhere') self.restore_runner_patch = patch.multiple( self.restore_runner, _run_restore=DEFAULT, pre_restore=DEFAULT, post_restore=DEFAULT) self.restore_runner_mocks = self.restore_runner_patch.start() self.expected_content_length = 123 self.restore_runner._run_restore = mock.Mock( return_value=self.expected_content_length) self.addCleanup(self.restore_runner_patch.stop) def tearDown(self): super(RedisRestoreTests, self).tearDown() def test_restore_success(self): actual_content_length = self.restore_runner.restore() self.assertEqual( self.expected_content_length, actual_content_length) def test_restore_failed_due_to_pre_restore(self): self.restore_runner_mocks['pre_restore'].side_effect = ( exception.ProcessExecutionError('Error')) self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) def test_restore_failed_due_to_run_restore(self): self.restore_runner._run_restore.side_effect = ( exception.ProcessExecutionError('Error')) self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) class DB2BackupTests(trove_testtools.TestCase): def setUp(self): super(DB2BackupTests, self).setUp() self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout') self.exec_timeout_patch.start() self.backup_runner = utils.import_class(BACKUP_DB2_CLS) self.backup_runner_patch = patch.multiple( self.backup_runner, _run=DEFAULT, _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) def tearDown(self): super(DB2BackupTests, self).tearDown() self.backup_runner_patch.stop() self.exec_timeout_patch.stop() def test_backup_success(self): backup_runner_mocks = self.backup_runner_patch.start() with self.backup_runner(12345): pass backup_runner_mocks['_run_pre_backup'].assert_called_once_with() backup_runner_mocks['_run'].assert_called_once_with() backup_runner_mocks['_run_post_backup'].assert_called_once_with() def test_backup_failed_due_to_run_backup(self): backup_runner_mocks = self.backup_runner_patch.start() 
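        # With _run raising TroveError, the runner's context manager is
        # expected to call _run_pre_backup, propagate the error, and never
        # reach _run_post_backup; the assertions below verify exactly that.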
backup_runner_mocks['_run'].configure_mock( side_effect=exception.TroveError('test')) with ExpectedException(exception.TroveError, 'test'): with self.backup_runner(12345): pass backup_runner_mocks['_run_pre_backup'].assert_called_once_with() backup_runner_mocks['_run'].assert_called_once_with() self.assertEqual(0, backup_runner_mocks['_run_post_backup'].call_count) class DB2RestoreTests(trove_testtools.TestCase): def setUp(self): super(DB2RestoreTests, self).setUp() self.restore_runner = utils.import_class( RESTORE_DB2_CLS)('swift', location='http://some.where', checksum='True_checksum', restore_location='/var/lib/somewhere') def tearDown(self): super(DB2RestoreTests, self).tearDown() def test_restore_success(self): expected_content_length = 123 self.restore_runner._run_restore = mock.Mock( return_value=expected_content_length) self.restore_runner.post_restore = mock.Mock() actual_content_length = self.restore_runner.restore() self.assertEqual( expected_content_length, actual_content_length) def test_restore_failed_due_to_run_restore(self): self.restore_runner._run_restore = mock.Mock( side_effect=exception.ProcessExecutionError('Error')) self.restore_runner.post_restore = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) class CouchDBBackupTests(trove_testtools.TestCase): def setUp(self): super(CouchDBBackupTests, self).setUp() self.backup_runner = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS) self.backup_runner_patch = patch.multiple( self.backup_runner, _run=DEFAULT, _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) def tearDown(self): super(CouchDBBackupTests, self).tearDown() self.backup_runner_patch.stop() def test_backup_success(self): backup_runner_mocks = self.backup_runner_patch.start() with self.backup_runner(12345): pass backup_runner_mocks['_run_pre_backup'].assert_called_once_with() backup_runner_mocks['_run'].assert_called_once_with() backup_runner_mocks['_run_post_backup'].assert_called_once_with() def test_backup_failed_due_to_run_backup(self): backup_runner_mocks = self.backup_runner_patch.start() backup_runner_mocks['_run'].configure_mock( side_effect=exception.TroveError('test') ) with ExpectedException(exception.TroveError, 'test'): with self.backup_runner(12345): pass backup_runner_mocks['_run_pre_backup'].assert_called_once_with() backup_runner_mocks['_run'].assert_called_once_with() self.assertEqual(0, backup_runner_mocks['_run_post_backup'].call_count) class CouchDBRestoreTests(trove_testtools.TestCase): def setUp(self): super(CouchDBRestoreTests, self).setUp() self.restore_runner = utils.import_class( RESTORE_COUCHDB_BACKUP_CLS)( 'swift', location='http://some.where', checksum='True_checksum', restore_location='/tmp/somewhere') def tearDown(self): super(CouchDBRestoreTests, self).tearDown() def test_restore_success(self): expected_content_length = 123 self.restore_runner._run_restore = mock.Mock( return_value=expected_content_length) self.restore_runner.pre_restore = mock.Mock() self.restore_runner.post_restore = mock.Mock() actual_content_length = self.restore_runner.restore() self.assertEqual( expected_content_length, actual_content_length) def test_restore_failed_due_to_run_restore(self): self.restore_runner.pre_restore = mock.Mock() self.restore_runner._run_restore = mock.Mock( side_effect=exception.ProcessExecutionError('Error')) self.restore_runner.post_restore = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self.restore_runner.restore) 
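
# The command-building assertions throughout this module follow a single
# composition rule: pipe the base command through gzip when zipping is on,
# then through openssl when encryption is on, and extend the manifest name
# with the matching suffixes. A minimal illustrative sketch of that rule
# follows; the helper below is an editorial addition, not part of the
# production runner code, and its name is ours:
def _compose_backup_command(base_cmd, manifest, zipped, encrypted):
    cmd = base_cmd
    if zipped:
        cmd += PIPE + ZIP          # "... | gzip"
        manifest += '.gz'
    if encrypted:
        cmd += PIPE + ENCRYPT      # "... | openssl enc -aes-256-cbc ..."
        manifest += '.enc'
    return cmd, manifest

# For example, _compose_backup_command(XTRA_BACKUP, '12345.xbstream',
# zipped=True, encrypted=True) yields the same command/manifest pair
# (XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT, '12345.xbstream.gz.enc')
# asserted in test_backup_encrypted_xtrabackup_command above.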
trove-5.0.0/trove/tests/unittests/guestagent/test_galera_cluster_api.py0000664000567000056710000001303512701410316027760 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import Timeout import mock import trove.common.context as context from trove.common import exception from trove.common.rpc.version import RPC_API_VERSION from trove.common.strategies.cluster.experimental.galera_common.guestagent \ import GaleraCommonGuestAgentStrategy from trove import rpc from trove.tests.unittests import trove_testtools def _mock_call(cmd, timeout, version=None, user=None, public_keys=None, members=None): # To check get_public_keys, authorize_public_keys, # install_cluster, cluster_complete in cmd. if cmd in ('get_public_keys', 'authorize_public_keys', 'install_cluster', 'cluster_complete'): return True else: raise BaseException("Test Failed") class ApiTest(trove_testtools.TestCase): @mock.patch.object(rpc, 'get_client') def setUp(self, *args): super(ApiTest, self).setUp() cluster_guest_api = (GaleraCommonGuestAgentStrategy() .guest_client_class) self.context = context.TroveContext() self.guest = cluster_guest_api(self.context, 0) self.guest._call = _mock_call self.api = cluster_guest_api(self.context, "instance-id-x23d2d") self._mock_rpc_client() def test_get_routing_key(self): self.assertEqual('guestagent.instance-id-x23d2d', self.api._get_routing_key()) @mock.patch('trove.guestagent.api.LOG') def test_api_cast_exception(self, mock_logging): self.call_context.cast.side_effect = IOError('host down') self.assertRaises(exception.GuestError, self.api.create_user, 'test_user') @mock.patch('trove.guestagent.api.LOG') def test_api_call_exception(self, mock_logging): self.call_context.call.side_effect = IOError('host_down') self.assertRaises(exception.GuestError, self.api.list_users) def test_api_call_timeout(self): self.call_context.call.side_effect = Timeout() self.assertRaises(exception.GuestTimeout, self.api.restart) def _verify_rpc_prepare_before_call(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION, timeout=mock.ANY) def _verify_rpc_prepare_before_cast(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION) def _verify_cast(self, *args, **kwargs): self.call_context.cast.assert_called_once_with(self.context, *args, **kwargs) def _verify_call(self, *args, **kwargs): self.call_context.call.assert_called_once_with(self.context, *args, **kwargs) def _mock_rpc_client(self): self.call_context = mock.Mock() self.api.client.prepare = mock.Mock(return_value=self.call_context) self.call_context.call = mock.Mock() self.call_context.cast = mock.Mock() def test_install_cluster(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.install_cluster( replication_user="repuser", cluster_configuration="cluster-configuration", bootstrap=False) self._verify_rpc_prepare_before_call() self._verify_call('install_cluster', replication_user="repuser", 
cluster_configuration="cluster-configuration", bootstrap=False) self.assertEqual(exp_resp, resp) def test_reset_admin_password(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.reset_admin_password( admin_password="admin_password") self._verify_rpc_prepare_before_call() self._verify_call('reset_admin_password', admin_password="admin_password") self.assertEqual(exp_resp, resp) def test_cluster_complete(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.cluster_complete() self._verify_rpc_prepare_before_call() self._verify_call('cluster_complete') self.assertEqual(exp_resp, resp) def test_get_cluster_context(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.get_cluster_context() self._verify_rpc_prepare_before_call() self._verify_call('get_cluster_context') self.assertEqual(exp_resp, resp) def test_write_cluster_configuration_overrides(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.write_cluster_configuration_overrides( cluster_configuration="cluster-configuration") self._verify_rpc_prepare_before_call() self._verify_call('write_cluster_configuration_overrides', cluster_configuration="cluster-configuration",) self.assertEqual(exp_resp, resp) trove-5.0.0/trove/tests/unittests/guestagent/test_mariadb_manager.py0000664000567000056710000000527312701410316027231 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
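
# These tests exercise the MariaDB guest agent manager's cluster entry
# points, checking that install_cluster and reset_admin_password consult
# the app status and then delegate to MariaDBApp with the arguments passed
# in over RPC; all service-level calls are mocked out.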
from mock import MagicMock from mock import patch import testtools from trove.common.context import TroveContext from trove.guestagent.datastore.experimental.mariadb import ( manager as mariadb_manager) from trove.guestagent.datastore.experimental.mariadb import ( service as mariadb_service) from trove.guestagent.datastore.mysql_common import service as mysql_service class GuestAgentManagerTest(testtools.TestCase): def setUp(self): super(GuestAgentManagerTest, self).setUp() self.manager = mariadb_manager.Manager() self.context = TroveContext() patcher_rs = patch( 'trove.guestagent.strategies.replication.get_instance') patcher_rs.start() self.addCleanup(patcher_rs.stop) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(mariadb_service.MariaDBApp, 'install_cluster', new_callable=MagicMock) def test_install_cluster(self, install_cluster, app_status_get): install_cluster.return_value = MagicMock() app_status_get.return_value = None replication_user = "repuser" configuration = "configuration" bootstrap = True self.manager.install_cluster(self.context, replication_user, configuration, bootstrap) app_status_get.assert_any_call() install_cluster.assert_called_with( replication_user, configuration, bootstrap) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(mariadb_service.MariaDBApp, 'reset_admin_password', new_callable=MagicMock) def test_reset_admin_password(self, reset_admin_password, app_status_get): reset_admin_password.return_value = None app_status_get.return_value = MagicMock() admin_password = "password" self.manager.reset_admin_password(self.context, admin_password) app_status_get.assert_any_call() reset_admin_password.assert_called_with(admin_password) trove-5.0.0/trove/tests/unittests/guestagent/test_galera_manager.py0000664000567000056710000001207612701410316027064 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
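
# The tests below rely on mock's stacked-decorator convention: @patch.object
# decorators are applied bottom-up, so the decorator nearest the test method
# supplies the first injected mock argument. A minimal self-contained sketch
# (the _PatchOrderDemo class and _demo function are illustrative additions,
# not part of the production suite):
from mock import patch as _sketch_patch


class _PatchOrderDemo(object):
    a = 'real-a'
    b = 'real-b'


@_sketch_patch.object(_PatchOrderDemo, 'b')   # injected second
@_sketch_patch.object(_PatchOrderDemo, 'a')   # injected first
def _demo(mock_a, mock_b):
    # While _demo runs, both attributes are replaced by MagicMocks.
    assert _PatchOrderDemo.a is mock_a
    assert _PatchOrderDemo.b is mock_b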
from mock import MagicMock from mock import patch from trove.common.context import TroveContext from trove.guestagent.datastore.galera_common import manager as galera_manager from trove.guestagent.datastore.galera_common import service as galera_service from trove.guestagent.datastore.mysql_common import service as mysql_service from trove.tests.unittests import trove_testtools class GaleraTestApp(galera_service.GaleraApp): def __init__(self, status): super(GaleraTestApp, self).__init__( status, mysql_service.BaseLocalSqlClient, mysql_service.BaseKeepAliveConnection) @property def cluster_configuration(self): return self.configuration_manager.get_value('mysqld') class GaleraTestRootAccess(mysql_service.BaseMySqlRootAccess): def __init__(self): super(GaleraTestRootAccess, self).__init__( mysql_service.BaseLocalSqlClient, GaleraTestApp(mysql_service.BaseMySqlAppStatus.get())) class GaleraTestAdmin(mysql_service.BaseMySqlAdmin): def __init__(self): super(GaleraTestAdmin, self).__init__( mysql_service.BaseLocalSqlClient, GaleraTestRootAccess(), GaleraTestApp) class GuestAgentManagerTest(trove_testtools.TestCase): def setUp(self): super(GuestAgentManagerTest, self).setUp() self.manager = galera_manager.GaleraManager( GaleraTestApp, mysql_service.BaseMySqlAppStatus, GaleraTestAdmin) self.context = TroveContext() patcher_rs = patch( 'trove.guestagent.strategies.replication.get_instance') patcher_rs.start() self.addCleanup(patcher_rs.stop) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'install_cluster', new_callable=MagicMock) def test_install_cluster(self, install_cluster, app_status_get): install_cluster.return_value = MagicMock() app_status_get.return_value = None replication_user = "repuser" configuration = "configuration" bootstrap = True self.manager.install_cluster(self.context, replication_user, configuration, bootstrap) app_status_get.assert_any_call() install_cluster.assert_called_with( replication_user, configuration, bootstrap) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'reset_admin_password', new_callable=MagicMock) def test_reset_admin_password(self, reset_admin_password, app_status_get): reset_admin_password.return_value = None app_status_get.return_value = MagicMock() admin_password = "password" self.manager.reset_admin_password(self.context, admin_password) app_status_get.assert_any_call() reset_admin_password.assert_called_with(admin_password) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'get_cluster_context') def test_get_cluster_context(self, get_cluster_ctxt, app_status_get): get_cluster_ctxt.return_value = {'cluster': 'info'} self.manager.get_cluster_context(self.context) app_status_get.assert_any_call() get_cluster_ctxt.assert_any_call() @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'write_cluster_configuration_overrides') def test_write_cluster_configuration_overrides(self, conf_overries, app_status_get): cluster_configuration = "cluster_configuration" self.manager.write_cluster_configuration_overrides( self.context, cluster_configuration) app_status_get.assert_any_call() conf_overries.assert_called_with(cluster_configuration) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(mysql_service.BaseMySqlAdmin, 'enable_root') def 
test_enable_root_with_password(self, reset_admin_pwd, app_status_get): admin_password = "password" self.manager.enable_root_with_password(self.context, admin_password) reset_admin_pwd.assert_called_with(admin_password) trove-5.0.0/trove/tests/unittests/guestagent/test_vertica_manager.py0000664000567000056710000004256412701410316027273 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mock import DEFAULT from mock import MagicMock from mock import patch from os import path from testtools.matchers import Is from trove.common.exception import DatastoreOperationNotSupported from trove.common import instance as rd_instance from trove.guestagent.common import configuration from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.vertica.manager import Manager from trove.guestagent.datastore.experimental.vertica.service import ( VerticaAppStatus) from trove.guestagent.datastore.experimental.vertica.service import VerticaApp from trove.guestagent.datastore.experimental.vertica import system from trove.guestagent import dbaas from trove.guestagent import volume from trove.guestagent.volume import VolumeDevice from trove.tests.unittests import trove_testtools class GuestAgentManagerTest(trove_testtools.TestCase): @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def setUp(self, *args, **kwargs): super(GuestAgentManagerTest, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.manager = Manager() self.origin_format = volume.VolumeDevice.format self.origin_migrate_data = volume.VolumeDevice.migrate_data self.origin_mount = volume.VolumeDevice.mount self.origin_unmount = volume.VolumeDevice.unmount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_set_read = volume.VolumeDevice.set_readahead_size self.origin_install_vertica = VerticaApp.install_vertica self.origin_create_db = VerticaApp.create_db self.origin_stop_db = VerticaApp.stop_db self.origin_start_db = VerticaApp.start_db self.origin_restart = VerticaApp.restart self.origin_install_if = VerticaApp.install_if_needed self.origin_enable_root = VerticaApp.enable_root self.origin_is_root_enabled = VerticaApp.is_root_enabled self.origin_prepare_for_install_vertica = ( VerticaApp.prepare_for_install_vertica) self.origin_add_udls = VerticaApp.add_udls def tearDown(self): super(GuestAgentManagerTest, self).tearDown() volume.VolumeDevice.format = self.origin_format volume.VolumeDevice.migrate_data = self.origin_migrate_data volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.unmount = self.origin_unmount volume.VolumeDevice.mount_points = self.origin_mount_points volume.VolumeDevice.set_readahead_size = self.origin_set_read VerticaApp.create_db = self.origin_create_db 
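        # setUp stashed the original class attributes; restoring them here
        # (and below) undoes the MagicMock assignments made by individual
        # tests so later test classes see the real VerticaApp methods.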
VerticaApp.install_vertica = self.origin_install_vertica VerticaApp.stop_db = self.origin_stop_db VerticaApp.start_db = self.origin_start_db VerticaApp.restart = self.origin_restart VerticaApp.install_if_needed = self.origin_install_if VerticaApp.enable_root = self.origin_enable_root VerticaApp.is_root_enabled = self.origin_is_root_enabled VerticaApp.prepare_for_install_vertica = ( self.origin_prepare_for_install_vertica) VerticaApp.add_udls = self.origin_add_udls def test_update_status(self): mock_status = MagicMock() self.manager.appStatus = mock_status self.manager.update_status(self.context) mock_status.update.assert_any_call() @patch.object(path, 'exists', MagicMock()) @patch.object(configuration.ConfigurationManager, 'save_configuration') def _prepare_dynamic(self, packages, config_content='MockContent', device_path='/dev/vdb', backup_id=None, overrides=None, is_mounted=False): # covering all outcomes is starting to cause trouble here expected_vol_count = 1 if device_path else 0 if not backup_id: backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', } mock_status = MagicMock() self.manager.appStatus = mock_status mock_status.begin_install = MagicMock(return_value=None) volume.VolumeDevice.format = MagicMock(return_value=None) volume.VolumeDevice.migrate_data = MagicMock(return_value=None) volume.VolumeDevice.mount = MagicMock(return_value=None) mount_points = [] if is_mounted: mount_points = ['/mnt'] VolumeDevice.mount_points = MagicMock(return_value=mount_points) VolumeDevice.unmount = MagicMock(return_value=None) VerticaApp.install_if_needed = MagicMock(return_value=None) VerticaApp.install_vertica = MagicMock(return_value=None) VerticaApp.create_db = MagicMock(return_value=None) VerticaApp.prepare_for_install_vertica = MagicMock(return_value=None) VerticaApp.add_udls = MagicMock() # invocation self.manager.prepare(context=self.context, packages=packages, config_contents=config_content, databases=None, memory_mb='2048', users=None, device_path=device_path, mount_point="/var/lib/vertica", backup_info=backup_info, overrides=None, cluster_config=None) self.assertEqual(expected_vol_count, VolumeDevice.format.call_count) self.assertEqual(expected_vol_count, VolumeDevice.migrate_data.call_count) self.assertEqual(expected_vol_count, VolumeDevice.mount_points.call_count) if is_mounted: self.assertEqual(1, VolumeDevice.unmount.call_count) else: self.assertEqual(0, VolumeDevice.unmount.call_count) VerticaApp.install_if_needed.assert_any_call(packages) VerticaApp.prepare_for_install_vertica.assert_any_call() VerticaApp.install_vertica.assert_any_call() VerticaApp.create_db.assert_any_call() VerticaApp.add_udls.assert_any_call() def test_prepare_pkg(self): self._prepare_dynamic(['vertica']) def test_prepare_no_pkg(self): self._prepare_dynamic([]) def test_restart(self): mock_status = MagicMock() self.manager.appStatus = mock_status VerticaApp.restart = MagicMock(return_value=None) # invocation self.manager.restart(self.context) # verification/assertion VerticaApp.restart.assert_any_call() def test_stop_db(self): mock_status = MagicMock() self.manager.appStatus = mock_status VerticaApp.stop_db = MagicMock(return_value=None) # invocation self.manager.stop_db(self.context) # verification/assertion VerticaApp.stop_db.assert_any_call(do_not_start_on_reboot=False) @patch.object(VerticaApp, 'install_vertica') @patch.object(VerticaApp, '_export_conf_to_members') @patch.object(VerticaApp, 'create_db') @patch.object(VerticaApp, 'add_udls') def 
test_install_cluster(self, mock_udls, mock_install, mock_export, mock_create_db): members = ['test1', 'test2'] self.manager.install_cluster(self.context, members) mock_install.assert_called_with('test1,test2') mock_export.assert_called_with(members) mock_create_db.assert_called_with('test1,test2') mock_udls.assert_any_call() @patch.object(VerticaAppStatus, 'set_status') @patch.object(VerticaApp, 'install_cluster', side_effect=RuntimeError("Boom!")) @patch('trove.guestagent.datastore.experimental.vertica.manager.LOG') def test_install_cluster_failure(self, mock_logging, mock_install, mock_set_status): members = ["test1", "test2"] self.assertRaises(RuntimeError, self.manager.install_cluster, self.context, members) mock_set_status.assert_called_with(rd_instance.ServiceStatuses.FAILED) @patch.object(VerticaApp, '_get_database_password') @patch.object(path, 'isfile') @patch.object(system, 'exec_vsql_command') def test_add_udls(self, mock_vsql, mock_isfile, mock_pwd): mock_vsql.return_value = (None, None) password = 'password' mock_pwd.return_value = password mock_isfile.return_value = True self.manager.app.add_udls() mock_vsql.assert_any_call( password, "CREATE LIBRARY curllib AS " "'/opt/vertica/sdk/examples/build/cURLLib.so'" ) mock_vsql.assert_any_call( password, "CREATE SOURCE curl AS LANGUAGE 'C++' NAME 'CurlSourceFactory' " "LIBRARY curllib" ) @patch.object(volume.VolumeDevice, 'mount_points', return_value=[]) @patch.object(volume.VolumeDevice, 'unmount_device', return_value=None) @patch.object(volume.VolumeDevice, 'mount', return_value=None) @patch.object(volume.VolumeDevice, 'migrate_data', return_value=None) @patch.object(volume.VolumeDevice, 'format', return_value=None) @patch.object(VerticaApp, 'prepare_for_install_vertica') @patch.object(VerticaApp, 'install_if_needed') @patch.object(VerticaApp, 'add_udls') @patch.object(VerticaAppStatus, 'begin_install') def _prepare_method(self, instance_id, instance_type, *args): cluster_config = {"id": instance_id, "instance_type": instance_type} # invocation self.manager.prepare(context=self.context, databases=None, packages=['vertica'], memory_mb='2048', users=None, mount_point='/var/lib/vertica', overrides=None, cluster_config=cluster_config) @patch.object(VerticaAppStatus, 'set_status') def test_prepare_member(self, mock_set_status): self._prepare_method("test-instance-3", "member") mock_set_status.assert_called_with( rd_instance.ServiceStatuses.INSTANCE_READY, force=True) def test_rpc_ping(self): output = self.manager.rpc_ping(self.context) self.assertTrue(output) @patch.object(VerticaAppStatus, 'set_status') @patch('trove.guestagent.datastore.manager.LOG') def test_prepare_invalid_cluster_config(self, mock_logging, mock_set_status): self.assertRaises(RuntimeError, self._prepare_method, "test-instance-3", "query_router") mock_set_status.assert_called_with( rd_instance.ServiceStatuses.FAILED, force=True) def test_get_filesystem_stats(self): with patch.object(dbaas, 'get_filesystem_volume_stats'): self.manager.get_filesystem_stats(self.context, '/var/lib/vertica') dbaas.get_filesystem_volume_stats.assert_any_call( '/var/lib/vertica') def test_mount_volume(self): with patch.object(volume.VolumeDevice, 'mount', return_value=None): self.manager.mount_volume(self.context, device_path='/dev/vdb', mount_point='/var/lib/vertica') test_mount = volume.VolumeDevice.mount.call_args_list[0] test_mount.assert_called_with('/var/lib/vertica', False) def test_unmount_volume(self): with patch.object(volume.VolumeDevice, 'unmount', return_value=None): 
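            # unmount is a no-op mock inside this block; only the manager's
            # delegation and the arguments it forwards are being checked.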
self.manager.unmount_volume(self.context, device_path='/dev/vdb') test_unmount = volume.VolumeDevice.unmount.call_args_list[0] test_unmount.assert_called_with('/var/lib/vertica') def test_resize_fs(self): with patch.object(volume.VolumeDevice, 'resize_fs', return_value=None): self.manager.resize_fs(self.context, device_path='/dev/vdb') test_resize_fs = volume.VolumeDevice.resize_fs.call_args_list[0] test_resize_fs.assert_called_with('/var/lib/vertica') @patch.object(operating_system, 'write_file') def test_cluster_complete(self, mock_write_file): mock_set_status = MagicMock() self.manager.appStatus.set_status = mock_set_status self.manager.appStatus._get_actual_db_status = MagicMock( return_value=rd_instance.ServiceStatuses.RUNNING) self.manager.cluster_complete(self.context) mock_set_status.assert_called_with( rd_instance.ServiceStatuses.RUNNING, force=True) def test_get_public_keys(self): with patch.object(VerticaApp, 'get_public_keys', return_value='some_key'): test_key = self.manager.get_public_keys(self.context, 'test_user') self.assertEqual('some_key', test_key) def test_authorize_public_keys(self): with patch.object(VerticaApp, 'authorize_public_keys', return_value=None): self.manager.authorize_public_keys(self.context, 'test_user', 'some_key') VerticaApp.authorize_public_keys.assert_any_call( 'test_user', 'some_key') def test_start_db_with_conf_changes(self): with patch.object(VerticaApp, 'start_db_with_conf_changes'): self.manager.start_db_with_conf_changes(self.context, 'something') VerticaApp.start_db_with_conf_changes.assert_any_call('something') def test_change_passwords(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.change_passwords, self.context, None) def test_update_attributes(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.update_attributes, self.context, 'test_user', '%', {'name': 'new_user'}) def test_create_database(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.create_database, self.context, [{'name': 'test_db'}]) def test_create_user(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.create_user, self.context, [{'name': 'test_user'}]) def test_delete_database(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.delete_database, self.context, [{'name': 'test_db'}]) def test_delete_user(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.delete_user, self.context, [{'name': 'test_user'}]) def test_get_user(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.get_user, self.context, 'test_user', '%') def test_grant_access(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.grant_access, self.context, 'test_user', '%', [{'name': 'test_db'}] ) def test_revoke_access(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.revoke_access, self.context, 'test_user', '%', [{'name': 'test_db'}] ) def test_list_access(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.list_access, self.context, 'test_user', '%') def test_list_databases(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.list_databases, self.context) def test_list_users(self): self.assertRaises(DatastoreOperationNotSupported, self.manager.list_users, self.context) def test_enable_root(self): VerticaApp.enable_root = MagicMock(return_value='user_id_stuff') user_id = self.manager.enable_root_with_password(self.context) self.assertThat(user_id, Is('user_id_stuff')) def test_is_root_enabled(self): 
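    # NOTE: assertThat(..., Is(...)) uses the testtools identity matcher,
    # which checks 'x is y' rather than equality; Equals() is the equality
    # matcher. A minimal sketch of the distinction (assuming
    # testtools.matchers is importable here):
    #
    #     from testtools.matchers import Equals, Is
    #     marker = object()
    #     self.assertThat(marker, Is(marker))   # identity: passes
    #     self.assertThat([1], Equals([1]))     # equality: passes
    #     self.assertThat([1], Is([1]))         # would fail: new object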
    def test_is_root_enabled(self):
        VerticaApp.is_root_enabled = MagicMock(return_value=True)
        is_enabled = self.manager.is_root_enabled(self.context)
        self.assertThat(is_enabled, Is(True))

    def test_create_backup(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.create_backup,
                          self.context, {})
trove-5.0.0/trove/tests/unittests/guestagent/test_operating_system.py0000664000567000056710000012644412701410316027540 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re
import stat
import tempfile

from mock import call, patch
from oslo_concurrency.processutils import UnknownArgumentError
import six
from testtools import ExpectedException

from trove.common import exception
from trove.common.stream_codecs import (
    Base64Codec, IdentityCodec, IniCodec, JsonCodec, KeyValueCodec,
    PropertiesCodec, YamlCodec)
from trove.common import utils
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.tests.unittests import trove_testtools


class TestOperatingSystem(trove_testtools.TestCase):

    def test_base64_codec(self):
        data = "Line 1\nLine 2\n"
        self._test_file_codec(data, Base64Codec())

        data = "TGluZSAxCkxpbmUgMgo="
        self._test_file_codec(data, Base64Codec(), reverse_encoding=True)

        data = "5Am9+y0wTwqUx39sMMBg3611FWg="
        self._test_file_codec(data, Base64Codec(), reverse_encoding=True)

    def test_identity_file_codec(self):
        data = ("Lorem Ipsum, Lorem Ipsum\n"
                "Lorem Ipsum, Lorem Ipsum\n"
                "Lorem Ipsum, Lorem Ipsum\n")
        self._test_file_codec(data, IdentityCodec())

    def test_ini_file_codec(self):
        data_no_none = {"Section1": {"s1k1": 's1v1',
                                     "s1k2": '3.1415926535'},
                        "Section2": {"s2k1": '1',
                                     "s2k2": 'True'}}
        self._test_file_codec(data_no_none, IniCodec())

        data_with_none = {"Section1": {"s1k1": 's1v1',
                                       "s1k2": '3.1415926535'},
                          "Section2": {"s2k1": '1',
                                       "s2k2": 'True',
                                       "s2k3": None}}

        # Keys with None values will be written without value.
        self._test_file_codec(data_with_none, IniCodec())

        # Non-string values will be converted to strings.
        data_with_none_as_objects = {"Section1": {"s1k1": 's1v1',
                                                  "s1k2": 3.1415926535},
                                     "Section2": {"s2k1": 1,
                                                  "s2k2": True,
                                                  "s2k3": None}}
        self._test_file_codec(data_with_none_as_objects, IniCodec(),
                              expected_data=data_with_none)

        # None will be replaced with 'default_value'.
default_value = '1' expected_data = guestagent_utils.update_dict( {"Section2": {"s2k3": default_value}}, dict(data_with_none)) self._test_file_codec(data_with_none, IniCodec(default_value=default_value), expected_data=expected_data) def test_yaml_file_codec(self): data = {"Section1": 's1v1', "Section2": {"s2k1": '1', "s2k2": 'True'}, "Section3": {"Section4": {"s4k1": '3.1415926535', "s4k2": None}} } self._test_file_codec(data, YamlCodec()) self._test_file_codec(data, YamlCodec(default_flow_style=True)) def test_properties_file_codec(self): data = {'key1': [1, "str1", '127.0.0.1', 3.1415926535, True, None], 'key2': [2.0, 3, 0, "str1 str2"], 'key3': ['str1', 'str2'], 'key4': [], 'key5': 5000, 'key6': 'str1', 'key7': 0, 'key8': None, 'key9': [['str1', 'str2'], ['str3', 'str4']], 'key10': [['str1', 'str2', 'str3'], ['str3', 'str4'], 'str5'], 'key11': True } self._test_file_codec(data, PropertiesCodec()) self._test_file_codec(data, PropertiesCodec( string_mappings={'yes': True, 'no': False, "''": None})) def test_key_value_file_codec(self): data = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} self._test_file_codec(data, KeyValueCodec()) def test_json_file_codec(self): data = {"Section1": 's1v1', "Section2": {"s2k1": '1', "s2k2": 'True'}, "Section3": {"Section4": {"s4k1": '3.1415926535', "s4k2": None}} } self._test_file_codec(data, JsonCodec()) def _test_file_codec(self, data, read_codec, write_codec=None, expected_data=None, expected_exception=None, reverse_encoding=False): write_codec = write_codec or read_codec with tempfile.NamedTemporaryFile() as test_file: encode = True decode = True if reverse_encoding: encode = False decode = False if expected_exception: with expected_exception: operating_system.write_file(test_file.name, data, codec=write_codec, encode=encode) operating_system.read_file(test_file.name, codec=read_codec, decode=decode) else: operating_system.write_file(test_file.name, data, codec=write_codec, encode=encode) read = operating_system.read_file(test_file.name, codec=read_codec, decode=decode) if expected_data is not None: self.assertEqual(expected_data, read) else: self.assertEqual(data, read) def test_read_write_file_input_validation(self): with ExpectedException(exception.UnprocessableEntity, "File does not exist: None"): operating_system.read_file(None) with ExpectedException(exception.UnprocessableEntity, "File does not exist: /__DOES_NOT_EXIST__"): operating_system.read_file('/__DOES_NOT_EXIST__') with ExpectedException(exception.UnprocessableEntity, "Invalid path: None"): operating_system.write_file(None, {}) @patch.object(operating_system, 'copy') def test_write_file_as_root(self, copy_mock): target_file = tempfile.NamedTemporaryFile() temp_file = tempfile.NamedTemporaryFile() with patch('tempfile.NamedTemporaryFile', return_value=temp_file): operating_system.write_file( target_file.name, "Lorem Ipsum", as_root=True) copy_mock.assert_called_once_with( temp_file.name, target_file.name, force=True, as_root=True) self.assertFalse(os.path.exists(temp_file.name)) @patch.object(operating_system, 'copy', side_effect=Exception("Error while executing 'copy'.")) def test_write_file_as_root_with_error(self, copy_mock): target_file = tempfile.NamedTemporaryFile() temp_file = tempfile.NamedTemporaryFile() with patch('tempfile.NamedTemporaryFile', return_value=temp_file): with ExpectedException(Exception, "Error while executing 'copy'."): operating_system.write_file(target_file.name, "Lorem Ipsum", as_root=True) self.assertFalse(os.path.exists(temp_file.name)) def 
test_start_service(self): self._assert_service_call(operating_system.start_service, 'cmd_start') def test_stop_service(self): self._assert_service_call(operating_system.stop_service, 'cmd_stop') def test_enable_service_on_boot(self): self._assert_service_call(operating_system.enable_service_on_boot, 'cmd_enable') def test_disable_service_on_boot(self): self._assert_service_call(operating_system.disable_service_on_boot, 'cmd_disable') @patch.object(operating_system, '_execute_service_command') def _assert_service_call(self, fun, expected_cmd_key, exec_service_cmd_mock): test_candidate_names = ['test_service_1', 'test_service_2'] fun(test_candidate_names) exec_service_cmd_mock.assert_called_once_with(test_candidate_names, expected_cmd_key) @patch.object(operating_system, 'service_discovery', return_value={'cmd_start': 'start', 'cmd_stop': 'stop', 'cmd_enable': 'enable', 'cmd_disable': 'disable'}) def test_execute_service_command(self, discovery_mock): test_service_candidates = ['service_name'] self._assert_execute_call([['start']], [{'shell': True}], operating_system._execute_service_command, None, test_service_candidates, 'cmd_start') discovery_mock.assert_called_once_with(test_service_candidates) with ExpectedException(exception.UnprocessableEntity, "Candidate service names not specified."): operating_system._execute_service_command([], 'cmd_disable') with ExpectedException(exception.UnprocessableEntity, "Candidate service names not specified."): operating_system._execute_service_command(None, 'cmd_start') with ExpectedException(RuntimeError, "Service control command not " "available: unknown"): operating_system._execute_service_command(test_service_candidates, 'unknown') def test_modes(self): self._assert_modes(None, None, None, operating_system.FileMode()) self._assert_modes(None, None, None, operating_system.FileMode([], [], [])) self._assert_modes(0o770, 0o4, 0o3, operating_system.FileMode( [stat.S_IRWXU, stat.S_IRWXG], [stat.S_IROTH], [stat.S_IWOTH | stat.S_IXOTH]) ) self._assert_modes(0o777, None, None, operating_system.FileMode( [stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self._assert_modes(0o777, None, None, operating_system.FileMode( reset=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self._assert_modes(None, 0o777, None, operating_system.FileMode( add=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self._assert_modes(None, None, 0o777, operating_system.FileMode( remove=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self.assertEqual( operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR]), operating_system.FileMode(add=[stat.S_IWUSR, stat.S_IRUSR])) self.assertEqual( hash(operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR])), hash(operating_system.FileMode(add=[stat.S_IWUSR, stat.S_IRUSR]))) self.assertNotEqual( operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR]), operating_system.FileMode(reset=[stat.S_IRUSR, stat.S_IWUSR])) self.assertNotEqual( hash(operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR])), hash(operating_system.FileMode(reset=[stat.S_IRUSR, stat.S_IWUSR])) ) def _assert_modes(self, ex_reset, ex_add, ex_remove, actual): self.assertEqual(bool(ex_reset or ex_add or ex_remove), actual.has_any()) self.assertEqual(ex_reset, actual.get_reset_mode()) self.assertEqual(ex_add, actual.get_add_mode()) self.assertEqual(ex_remove, actual.get_remove_mode()) def test_chmod(self): self._assert_execute_call( [['chmod', '-R', '=064', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.SET_GRP_RW_OTH_R, 
as_root=True) self._assert_execute_call( [['chmod', '-R', '+444', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.ADD_READ_ALL, as_root=True) self._assert_execute_call( [['chmod', '-R', '+060', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.ADD_GRP_RW, as_root=True) self._assert_execute_call( [['chmod', '-R', '=777', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.SET_FULL, as_root=True) self._assert_execute_call( [['chmod', '-f', '=777', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.SET_FULL, as_root=True, recursive=False, force=True) self._assert_execute_call( [['chmod', '-R', '=777', 'path']], [{'timeout': 100}], operating_system.chmod, None, 'path', FileMode.SET_FULL, timeout=100) self._assert_execute_call( [['chmod', '-R', '=777', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.chmod, None, 'path', FileMode.SET_FULL, as_root=True, timeout=None) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "No file mode specified."), 'path', FileMode()) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "No file mode specified."), 'path', None) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "Cannot change mode of a blank file."), '', FileMode.SET_FULL) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "Cannot change mode of a blank file."), None, FileMode.SET_FULL) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', FileMode.SET_FULL, _unknown_kw=0) def test_remove(self): self._assert_execute_call( [['rm', '-R', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.remove, None, 'path', as_root=True) self._assert_execute_call( [['rm', '-f', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.remove, None, 'path', recursive=False, force=True, as_root=True) self._assert_execute_call( [['rm', '-R', 'path']], [{'timeout': 100}], operating_system.remove, None, 'path', timeout=100) self._assert_execute_call( [['rm', '-R', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.remove, None, 'path', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.remove, ExpectedException(exception.UnprocessableEntity, "Cannot remove a blank file."), '') self._assert_execute_call( None, None, operating_system.remove, ExpectedException(exception.UnprocessableEntity, "Cannot remove a blank file."), None) self._assert_execute_call( None, None, operating_system.remove, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', _unknown_kw=0) def test_move(self): self._assert_execute_call( [['mv', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.move, None, 'source', 'destination', as_root=True) self._assert_execute_call( [['mv', '-f', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.move, None, 'source', 'destination', force=True, as_root=True) 
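        # A usage note for the '_assert_execute_call' helper used throughout
        # this class (its full contract is documented on the helper itself
        # further below): each sub-list of the first argument is one expected
        # 'utils.execute_with_timeout' invocation and each dict of the second
        # argument carries that invocation's expected keywords, so the call
        # immediately below asserts exactly one "mv source destination"
        # execution with timeout=100.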
self._assert_execute_call( [['mv', 'source', 'destination']], [{'timeout': 100}], operating_system.move, None, 'source', 'destination', timeout=100) self._assert_execute_call( [['mv', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.move, None, 'source', 'destination', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', 'destination') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, 'destination') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', '') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', None) self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', '') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, None) self._assert_execute_call( None, None, operating_system.move, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'source', 'destination', _unknown_kw=0) def test_copy(self): self._assert_execute_call( [['cp', '-R', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.copy, None, 'source', 'destination', as_root=True) self._assert_execute_call( [['cp', '-f', '-p', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.copy, None, 'source', 'destination', force=True, preserve=True, recursive=False, as_root=True) self._assert_execute_call( [['cp', '-R', 'source', 'destination']], [{'timeout': 100}], operating_system.copy, None, 'source', 'destination', timeout=100) self._assert_execute_call( [['cp', '-R', 'source', 'destination']], [{'run_as_root': True, 'root_helper': "sudo", 'timeout': None}], operating_system.copy, None, 'source', 'destination', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', 'destination') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, 'destination') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', '') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', None) self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', '') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, None) self._assert_execute_call( None, None, operating_system.copy, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'source', 'destination', _unknown_kw=0) def test_chown(self): self._assert_execute_call( [['chown', '-R', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 
'usr', 'grp', as_root=True) self._assert_execute_call( [['chown', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', 'grp', recursive=False, as_root=True) self._assert_execute_call( [['chown', '-f', '-R', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', 'grp', force=True, as_root=True) self._assert_execute_call( [['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', '', 'grp', as_root=True) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', '', as_root=True) self._assert_execute_call( [['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', None, 'grp', as_root=True) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', None, as_root=True) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'timeout': 100}], operating_system.chown, None, 'path', 'usr', None, timeout=100) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.chown, None, 'path', 'usr', None, timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), '', 'usr', 'grp') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), None, 'usr', 'grp') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Please specify owner or group, or both."), 'path', '', '') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Please specify owner or group, or both."), 'path', None, None) self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), None, None, None) self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), '', '', '') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', 'usr', None, _unknown_kw=0) def test_change_user_group(self): self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.change_user_group, None, 'group', 'user', as_root=True) self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.change_user_group, None, 'group', 'user', append=True, add_group=True, as_root=True) self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'timeout': 100}], operating_system.change_user_group, None, 'group', 'user', timeout=100) self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'run_as_root': True, 'root_helper': "sudo", 'timeout': None}], operating_system.change_user_group, None, 'group', 'user', timeout=None, as_root=True) self._assert_execute_call( 
None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), '', 'group') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), None, 'group') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing group."), 'user', '') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing group."), 'user', None) self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), '', '') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), None, None) self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'user', 'add_group', _unknown_kw=0) def test_create_directory(self): self._assert_execute_call( [['mkdir', '-p', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', as_root=True) self._assert_execute_call( [['mkdir', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', force=False, as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='usr', group='grp', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', group='grp', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='usr', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'timeout': 100}, {'timeout': 100}], operating_system.create_directory, None, 'path', user='usr', timeout=100) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}, {'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.create_directory, None, 'path', user='usr', timeout=None, as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='usr', group='', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='', group='grp', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='', group='', as_root=True) self._assert_execute_call( None, None, operating_system.create_directory, 
ExpectedException(exception.UnprocessableEntity, "Cannot create a blank directory."), '', user='usr', group='grp') self._assert_execute_call( None, None, operating_system.create_directory, ExpectedException(exception.UnprocessableEntity, "Cannot create a blank directory."), None) self._assert_execute_call( None, None, operating_system.create_directory, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', _unknown_kw=0) def test_exists(self): self.assertFalse( operating_system.exists(tempfile.gettempdir(), is_directory=False)) self.assertTrue( operating_system.exists(tempfile.gettempdir(), is_directory=True)) with tempfile.NamedTemporaryFile() as test_file: self.assertTrue( operating_system.exists(test_file.name, is_directory=False)) self.assertFalse( operating_system.exists(test_file.name, is_directory=True)) self._assert_execute_call( [['test -f path && echo 1 || echo 0']], [{'shell': True, 'check_exit_code': False, 'run_as_root': True, 'root_helper': 'sudo'}], operating_system.exists, None, 'path', is_directory=False, as_root=True) self._assert_execute_call( [['test -d path && echo 1 || echo 0']], [{'shell': True, 'check_exit_code': False, 'run_as_root': True, 'root_helper': 'sudo'}], operating_system.exists, None, 'path', is_directory=True, as_root=True) def _assert_execute_call(self, exec_args, exec_kwargs, func, return_value, *args, **kwargs): """ Execute a function with given arguments. Assert a return value and appropriate sequence of calls to the 'utils.execute_with_timeout' interface as the result. :param exec_args: Expected arguments to the execute calls. This is a list-of-list where each sub-list represent a single call to 'utils.execute_with_timeout'. :type exec_args: list-of-lists :param exec_kwargs: Expected keywords to the execute call. This is a list-of-dicts where each dict represent a single call to 'utils.execute_with_timeout'. :type exec_kwargs: list-of-dicts :param func: Tested function call. :type func: callable :param return_value: Expected return value or exception from the tested call if any. :type return_value: object :param args: Arguments passed to the tested call. :type args: list :param kwargs: Keywords passed to the tested call. 
:type kwargs: dict """ with patch.object(utils, 'execute_with_timeout', return_value=('0', '')) as exec_call: if isinstance(return_value, ExpectedException): with return_value: func(*args, **kwargs) else: actual_value = func(*args, **kwargs) if return_value is not None: self.assertEqual(return_value, actual_value, "Return value mismatch.") expected_calls = [] for arg, kw in six.moves.zip(exec_args, exec_kwargs): expected_calls.append(call(*arg, **kw)) self.assertEqual(expected_calls, exec_call.mock_calls, "Mismatch in calls to " "'execute_with_timeout'.") def test_get_os_redhat(self): with patch.object(os.path, 'isfile', side_effect=[True]): find_os = operating_system.get_os() self.assertEqual('redhat', find_os) def test_get_os_suse(self): with patch.object(os.path, 'isfile', side_effect=[False, True]): find_os = operating_system.get_os() self.assertEqual('suse', find_os) def test_get_os_debian(self): with patch.object(os.path, 'isfile', side_effect=[False, False]): find_os = operating_system.get_os() self.assertEqual('debian', find_os) def test_upstart_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_sysvinit_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, True, True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_sysvinit_chkconfig_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, True, False, True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) @patch.object(os.path, 'islink', return_value=True) @patch.object(os.path, 'realpath') @patch.object(os.path, 'basename') def test_systemd_symlinked_type_service_discovery(self, mock_base, mock_path, mock_islink): with patch.object(os.path, 'isfile', side_effect=[False, False, True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_systemd_not_symlinked_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, False, True]): with patch.object(os.path, 'islink', return_value=False): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_file_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, True]): config_file = operating_system.file_discovery( ["/etc/mongodb.conf", "/etc/mongod.conf"]) self.assertEqual('/etc/mongod.conf', config_file) with patch.object(os.path, 'isfile', side_effect=[False]): config_file = operating_system.file_discovery( ["/etc/mongodb.conf"]) self.assertEqual('', config_file) def test_list_files_in_directory(self): root_path = tempfile.mkdtemp() try: all_paths = set() self._create_temp_fs_structure( root_path, 3, 3, ['txt', 'py', ''], 1, all_paths) # All files in the top directory. self._assert_list_files( root_path, False, None, False, all_paths, 9) # All files & directories in the top directory. self._assert_list_files( root_path, False, None, True, all_paths, 10) # All files recursive. 
            self._assert_list_files(
                root_path, True, None, False, all_paths, 27)

            # All files & directories recursive.
            self._assert_list_files(
                root_path, True, None, True, all_paths, 29)

            # Only '*.txt' in the top directory.
            self._assert_list_files(
                root_path, False, r'.*\.txt$', False, all_paths, 3)

            # Only '*.txt' (including directories) in the top directory.
            self._assert_list_files(
                root_path, False, r'.*\.txt$', True, all_paths, 3)

            # Only '*.txt' (including directories) recursive.
            self._assert_list_files(
                root_path, True, r'.*\.txt$', True, all_paths, 9)

            # Only '*.txt' recursive.
            self._assert_list_files(
                root_path, True, r'.*\.txt$', False, all_paths, 9)

            # Only extension-less files in the top directory.
            self._assert_list_files(
                root_path, False, r'[^\.]*$', False, all_paths, 3)

            # Only extension-less files recursive.
            self._assert_list_files(
                root_path, True, r'[^\.]*$', False, all_paths, 9)

            # Non-existing extension in the top directory.
            self._assert_list_files(
                root_path, False, r'.*\.bak$', False, all_paths, 0)

            # Non-existing extension recursive.
            self._assert_list_files(
                root_path, True, r'.*\.bak$', False, all_paths, 0)
        finally:
            # NOTE: os.remove() cannot delete a directory tree; use
            # shutil.rmtree() (imported locally to keep the fix
            # self-contained) so the temporary structure is cleaned up.
            try:
                import shutil
                shutil.rmtree(root_path)
            except Exception:
                pass  # Do not fail in the cleanup.

    def _assert_list_files(self, root, recursive, pattern, include_dirs,
                           all_paths, count):
        found = operating_system.list_files_in_directory(
            root, recursive=recursive, pattern=pattern,
            include_dirs=include_dirs)
        expected = {
            path for path in filter(
                lambda item: include_dirs or not os.path.isdir(item),
                all_paths)
            if ((recursive or os.path.dirname(path) == root) and
                (not pattern or re.match(
                    pattern, os.path.basename(path))))}
        self.assertEqual(expected, found)
        self.assertEqual(count, len(found),
                         "Incorrect number of listed files.")

    def _create_temp_fs_structure(self, root_path,
                                  num_levels, num_files_per_extension,
                                  file_extensions, level, created_paths):
        """Create a structure of temporary directories 'num_levels' deep with
        temporary files on each level.
        """
        file_paths = self._create_temp_files(
            root_path, num_files_per_extension, file_extensions)
        created_paths.update(file_paths)

        if level < num_levels:
            path = tempfile.mkdtemp(dir=root_path)
            created_paths.add(path)
            self._create_temp_fs_structure(
                path, num_levels, num_files_per_extension,
                file_extensions, level + 1, created_paths)

    def _create_temp_files(self, root_path, num_files_per_extension,
                           file_extensions):
        """Create 'num_files_per_extension' temporary files
        per each of the given extensions.
        """
        files = set()
        for ext in file_extensions:
            for fileno in range(1, num_files_per_extension + 1):
                prefix = str(fileno)
                suffix = os.extsep + ext if ext else ''
                _, path = tempfile.mkstemp(prefix=prefix, suffix=suffix,
                                           dir=root_path)
                files.add(path)

        return files
trove-5.0.0/trove/tests/unittests/guestagent/test_cassandra_manager.py0000664000567000056710000011007412701410320027560 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
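
# NOTE: the fixtures below reach into "private" members through Python's
# name mangling: an attribute spelled '__admin' inside 'class Manager' is
# stored as '_Manager__admin', which is why setUp() assigns
# 'self.manager._Manager__admin' directly. A minimal sketch of the rule
# (illustrative names only):
#
#     class Manager(object):
#         def __init__(self):
#             self.__admin = 'admin'   # stored as '_Manager__admin'
#
#     assert Manager()._Manager__admin == 'admin'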
import os import random import string from mock import ANY from mock import call from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import NonCallableMagicMock from mock import patch from oslo_utils import netutils from testtools import ExpectedException from trove.common import exception from trove.common.instance import ServiceStatuses from trove.guestagent import backup from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.cassandra import ( manager as cass_manager) from trove.guestagent.datastore.experimental.cassandra import ( service as cass_service) from trove.guestagent.db import models from trove.guestagent import pkg as pkg from trove.guestagent import volume from trove.tests.unittests import trove_testtools class GuestAgentCassandraDBManagerTest(trove_testtools.TestCase): __MOUNT_POINT = '/var/lib/cassandra' __N_GAK = '_get_available_keyspaces' __N_GLU = '_get_listed_users' __N_BU = '_build_user' __N_RU = '_rename_user' __N_AUP = '_alter_user_password' __N_CAU = 'trove.guestagent.db.models.CassandraUser' __N_CU = '_create_user' __N_GFA = '_grant_full_access_on_keyspace' __N_DU = '_drop_user' __ACCESS_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT') __CREATE_DB_FORMAT = ( "CREATE KEYSPACE \"{}\" WITH REPLICATION = " "{{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};" ) __DROP_DB_FORMAT = "DROP KEYSPACE \"{}\";" __CREATE_USR_FORMAT = "CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;" __ALTER_USR_FORMAT = "ALTER USER '{}' WITH PASSWORD %s;" __DROP_USR_FORMAT = "DROP USER '{}';" __GRANT_FORMAT = "GRANT {} ON KEYSPACE \"{}\" TO '{}';" __REVOKE_FORMAT = "REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';" __LIST_PERMISSIONS_FORMAT = "LIST ALL PERMISSIONS NORECURSIVE;" __LIST_PERMISSIONS_OF_FORMAT = "LIST ALL PERMISSIONS OF '{}' NORECURSIVE;" __LIST_DB_FORMAT = "SELECT * FROM system.schema_keyspaces;" __LIST_USR_FORMAT = "LIST USERS;" @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def setUp(self, *args, **kwargs): super(GuestAgentCassandraDBManagerTest, self).setUp() self.real_status = cass_service.CassandraAppStatus.set_status class FakeInstanceServiceStatus(object): status = ServiceStatuses.NEW def save(self): pass cass_service.CassandraAppStatus.set_status = MagicMock( return_value=FakeInstanceServiceStatus()) self.context = trove_testtools.TroveTestContext(self) self.manager = cass_manager.Manager() self.manager._Manager__admin = cass_service.CassandraAdmin( models.CassandraUser('Test')) self.admin = self.manager._Manager__admin self.pkg = cass_service.packager self.origin_os_path_exists = os.path.exists self.origin_format = volume.VolumeDevice.format self.origin_migrate_data = volume.VolumeDevice.migrate_data self.origin_mount = volume.VolumeDevice.mount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_stop_db = cass_service.CassandraApp.stop_db self.origin_start_db = cass_service.CassandraApp.start_db self.origin_install_db = cass_service.CassandraApp._install_db self.original_get_ip = netutils.get_my_ipv4 self.orig_make_host_reachable = ( cass_service.CassandraApp.apply_initial_guestagent_configuration) def tearDown(self): super(GuestAgentCassandraDBManagerTest, self).tearDown() cass_service.packager = self.pkg os.path.exists = self.origin_os_path_exists volume.VolumeDevice.format = 
self.origin_format volume.VolumeDevice.migrate_data = self.origin_migrate_data volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.mount_points = self.origin_mount_points cass_service.CassandraApp.stop_db = self.origin_stop_db cass_service.CassandraApp.start_db = self.origin_start_db cass_service.CassandraApp._install_db = self.origin_install_db netutils.get_my_ipv4 = self.original_get_ip cass_service.CassandraApp.apply_initial_guestagent_configuration = ( self.orig_make_host_reachable) cass_service.CassandraAppStatus.set_status = self.real_status def test_update_status(self): mock_status = MagicMock() self.manager.app.status = mock_status self.manager.update_status(self.context) mock_status.update.assert_any_call() def test_prepare_pkg(self): self._prepare_dynamic(['cassandra']) def test_prepare_no_pkg(self): self._prepare_dynamic([]) def test_prepare_db_not_installed(self): self._prepare_dynamic([], is_db_installed=False) def test_prepare_db_not_installed_no_package(self): self._prepare_dynamic([], is_db_installed=True) @patch.object(backup, 'restore') def test_prepare_db_restore(self, restore): backup_info = {'id': 'backup_id', 'instance_id': 'fake-instance-id', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum'} self._prepare_dynamic(['cassandra'], is_db_installed=False, backup_info=backup_info) restore.assert_called_once_with( self.context, backup_info, self.__MOUNT_POINT) @patch.multiple(operating_system, enable_service_on_boot=DEFAULT, disable_service_on_boot=DEFAULT) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_superuser_password_reset( self, _, enable_service_on_boot, disable_service_on_boot): fake_status = MagicMock() fake_status.is_running = False test_app = cass_service.CassandraApp() test_app.status = fake_status with patch.multiple( test_app, start_db=DEFAULT, stop_db=DEFAULT, restart=DEFAULT, _CassandraApp__disable_remote_access=DEFAULT, _CassandraApp__enable_remote_access=DEFAULT, _CassandraApp__disable_authentication=DEFAULT, _CassandraApp__enable_authentication=DEFAULT, _CassandraApp__reset_user_password_to_default=DEFAULT, secure=DEFAULT) as calls: test_app._reset_admin_password() disable_service_on_boot.assert_called_once_with( test_app.service_candidates) calls[ '_CassandraApp__disable_remote_access' ].assert_called_once_with() calls[ '_CassandraApp__disable_authentication' ].assert_called_once_with() calls['start_db'].assert_called_once_with(update_db=False, enable_on_boot=False), calls[ '_CassandraApp__enable_authentication' ].assert_called_once_with() pw_reset_mock = calls[ '_CassandraApp__reset_user_password_to_default' ] pw_reset_mock.assert_called_once_with(test_app._ADMIN_USER) calls['secure'].assert_called_once_with( update_user=pw_reset_mock.return_value) calls['restart'].assert_called_once_with() calls['stop_db'].assert_called_once_with() calls[ '_CassandraApp__enable_remote_access' ].assert_called_once_with() enable_service_on_boot.assert_called_once_with( test_app.service_candidates) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_change_cluster_name(self, _): fake_status = MagicMock() fake_status.is_running = True test_app = cass_service.CassandraApp() test_app.status = fake_status with patch.multiple( test_app, start_db=DEFAULT, stop_db=DEFAULT, restart=DEFAULT, _update_cluster_name_property=DEFAULT, _CassandraApp__reset_cluster_name=DEFAULT) as calls: sample_name = NonCallableMagicMock() test_app.change_cluster_name(sample_name) 
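            # NOTE: patch.multiple(..., attr=DEFAULT) yields a dict of
            # freshly created mocks keyed by attribute name, which is why
            # 'calls' is indexed by the patched method names below.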
calls['_CassandraApp__reset_cluster_name'].assert_called_once_with( sample_name) calls['_update_cluster_name_property'].assert_called_once_with( sample_name) calls['restart'].assert_called_once_with() @patch.object(cass_service, 'CONF', DEFAULT) @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') def test_apply_post_restore_updates(self, _, conf_mock): fake_status = MagicMock() fake_status.is_running = False test_app = cass_service.CassandraApp() test_app.status = fake_status with patch.multiple( test_app, start_db=DEFAULT, stop_db=DEFAULT, _update_cluster_name_property=DEFAULT, _reset_admin_password=DEFAULT, change_cluster_name=DEFAULT) as calls: backup_info = {'instance_id': 'old_id'} conf_mock.guest_id = 'new_id' test_app._apply_post_restore_updates(backup_info) calls['_update_cluster_name_property'].assert_called_once_with( 'old_id') calls['_reset_admin_password'].assert_called_once_with() calls['start_db'].assert_called_once_with(update_db=False) calls['change_cluster_name'].assert_called_once_with('new_id') calls['stop_db'].assert_called_once_with() def _prepare_dynamic(self, packages, config_content='MockContent', device_path='/dev/vdb', is_db_installed=True, backup_info=None, is_root_enabled=False, overrides=None): mock_status = MagicMock() mock_app = MagicMock() mock_app.status = mock_status self.manager._app = mock_app mock_status.begin_install = MagicMock(return_value=None) mock_app.install_if_needed = MagicMock(return_value=None) mock_app.init_storage_structure = MagicMock(return_value=None) mock_app.write_config = MagicMock(return_value=None) mock_app.apply_initial_guestagent_configuration = MagicMock( return_value=None) mock_app.restart = MagicMock(return_value=None) mock_app.start_db = MagicMock(return_value=None) mock_app.stop_db = MagicMock(return_value=None) mock_app._remove_system_tables = MagicMock(return_value=None) os.path.exists = MagicMock(return_value=True) volume.VolumeDevice.format = MagicMock(return_value=None) volume.VolumeDevice.migrate_data = MagicMock(return_value=None) volume.VolumeDevice.mount = MagicMock(return_value=None) volume.VolumeDevice.mount_points = MagicMock(return_value=[]) with patch.object(pkg.Package, 'pkg_is_installed', return_value=is_db_installed): # invocation self.manager.prepare(context=self.context, packages=packages, config_contents=config_content, databases=None, memory_mb='2048', users=None, device_path=device_path, mount_point=self.__MOUNT_POINT, backup_info=backup_info, overrides=None, cluster_config=None) # verification/assertion mock_status.begin_install.assert_any_call() mock_app.install_if_needed.assert_any_call(packages) mock_app._remove_system_tables.assert_any_call() mock_app.init_storage_structure.assert_any_call('/var/lib/cassandra') mock_app.apply_initial_guestagent_configuration.assert_any_call( cluster_name=None) mock_app.start_db.assert_any_call(update_db=False) mock_app.stop_db.assert_any_call() if backup_info: mock_app._apply_post_restore_updates.assert_called_once_with( backup_info) def test_keyspace_validation(self): valid_name = self._get_random_name(32) db = models.CassandraSchema(valid_name) self.assertEqual(valid_name, db.name) with ExpectedException(ValueError): models.CassandraSchema(self._get_random_name(33)) def test_user_validation(self): valid_name = self._get_random_name(65535) usr = models.CassandraUser(valid_name, 'password') self.assertEqual(valid_name, usr.name) self.assertEqual('password', usr.password) with ExpectedException(ValueError): 
models.CassandraUser(self._get_random_name(65536)) @classmethod def _serialize_collection(self, *collection): return [item.serialize() for item in collection] @classmethod def _get_random_name(self, size, chars=string.letters + string.digits): return ''.join(random.choice(chars) for _ in range(size)) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_create_database(self, conn): db1 = models.CassandraSchema('db1') db2 = models.CassandraSchema('db2') db3 = models.CassandraSchema(self._get_random_name(32)) self.manager.create_database(self.context, self._serialize_collection(db1, db2, db3)) conn.return_value.execute.assert_has_calls([ call(self.__CREATE_DB_FORMAT, (db1.name,)), call(self.__CREATE_DB_FORMAT, (db2.name,)), call(self.__CREATE_DB_FORMAT, (db3.name,)) ]) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_delete_database(self, conn): db = models.CassandraSchema(self._get_random_name(32)) self.manager.delete_database(self.context, db.serialize()) conn.return_value.execute.assert_called_once_with( self.__DROP_DB_FORMAT, (db.name,)) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_create_user(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr2', '') usr3 = models.CassandraUser(self._get_random_name(1025), 'password') self.manager.create_user(self.context, self._serialize_collection(usr1, usr2, usr3)) conn.return_value.execute.assert_has_calls([ call(self.__CREATE_USR_FORMAT, (usr1.name,), (usr1.password,)), call(self.__CREATE_USR_FORMAT, (usr2.name,), (usr2.password,)), call(self.__CREATE_USR_FORMAT, (usr3.name,), (usr3.password,)) ]) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_delete_user(self, conn): usr = models.CassandraUser(self._get_random_name(1025), 'password') self.manager.delete_user(self.context, usr.serialize()) conn.return_value.execute.assert_called_once_with( self.__DROP_USR_FORMAT, (usr.name,)) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_change_passwords(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr2', '') usr3 = models.CassandraUser(self._get_random_name(1025), 'password') self.manager.change_passwords(self.context, self._serialize_collection( usr1, usr2, usr3)) conn.return_value.execute.assert_has_calls([ call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)), call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)), call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,)) ]) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_alter_user_password(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr2', '') usr3 = models.CassandraUser(self._get_random_name(1025), 'password') self.admin.alter_user_password(usr1) self.admin.alter_user_password(usr2) self.admin.alter_user_password(usr3) conn.return_value.execute.assert_has_calls([ call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)), call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)), call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,)) ]) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_grant_access(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr1', 'password') db1 = models.CassandraSchema('db1') db2 = models.CassandraSchema('db2') db3 = models.CassandraSchema('db3') self.manager.grant_access(self.context, usr1.name, None, [db1.name, 
db2.name]) self.manager.grant_access(self.context, usr2.name, None, [db3.name]) expected = [] for modifier in self.__ACCESS_MODIFIERS: expected.append(call(self.__GRANT_FORMAT, (modifier, db1.name, usr1.name))) expected.append(call(self.__GRANT_FORMAT, (modifier, db3.name, usr2.name))) conn.return_value.execute.assert_has_calls(expected, any_order=True) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_revoke_access(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr1', 'password') db1 = models.CassandraSchema('db1') db2 = models.CassandraSchema('db2') self.manager.revoke_access(self.context, usr1.name, None, db1.name) self.manager.revoke_access(self.context, usr2.name, None, db2.name) conn.return_value.execute.assert_has_calls([ call(self.__REVOKE_FORMAT, (db1.name, usr1.name)), call(self.__REVOKE_FORMAT, (db2.name, usr2.name)) ]) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_get_available_keyspaces(self, conn): self.manager.list_databases(self.context) conn.return_value.execute.assert_called_once_with( self.__LIST_DB_FORMAT) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_list_databases(self, conn): db1 = models.CassandraSchema('db1') db2 = models.CassandraSchema('db2') db3 = models.CassandraSchema(self._get_random_name(32)) with patch.object(self.admin, self.__N_GAK, return_value={db1, db2, db3}): found = self.manager.list_databases(self.context) self.assertEqual(2, len(found)) self.assertEqual(3, len(found[0])) self.assertEqual(None, found[1]) self.assertIn(db1.serialize(), found[0]) self.assertIn(db2.serialize(), found[0]) self.assertIn(db3.serialize(), found[0]) with patch.object(self.admin, self.__N_GAK, return_value=set()): found = self.manager.list_databases(self.context) self.assertEqual(([], None), found) def test_get_acl(self): r0 = NonCallableMagicMock(username='user1', resource='', permission='SELECT') r1 = NonCallableMagicMock(username='user2', resource='', permission='SELECT') r2 = NonCallableMagicMock(username='user2', resource='', permission='SELECT') r3 = NonCallableMagicMock(username='user2', resource='', permission='ALTER') r4 = NonCallableMagicMock(username='user3', resource='', permission='SELECT') r5 = NonCallableMagicMock(username='user3', resource='', permission='ALTER') r6 = NonCallableMagicMock(username='user3', resource='', permission='') r7 = NonCallableMagicMock(username='user3', resource='', permission='') r8 = NonCallableMagicMock(username='user3', resource='', permission='DELETE') r9 = NonCallableMagicMock(username='user4', resource='', permission='UPDATE') r10 = NonCallableMagicMock(username='user4', resource='', permission='DELETE') available_ks = {models.CassandraSchema('ks1'), models.CassandraSchema('ks2'), models.CassandraSchema('ks3')} mock_result_set = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r9, r9, r10] execute_mock = MagicMock(return_value=mock_result_set) mock_client = MagicMock(execute=execute_mock) with patch.object(self.admin, self.__N_GAK, return_value=available_ks) as gak_mock: acl = self.admin._get_acl(mock_client) execute_mock.assert_called_once_with( self.__LIST_PERMISSIONS_FORMAT) gak_mock.assert_called_once_with(mock_client) self.assertEqual({'user1': {'ks1': {'SELECT'}, 'ks2': {'SELECT'}, 'ks3': {'SELECT'}}, 'user2': {'ks1': {'SELECT'}, 'ks2': {'SELECT', 'ALTER'}}, 'user3': {'ks1': {'DELETE'}}, 'user4': {'ks1': {'UPDATE', 'DELETE'}, 'ks2': {'UPDATE'}, 'ks3': {'UPDATE'}} }, acl) mock_result_set = [r1, r2, r3] 
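        # Second scenario: with an explicit username, '_get_acl' should
        # issue the per-user 'LIST ALL PERMISSIONS OF' statement and skip
        # the available-keyspace lookup entirely, as asserted below.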
execute_mock = MagicMock(return_value=mock_result_set) mock_client = MagicMock(execute=execute_mock) with patch.object(self.admin, self.__N_GAK, return_value=available_ks) as gak_mock: acl = self.admin._get_acl(mock_client, username='user2') execute_mock.assert_called_once_with( self.__LIST_PERMISSIONS_OF_FORMAT.format('user2')) gak_mock.assert_not_called() self.assertEqual({'user2': {'ks1': {'SELECT'}, 'ks2': {'SELECT', 'ALTER'}}}, acl) mock_result_set = [] execute_mock = MagicMock(return_value=mock_result_set) mock_client = MagicMock(execute=execute_mock) with patch.object(self.admin, self.__N_GAK, return_value=available_ks) as gak_mock: acl = self.admin._get_acl(mock_client, username='nonexisting') execute_mock.assert_called_once_with( self.__LIST_PERMISSIONS_OF_FORMAT.format('nonexisting')) gak_mock.assert_not_called() self.assertEqual({}, acl) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_get_listed_users(self, conn): usr1 = models.CassandraUser(self._get_random_name(1025)) usr2 = models.CassandraUser(self._get_random_name(1025)) usr3 = models.CassandraUser(self._get_random_name(1025)) db1 = models.CassandraSchema('db1') db2 = models.CassandraSchema('db2') usr1.databases.append(db1.serialize()) usr3.databases.append(db2.serialize()) rv_1 = NonCallableMagicMock() rv_1.configure_mock(name=usr1.name, super=False) rv_2 = NonCallableMagicMock() rv_2.configure_mock(name=usr2.name, super=False) rv_3 = NonCallableMagicMock() rv_3.configure_mock(name=usr3.name, super=True) with patch.object(conn.return_value, 'execute', return_value=iter( [rv_1, rv_2, rv_3])): with patch.object(self.admin, '_get_acl', return_value={usr1.name: {db1.name: {'SELECT'}, db2.name: {}}, usr3.name: {db2.name: {'SELECT'}}} ): usrs = self.manager.list_users(self.context) conn.return_value.execute.assert_has_calls([ call(self.__LIST_USR_FORMAT), ], any_order=True) self.assertIn(usr1.serialize(), usrs[0]) self.assertIn(usr2.serialize(), usrs[0]) self.assertIn(usr3.serialize(), usrs[0]) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_list_access(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr2') usr3 = models.CassandraUser(self._get_random_name(1025), 'password') db1 = models.CassandraSchema('db1').serialize() db2 = models.CassandraSchema('db2').serialize() usr2.databases.append(db1) usr3.databases.append(db1) usr3.databases.append(db2) with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2, usr3}): usr1_dbs = self.manager.list_access(self.context, usr1.name, None) usr2_dbs = self.manager.list_access(self.context, usr2.name, None) usr3_dbs = self.manager.list_access(self.context, usr3.name, None) self.assertEqual([], usr1_dbs) self.assertEqual([db1], usr2_dbs) self.assertEqual([db1, db2], usr3_dbs) with patch.object(self.admin, self.__N_GLU, return_value=set()): with ExpectedException(exception.UserNotFound): self.manager.list_access(self.context, usr3.name, None) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_list_users(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr2') usr3 = models.CassandraUser(self._get_random_name(1025), 'password') with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2, usr3}): found = self.manager.list_users(self.context) self.assertEqual(2, len(found)) self.assertEqual(3, len(found[0])) self.assertEqual(None, found[1]) self.assertIn(usr1.serialize(), found[0]) self.assertIn(usr2.serialize(), found[0]) 
self.assertIn(usr3.serialize(), found[0]) with patch.object(self.admin, self.__N_GLU, return_value=set()): self.assertEqual(([], None), self.manager.list_users(self.context)) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_get_user(self, conn): usr1 = models.CassandraUser('usr1') usr2 = models.CassandraUser('usr2') usr3 = models.CassandraUser(self._get_random_name(1025), 'password') with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2, usr3}): found = self.manager.get_user(self.context, usr2.name, None) self.assertEqual(usr2.serialize(), found) with patch.object(self.admin, self.__N_GLU, return_value=set()): self.assertIsNone( self.manager.get_user(self.context, usr2.name, None)) @patch.object(cass_service.CassandraAdmin, '_deserialize_keyspace', side_effect=lambda p1: p1) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_rename_user(self, conn, ks_deserializer): usr = models.CassandraUser('usr') db1 = models.CassandraSchema('db1').serialize() db2 = models.CassandraSchema('db2').serialize() usr.databases.append(db1) usr.databases.append(db2) new_user = models.CassandraUser('new_user') with patch(self.__N_CAU, return_value=new_user): with patch.object(self.admin, self.__N_BU, return_value=usr): with patch.object(self.admin, self.__N_CU) as create: with patch.object(self.admin, self.__N_GFA) as grant: with patch.object(self.admin, self.__N_DU) as drop: usr_attrs = {'name': 'user', 'password': 'trove'} self.manager.update_attributes(self.context, usr.name, None, usr_attrs) create.assert_called_once_with(ANY, new_user) grant.assert_has_calls([call(ANY, db1, ANY), call(ANY, db2, ANY)]) drop.assert_called_once_with(ANY, usr) @patch.object(cass_service.CassandraLocalhostConnection, '__enter__') def test_update_attributes(self, conn): usr = models.CassandraUser('usr', 'pwd') with patch.object(self.admin, self.__N_BU, return_value=usr): usr_attrs = {'name': usr.name, 'password': usr.password} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: self.manager.update_attributes(self.context, usr.name, None, usr_attrs) self.assertEqual(0, rename.call_count) self.assertEqual(0, alter.call_count) usr_attrs = {'name': 'user', 'password': 'password'} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: self.manager.update_attributes(self.context, usr.name, None, usr_attrs) rename.assert_called_once_with(ANY, usr, usr_attrs['name'], usr_attrs['password']) self.assertEqual(0, alter.call_count) usr_attrs = {'name': 'user', 'password': usr.password} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: self.manager.update_attributes(self.context, usr.name, None, usr_attrs) rename.assert_called_once_with(ANY, usr, usr_attrs['name'], usr_attrs['password']) self.assertEqual(0, alter.call_count) usr_attrs = {'name': 'user'} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: with ExpectedException( exception.UnprocessableEntity, "Updating username " "requires specifying a password as well."): self.manager.update_attributes(self.context, usr.name, None, usr_attrs) self.assertEqual(0, rename.call_count) self.assertEqual(0, alter.call_count) usr_attrs = {'name': usr.name, 'password': 'password'} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: 
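                # Keeping the name but changing the password should only
                # trigger alter_user_password; no rename is attempted.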
self.manager.update_attributes(self.context, usr.name, None, usr_attrs) alter.assert_called_once_with(ANY, usr) self.assertEqual(0, rename.call_count) usr_attrs = {'password': usr.password} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: self.manager.update_attributes(self.context, usr.name, None, usr_attrs) self.assertEqual(0, rename.call_count) self.assertEqual(0, alter.call_count) usr_attrs = {'password': 'trove'} with patch.object(self.admin, self.__N_RU) as rename: with patch.object(self.admin, self.__N_AUP) as alter: self.manager.update_attributes(self.context, usr.name, None, usr_attrs) alter.assert_called_once_with(ANY, usr) self.assertEqual(0, rename.call_count) def test_update_overrides(self): cfg_mgr_mock = MagicMock() self.manager._app.configuration_manager = cfg_mgr_mock overrides = NonCallableMagicMock() self.manager.update_overrides(Mock(), overrides) cfg_mgr_mock.apply_user_override.assert_called_once_with(overrides) cfg_mgr_mock.remove_user_override.assert_not_called() def test_remove_overrides(self): cfg_mgr_mock = MagicMock() self.manager._app.configuration_manager = cfg_mgr_mock self.manager.update_overrides(Mock(), {}, remove=True) cfg_mgr_mock.remove_user_override.assert_called_once_with() cfg_mgr_mock.apply_user_override.assert_not_called() def test_apply_overrides(self): self.assertIsNone( self.manager.apply_overrides(Mock(), NonCallableMagicMock())) def test_enable_root(self): with patch.object(self.manager._app, 'is_root_enabled', return_value=False): with patch.object(cass_service.CassandraAdmin, '_create_superuser') as create_mock: self.manager.enable_root(self.context) create_mock.assert_called_once_with(ANY) with patch.object(self.manager._app, 'is_root_enabled', return_value=True): with patch.object(cass_service.CassandraAdmin, 'alter_user_password') as alter_mock: self.manager.enable_root(self.context) alter_mock.assert_called_once_with(ANY) def test_is_root_enabled(self): trove_admin = Mock() trove_admin.configure_mock(name=self.manager._app._ADMIN_USER) other_admin = Mock() other_admin.configure_mock(name='someuser') with patch.object(cass_service.CassandraAdmin, 'list_superusers', return_value=[]): self.assertFalse(self.manager.is_root_enabled(self.context)) with patch.object(cass_service.CassandraAdmin, 'list_superusers', return_value=[trove_admin]): self.assertFalse(self.manager.is_root_enabled(self.context)) with patch.object(cass_service.CassandraAdmin, 'list_superusers', return_value=[other_admin]): self.assertTrue(self.manager.is_root_enabled(self.context)) with patch.object(cass_service.CassandraAdmin, 'list_superusers', return_value=[trove_admin, other_admin]): self.assertTrue(self.manager.is_root_enabled(self.context)) trove-5.0.0/trove/tests/unittests/guestagent/test_couchbase_manager.py0000664000567000056710000001436312701410316027566 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
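# Unit tests for the Couchbase guest agent manager. They exercise the
# prepare/restart/stop_db entry points and root-password file handling, with
# service control, networking, and volume operations mocked out. The prepare
# tests below drive a call of roughly this shape (illustrative only, mirroring
# _prepare_dynamic):
#
#     manager.prepare(context, packages, None, 2048, None,
#                     device_path='/dev/vdb',
#                     mount_point='/var/lib/couchbase',
#                     backup_info=None, overrides=None, cluster_config=None)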
import os import stat import tempfile import mock from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import patch from oslo_utils import netutils from trove.common import utils from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchbase import ( manager as couch_manager) from trove.guestagent.datastore.experimental.couchbase import ( service as couch_service) from trove.guestagent import volume from trove.tests.unittests import trove_testtools class GuestAgentCouchbaseManagerTest(trove_testtools.TestCase): def setUp(self): super(GuestAgentCouchbaseManagerTest, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.manager = couch_manager.Manager() self.packages = 'couchbase-server' app_patcher = patch.multiple( couch_service.CouchbaseApp, stop_db=DEFAULT, start_db=DEFAULT, restart=DEFAULT) self.addCleanup(app_patcher.stop) app_patcher.start() netutils_patcher = patch.object(netutils, 'get_my_ipv4') self.addCleanup(netutils_patcher.stop) netutils_patcher.start() def tearDown(self): super(GuestAgentCouchbaseManagerTest, self).tearDown() def test_update_status(self): mock_status = MagicMock() self.manager.appStatus = mock_status self.manager.update_status(self.context) mock_status.update.assert_any_call() def test_prepare_device_path_true(self): self._prepare_dynamic() def test_prepare_from_backup(self): self._prepare_dynamic(backup_id='backup_id_123abc') @patch.multiple(couch_service.CouchbaseApp, install_if_needed=DEFAULT, start_db_with_conf_changes=DEFAULT, initial_setup=DEFAULT) @patch.multiple(volume.VolumeDevice, format=DEFAULT, mount=DEFAULT, mount_points=Mock(return_value=[])) @patch.object(backup, 'restore') def _prepare_dynamic(self, device_path='/dev/vdb', backup_id=None, *mocks, **kwmocks): # covering all outcomes is starting to cause trouble here backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'CbBackup', 'checksum': 'fake-checksum'} if backup_id else None mock_status = MagicMock() mock_status.begin_install = MagicMock(return_value=None) self.manager.appStatus = mock_status instance_ram = 2048 mount_point = '/var/lib/couchbase' self.manager.prepare(self.context, self.packages, None, instance_ram, None, device_path=device_path, mount_point=mount_point, backup_info=backup_info, overrides=None, cluster_config=None) # verification/assertion mock_status.begin_install.assert_any_call() kwmocks['install_if_needed'].assert_any_call(self.packages) if backup_info: backup.restore.assert_any_call(self.context, backup_info, mount_point) def test_restart(self): mock_status = MagicMock() self.manager.appStatus = mock_status couch_service.CouchbaseApp.restart = MagicMock(return_value=None) # invocation self.manager.restart(self.context) # verification/assertion couch_service.CouchbaseApp.restart.assert_any_call() def test_stop_db(self): mock_status = MagicMock() self.manager.appStatus = mock_status couch_service.CouchbaseApp.stop_db = MagicMock(return_value=None) # invocation self.manager.stop_db(self.context) # verification/assertion couch_service.CouchbaseApp.stop_db.assert_any_call( do_not_start_on_reboot=False) def __fake_mkstemp(self): self.tempfd, self.tempname = self.original_mkstemp() return self.tempfd, self.tempname def __fake_mkstemp_raise(self): raise OSError(11, 'Resource temporarily unavailable') def __cleanup_tempfile(self): if self.tempname: os.unlink(self.tempname) @mock.patch.object(utils, 'execute_with_timeout', Mock(return_value=('0', ''))) def 
test_write_password_to_file1(self): self.original_mkstemp = tempfile.mkstemp self.tempname = None with mock.patch.object(tempfile, 'mkstemp', self.__fake_mkstemp): self.addCleanup(self.__cleanup_tempfile) rootaccess = couch_service.CouchbaseRootAccess() rootaccess.write_password_to_file('mypassword') filepermissions = os.stat(self.tempname).st_mode self.assertEqual(stat.S_IRUSR, filepermissions & 0o777) @mock.patch.object(utils, 'execute_with_timeout', Mock(return_value=('0', ''))) @mock.patch( 'trove.guestagent.datastore.experimental.couchbase.service.LOG') def test_write_password_to_file2(self, mock_logging): self.original_mkstemp = tempfile.mkstemp self.tempname = None with mock.patch.object(tempfile, 'mkstemp', self.__fake_mkstemp_raise): rootaccess = couch_service.CouchbaseRootAccess() self.assertRaises(RuntimeError, rootaccess.write_password_to_file, 'mypassword') trove-5.0.0/trove/tests/unittests/guestagent/test_guestagent_utils.py0000664000567000056710000001445412701410316027527 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.guestagent.common import guestagent_utils from trove.tests.unittests import trove_testtools class TestGuestagentUtils(trove_testtools.TestCase): def test_update_dict(self): data = [{ 'dict': {}, 'update': {}, 'expected': {}, }, { 'dict': None, 'update': {}, 'expected': {}, }, { 'dict': {}, 'update': None, 'expected': {}, }, { 'dict': {}, 'update': None, 'expected': {}, }, { 'dict': None, 'update': {'name': 'Tom'}, 'expected': {'name': 'Tom'}, }, { 'dict': {}, 'update': {'name': 'Tom'}, 'expected': {'name': 'Tom'}, }, { 'dict': {'name': 'Tom'}, 'update': {}, 'expected': {'name': 'Tom'}, }, { 'dict': {'key1': 'value1', 'dict1': {'key1': 'value1', 'key2': 'value2'}}, 'update': {'key1': 'value1+', 'key2': 'value2', 'dict1': {'key3': 'value3'}}, 'expected': {'key1': 'value1+', 'key2': 'value2', 'dict1': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}}, }, { 'dict': {'d1': {'d2': {'d3': {'k1': 'v1'}}}}, 'update': {'d1': {'d2': {'d3': {'k2': 'v2'}}}}, 'expected': {'d1': {'d2': {'d3': {'k1': 'v1', 'k2': 'v2'}}}}, }, { 'dict': {'timeout': 0, 'save': [[900, 1], [300, 10]]}, 'update': {'save': [[300, 20], [60, 10000]]}, 'expected': {'timeout': 0, 'save': [[300, 20], [60, 10000]]}, }, { 'dict': {'rpc_address': '0.0.0.0', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '0.0.0.0'}]}] }, 'update': {'rpc_address': '127.0.0.1', 'seed_provider': {'parameters': { 'seeds': '127.0.0.1'}} }, 'expected': {'rpc_address': '127.0.0.1', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '127.0.0.1'}]}] }, }, { 'dict': {'rpc_address': '127.0.0.1', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 
'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '0.0.0.0'}]}] }, 'update': {'seed_provider': [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider'}] }, 'expected': {'rpc_address': '127.0.0.1', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider' }]}, }] count = 0 for record in data: count += 1 target = record['dict'] update = record['update'] expected = record['expected'] result = guestagent_utils.update_dict(update, target) msg = 'Unexpected result for test %s' % str(count) self.assertEqual(expected, result, msg) def test_build_file_path(self): self.assertEqual( 'base_dir/base_name', guestagent_utils.build_file_path('base_dir', 'base_name')) self.assertEqual( 'base_dir/base_name.ext1', guestagent_utils.build_file_path('base_dir', 'base_name', 'ext1')) self.assertEqual( 'base_dir/base_name.ext1.ext2', guestagent_utils.build_file_path( 'base_dir', 'base_name', 'ext1', 'ext2')) def test_flatten_expand_dict(self): self._assert_flatten_expand_dict({}, {}) self._assert_flatten_expand_dict({'ns1': 1}, {'ns1': 1}) self._assert_flatten_expand_dict( {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}, {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}) def _assert_flatten_expand_dict(self, nested_dict, flattened_dict): self.assertEqual( flattened_dict, guestagent_utils.flatten_dict(nested_dict)) self.assertEqual( nested_dict, guestagent_utils.expand_dict(flattened_dict)) def test_to_bytes(self): self.assertEqual('1024', guestagent_utils.to_bytes('1024')) self.assertEqual('1048576', guestagent_utils.to_bytes('1024K')) self.assertEqual('1073741824', guestagent_utils.to_bytes('1024M')) self.assertEqual('1099511627776', guestagent_utils.to_bytes('1024G')) self.assertEqual('1024T', guestagent_utils.to_bytes('1024T')) self.assertEqual(1024, guestagent_utils.to_bytes(1024)) self.assertEqual('Hello!', guestagent_utils.to_bytes('Hello!')) self.assertEqual('', guestagent_utils.to_bytes('')) self.assertIsNone(guestagent_utils.to_bytes(None)) trove-5.0.0/trove/tests/unittests/taskmanager/0000775000567000056710000000000012701410521022645 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/taskmanager/__init__.py0000664000567000056710000000000012701410316024746 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/unittests/taskmanager/test_api.py0000664000567000056710000001072012701410316025031 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
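# Unit tests for the task manager RPC API wrapper. Every test below checks
# the same two-step pattern: the API prepares an RPC client pinned to
# RPC_API_VERSION and then casts the expected message, roughly (sketch of the
# pattern under test, not part of the suite):
#
#     cctxt = self.client.prepare(version=RPC_API_VERSION)
#     cctxt.cast(self.context, 'detach_replica', instance_id=instance_id)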
from mock import Mock from mock import patch from trove.common import context from trove.common import exception from trove.common.rpc.version import RPC_API_VERSION from trove.common.strategies.cluster.experimental.mongodb.taskmanager import ( MongoDbTaskManagerAPI) from trove.guestagent import models as agent_models from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools class ApiTest(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client') def setUp(self, *args): super(ApiTest, self).setUp() self.context = context.TroveContext() self.api = task_api.API(self.context) self._mock_rpc_client() def _verify_rpc_prepare_before_cast(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION) def _verify_cast(self, *args, **kwargs): self.call_context.cast.assert_called_once_with(self.context, *args, **kwargs) def _mock_rpc_client(self): self.call_context = trove_testtools.TroveTestContext(self) self.api.client.prepare = Mock(return_value=self.call_context) self.call_context.cast = Mock() def test_detach_replica(self): self.api.detach_replica('some-instance-id') self._verify_rpc_prepare_before_cast() self._verify_cast('detach_replica', instance_id='some-instance-id') def test_promote_to_replica_source(self): self.api.promote_to_replica_source('some-instance-id') self._verify_rpc_prepare_before_cast() self._verify_cast('promote_to_replica_source', instance_id='some-instance-id') def test_eject_replica_source(self): self.api.eject_replica_source('some-instance-id') self._verify_rpc_prepare_before_cast() self._verify_cast('eject_replica_source', instance_id='some-instance-id') def test_create_cluster(self): self.api.create_cluster('some-cluster-id') self._verify_rpc_prepare_before_cast() self._verify_cast('create_cluster', cluster_id='some-cluster-id') def test_delete_cluster(self): self.api.delete_cluster('some-cluster-id') self._verify_rpc_prepare_before_cast() self._verify_cast('delete_cluster', cluster_id='some-cluster-id') @patch.object(agent_models, 'AgentHeartBeat') def test_delete_heartbeat(self, mock_agent_heart_beat): mock_heartbeat = Mock() mock_agent_heart_beat.return_value.find_by_instance_id = Mock( return_value=mock_heartbeat) self.api._delete_heartbeat('some-cluster-id') mock_heartbeat.delete.assert_called_with() @patch.object(agent_models, 'AgentHeartBeat') @patch('trove.taskmanager.api.LOG') def test_exception_delete_heartbeat(self, mock_logging, mock_agent_heart_beat): mock_agent_heart_beat.return_value.find_by_instance_id.side_effect = ( exception.ModelNotFoundError) self.api._delete_heartbeat('some-cluster-id') mock_agent_heart_beat.return_value.delete.assert_not_called() def test_transform_obj(self): flavor = Mock() self.assertRaisesRegexp(ValueError, ('Could not transform %s' % flavor), self.api._transform_obj, flavor) class TestAPI(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client') def test_load_api(self, get_client_mock): context = trove_testtools.TroveTestContext(self) manager = 'mongodb' self.assertIsInstance(task_api.load(context), task_api.API) self.assertIsInstance(task_api.load(context, manager), MongoDbTaskManagerAPI) trove-5.0.0/trove/tests/unittests/taskmanager/test_galera_clusters.py0000664000567000056710000002774512701410320027451 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from mock import Mock from mock import patch from trove.cluster.models import ClusterTasks as ClusterTaskStatus from trove.cluster.models import DBCluster from trove.common.exception import GuestError from trove.common.strategies.cluster.experimental.galera_common.taskmanager \ import GaleraCommonClusterTasks from trove.common.strategies.cluster.experimental.galera_common.taskmanager \ import GaleraCommonTaskManagerStrategy from trove.datastore import models as datastore_models from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceTasks from trove.taskmanager.models import ServiceStatuses from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class GaleraClusterTasksTest(trove_testtools.TestCase): def setUp(self): super(GaleraClusterTasksTest, self).setUp() util.init_db() self.cluster_id = "1232" self.cluster_name = "Cluster-1234" self.tenant_id = "6789" self.db_cluster = DBCluster(ClusterTaskStatus.NONE, id=self.cluster_id, created=str(datetime.date), updated=str(datetime.date), name=self.cluster_name, task_id=ClusterTaskStatus.NONE._code, tenant_id=self.tenant_id, datastore_version_id="1", deleted=False) self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, type="member") self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2", compute_instance_id="compute-2", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-2", datastore_version_id="1", cluster_id=self.cluster_id, type="member") self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3", compute_instance_id="compute-3", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-3", datastore_version_id="1", cluster_id=self.cluster_id, type="member") mock_ds1 = Mock() mock_ds1.name = 'pxc' mock_dv1 = Mock() mock_dv1.name = '7.1' self.clustertasks = GaleraCommonClusterTasks( Mock(), self.db_cluster, datastore=mock_ds1, datastore_version=mock_dv1) self.cluster_context = { 'replication_user': { 'name': "name", 'password': "password", }, 'cluster_name': self.cluster_name, 'admin_password': "admin_password" } @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_bad_status(self, mock_logging, mock_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.FAILED ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(InstanceServiceStatus, 'find_by') def test_all_instances_ready(self, mock_find): (mock_find.return_value. 
get_status.return_value) = ServiceStatuses.INSTANCE_READY ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) self.assertTrue(ret_val) @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(GaleraCommonClusterTasks, '_all_instances_ready', return_value=False) @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster_instance_not_ready(self, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_update): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_update.assert_called_with(self.cluster_id) @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(GaleraCommonClusterTasks, 'reset_task') @patch.object(GaleraCommonClusterTasks, 'get_ip') @patch.object(GaleraCommonClusterTasks, '_all_instances_ready') @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch('trove.common.strategies.cluster.experimental.galera_common.' 'taskmanager.LOG') def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_ip, mock_reset_task, mock_update_status): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_ip.return_value = "10.0.0.2" guest_client = Mock() guest_client.install_cluster = Mock(side_effect=GuestError("Error")) with patch.object(GaleraCommonClusterTasks, 'get_guest', return_value=guest_client): self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_update_status.assert_called_with('1232') mock_reset_task.assert_called_with() @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch('trove.common.strategies.cluster.experimental.galera_common.' 
           'taskmanager.LOG')
    def test_grow_cluster_does_not_exist(self, mock_logging,
                                         mock_update_status):
        context = Mock()
        bad_cluster_id = '1234'
        new_instances = [Mock(), Mock()]
        self.clustertasks.grow_cluster(context, bad_cluster_id,
                                       new_instances)
        mock_update_status.assert_called_with(
            '1234', status=InstanceTasks.GROWING_ERROR)

    @patch.object(GaleraCommonClusterTasks, '_check_cluster_for_root')
    @patch.object(GaleraCommonClusterTasks, 'reset_task')
    @patch.object(GaleraCommonClusterTasks, '_render_cluster_config')
    @patch.object(GaleraCommonClusterTasks, 'get_ip')
    @patch.object(GaleraCommonClusterTasks, 'get_guest')
    @patch.object(GaleraCommonClusterTasks, '_all_instances_ready',
                  return_value=True)
    @patch.object(Instance, 'load')
    @patch.object(DBInstance, 'find_all')
    @patch.object(datastore_models.Datastore, 'load')
    @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
    def test_grow_cluster_success(self, mock_dv, mock_ds, mock_find_all,
                                  mock_load, mock_ready, mock_guest,
                                  mock_ip, mock_render, mock_reset_task,
                                  mock_check_root):
        mock_find_all.return_value.all.return_value = [self.dbinst1]
        mock_ip.return_value = "10.0.0.2"
        context = Mock()
        new_instances = [Mock(), Mock()]
        mock_guest.get_cluster_context = Mock(
            return_value=self.cluster_context)
        mock_guest.reset_admin_password = Mock()
        self.clustertasks.grow_cluster(context, self.cluster_id,
                                       new_instances)
        mock_reset_task.assert_called_with()

    @patch.object(GaleraCommonClusterTasks, 'reset_task')
    @patch.object(Instance, 'load')
    @patch.object(Instance, 'delete')
    @patch.object(DBInstance, 'find_all')
    @patch.object(GaleraCommonClusterTasks, 'get_guest')
    @patch.object(GaleraCommonClusterTasks, 'get_ip')
    @patch.object(GaleraCommonClusterTasks, '_render_cluster_config')
    def test_shrink_cluster_success(self, mock_render, mock_ip, mock_guest,
                                    mock_find_all, mock_delete, mock_load,
                                    mock_reset_task):
        mock_find_all.return_value.all.return_value = [self.dbinst1]
        context = Mock()
        remove_instances = [Mock()]
        mock_ip.return_value = "10.0.0.2"
        mock_guest.get_cluster_context = Mock(
            return_value=self.cluster_context)
        self.clustertasks.shrink_cluster(context, self.cluster_id,
                                         remove_instances)
        mock_reset_task.assert_called_with()

    @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
    @patch('trove.common.strategies.cluster.experimental.galera_common.'
           'taskmanager.LOG')
    def test_shrink_cluster_does_not_exist(self, mock_logging,
                                           mock_update_status):
        context = Mock()
        bad_cluster_id = '1234'
        remove_instances = [Mock()]
        self.clustertasks.shrink_cluster(context, bad_cluster_id,
                                         remove_instances)
        mock_update_status.assert_called_with(
            '1234', status=InstanceTasks.SHRINKING_ERROR)


class GaleraTaskManagerStrategyTest(trove_testtools.TestCase):

    def test_task_manager_cluster_tasks_class(self):
        strategy = GaleraCommonTaskManagerStrategy()
        self.assertFalse(
            hasattr(strategy.task_manager_cluster_tasks_class,
                    'rebuild_cluster'))
        self.assertTrue(callable(
            strategy.task_manager_cluster_tasks_class.create_cluster))

    def test_task_manager_api_class(self):
        strategy = GaleraCommonTaskManagerStrategy()
        self.assertFalse(hasattr(strategy.task_manager_api_class,
                                 'add_new_node'))
trove-5.0.0/trove/tests/unittests/taskmanager/test_manager.py0000664000567000056710000003252712701410316025703 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock, patch, PropertyMock from trove.backup.models import Backup from trove.instance.tasks import InstanceTasks from trove.taskmanager.manager import Manager from trove.taskmanager import models from trove.taskmanager import service from trove.common.exception import TroveError, ReplicationSlaveAttachError from proboscis.asserts import assert_equal from trove.tests.unittests import trove_testtools class TestManager(trove_testtools.TestCase): def setUp(self): super(TestManager, self).setUp() self.manager = Manager() self.context = trove_testtools.TroveTestContext(self) self.mock_slave1 = Mock() self.mock_slave2 = Mock() type(self.mock_slave1).id = PropertyMock(return_value='some-inst-id') type(self.mock_slave2).id = PropertyMock(return_value='inst1') self.mock_old_master = Mock() type(self.mock_old_master).slaves = PropertyMock( return_value=[self.mock_slave1, self.mock_slave2]) self.mock_master = Mock() type(self.mock_master).slaves = PropertyMock( return_value=[self.mock_slave1, self.mock_slave2]) def tearDown(self): super(TestManager, self).tearDown() self.manager = None def test_getattr_lookup(self): self.assertTrue(callable(self.manager.delete_cluster)) self.assertTrue(callable(self.manager.mongodb_add_shard_cluster)) def test_most_current_replica(self): master = Mock() master.id = 32 def test_case(txn_list, selected_master): with patch.object(self.manager, '_get_replica_txns', return_value=txn_list): result = self.manager._most_current_replica(master, None) assert_equal(result, selected_master) with self.assertRaisesRegexp(TroveError, 'not all replicating from same'): test_case([['a', '2a99e-32bf', 2], ['b', '2a', 1]], None) test_case([['a', '2a99e-32bf', 2]], 'a') test_case([['a', '2a', 1], ['b', '2a', 2]], 'b') test_case([['a', '2a', 2], ['b', '2a', 1]], 'a') test_case([['a', '2a', 1], ['b', '2a', 1]], 'a') test_case([['a', None, 0]], 'a') test_case([['a', None, 0], ['b', '2a', 1]], 'b') def test_detach_replica(self): slave = Mock() master = Mock() with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[slave, master]): self.manager.detach_replica(self.context, 'some-inst-id') slave.detach_replica.assert_called_with(master) @patch.object(Manager, '_set_task_status') def test_promote_to_replica_source(self, mock_set_task_status): with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_slave1, self.mock_old_master, self.mock_slave2]): self.manager.promote_to_replica_source( self.context, 'some-inst-id') self.mock_slave1.detach_replica.assert_called_with( self.mock_old_master, for_failover=True) self.mock_old_master.attach_replica.assert_called_with( self.mock_slave1) self.mock_slave1.make_read_only.assert_called_with(False) self.mock_slave2.detach_replica.assert_called_with( self.mock_old_master, for_failover=True) self.mock_slave2.attach_replica.assert_called_with(self.mock_slave1) self.mock_old_master.demote_replication_master.assert_any_call() 
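            # After promotion, every affected instance (the old master and
            # both replicas) should be reset to InstanceTasks.NONE in a
            # single status update.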
            mock_set_task_status.assert_called_with(
                ([self.mock_old_master] +
                 [self.mock_slave1, self.mock_slave2]),
                InstanceTasks.NONE)

    @patch.object(Manager, '_set_task_status')
    @patch.object(Manager, '_most_current_replica')
    def test_eject_replica_source(self, mock_most_current_replica,
                                  mock_set_task_status):
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_master,
                                       self.mock_slave1,
                                       self.mock_slave2]):
            self.manager.eject_replica_source(self.context, 'some-inst-id')
            mock_most_current_replica.assert_called_with(
                self.mock_master, [self.mock_slave1, self.mock_slave2])
            mock_set_task_status.assert_called_with(
                ([self.mock_master] +
                 [self.mock_slave1, self.mock_slave2]),
                InstanceTasks.NONE)

    @patch.object(Manager, '_set_task_status')
    @patch('trove.taskmanager.manager.LOG')
    def test_exception_TroveError_promote_to_replica_source(self, *args):
        self.mock_slave2.detach_replica = Mock(side_effect=TroveError)
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1,
                                       self.mock_old_master,
                                       self.mock_slave2]):
            self.assertRaises(ReplicationSlaveAttachError,
                              self.manager.promote_to_replica_source,
                              self.context, 'some-inst-id')

    @patch.object(Manager, '_set_task_status')
    @patch.object(Manager, '_most_current_replica')
    @patch('trove.taskmanager.manager.LOG')
    def test_exception_TroveError_eject_replica_source(
            self, mock_logging, mock_most_current_replica,
            mock_set_task_status):
        self.mock_slave2.detach_replica = Mock(side_effect=TroveError)
        mock_most_current_replica.return_value = self.mock_slave1
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_master,
                                       self.mock_slave1,
                                       self.mock_slave2]):
            self.assertRaises(ReplicationSlaveAttachError,
                              self.manager.eject_replica_source,
                              self.context, 'some-inst-id')

    @patch.object(Manager, '_set_task_status')
    def test_error_promote_to_replica_source(self, *args):
        self.mock_slave2.detach_replica = Mock(
            side_effect=RuntimeError('Error'))
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1,
                                       self.mock_old_master,
                                       self.mock_slave2]):
            self.assertRaisesRegexp(RuntimeError, 'Error',
                                    self.manager.promote_to_replica_source,
                                    self.context, 'some-inst-id')

    @patch('trove.taskmanager.manager.LOG')
    def test_error_demote_replication_master_promote_to_replica_source(
            self, mock_logging):
        self.mock_old_master.demote_replication_master = Mock(
            side_effect=RuntimeError('Error'))
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1,
                                       self.mock_old_master,
                                       self.mock_slave2]):
            self.assertRaises(ReplicationSlaveAttachError,
                              self.manager.promote_to_replica_source,
                              self.context, 'some-inst-id')

    @patch.object(Manager, '_set_task_status')
    @patch.object(Manager, '_most_current_replica')
    def test_error_eject_replica_source(self, mock_most_current_replica,
                                        mock_set_task_status):
        self.mock_slave2.detach_replica = Mock(
            side_effect=RuntimeError('Error'))
        mock_most_current_replica.return_value = self.mock_slave1
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_master,
                                       self.mock_slave1,
                                       self.mock_slave2]):
            self.assertRaisesRegexp(RuntimeError, 'Error',
                                    self.manager.eject_replica_source,
                                    self.context, 'some-inst-id')

    @patch.object(Backup, 'delete')
    def test_create_replication_slave(self, mock_backup_delete):
        mock_tasks = Mock()
        mock_snapshot = {'dataset': {'snapshot_id': 'test-id'}}
        mock_tasks.get_replication_master_snapshot = Mock(
            return_value=mock_snapshot)
        mock_flavor = Mock()
        with patch.object(models.FreshInstanceTasks, 'load',
                          return_value=mock_tasks):
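            # A single replica create: the master snapshot is taken first and
            # the temporary backup produced for it is deleted afterwards.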
self.manager.create_instance(self.context, ['id1'], Mock(), mock_flavor, Mock(), None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'some_password', None, Mock(), 'some-master-id', None, None, None) mock_tasks.get_replication_master_snapshot.assert_called_with( self.context, 'some-master-id', mock_flavor, 'temp-backup-id', replica_number=1) mock_backup_delete.assert_called_with(self.context, 'test-id') @patch.object(models.FreshInstanceTasks, 'load') @patch.object(Backup, 'delete') @patch('trove.taskmanager.manager.LOG') def test_exception_create_replication_slave(self, mock_logging, mock_delete, mock_load): mock_load.return_value.create_instance = Mock(side_effect=TroveError) self.assertRaises(TroveError, self.manager.create_instance, self.context, ['id1', 'id2'], Mock(), Mock(), Mock(), None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'some_password', None, Mock(), 'some-master-id', None, None, None) def test_AttributeError_create_instance(self): self.assertRaisesRegexp( AttributeError, 'Cannot create multiple non-replica instances.', self.manager.create_instance, self.context, ['id1', 'id2'], Mock(), Mock(), Mock(), None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'some_password', None, Mock(), None, None, None, None) def test_create_instance(self): mock_tasks = Mock() mock_flavor = Mock() mock_override = Mock() with patch.object(models.FreshInstanceTasks, 'load', return_value=mock_tasks): self.manager.create_instance(self.context, 'id1', 'inst1', mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'password', None, mock_override, None, None, None, None) mock_tasks.create_instance.assert_called_with(mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'password', None, mock_override, None, None, None, None) mock_tasks.wait_for_instance.assert_called_with(36000, mock_flavor) def test_create_cluster(self): mock_tasks = Mock() with patch.object(models, 'load_cluster_tasks', return_value=mock_tasks): self.manager.create_cluster(self.context, 'some-cluster-id') mock_tasks.create_cluster.assert_called_with(self.context, 'some-cluster-id') def test_delete_cluster(self): mock_tasks = Mock() with patch.object(models, 'load_cluster_tasks', return_value=mock_tasks): self.manager.delete_cluster(self.context, 'some-cluster-id') mock_tasks.delete_cluster.assert_called_with(self.context, 'some-cluster-id') class TestTaskManagerService(trove_testtools.TestCase): def test_app_factory(self): test_service = service.app_factory(Mock()) self.assertIsInstance(test_service, service.TaskService) trove-5.0.0/trove/tests/unittests/taskmanager/test_models.py0000664000567000056710000013324112701410316025547 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
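# Unit tests for the task manager models: provisioning via FreshInstanceTasks
# (server create, security groups, injected config files), the recovery paths
# of ResizeVolumeAction, and BuiltInstanceTasks operations such as flavor
# resize and reboot.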
import datetime import os from tempfile import NamedTemporaryFile import uuid from cinderclient import exceptions as cinder_exceptions import cinderclient.v2.client as cinderclient from mock import Mock, MagicMock, patch, PropertyMock, call from novaclient import exceptions as nova_exceptions import novaclient.v2.flavors import novaclient.v2.servers from oslo_utils import timeutils from swiftclient.client import ClientException from testtools.matchers import Equals, Is import trove.backup.models from trove.backup import models as backup_models from trove.backup import state import trove.common.context from trove.common.exception import GuestError from trove.common.exception import MalformedSecurityGroupRuleError from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.instance import ServiceStatuses from trove.common.notification import TroveInstanceModifyVolume from trove.common import remote import trove.common.template as template from trove.common import utils from trove.datastore import models as datastore_models import trove.db.models from trove.extensions.common import models as common_models from trove.extensions.mysql import models as mysql_models import trove.guestagent.api from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceStatus from trove.instance.tasks import InstanceTasks from trove import rpc from trove.taskmanager import models as taskmanager_models from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util INST_ID = 'dbinst-id-1' VOLUME_ID = 'volume-id-1' class FakeOptGroup(object): def __init__(self, tcp_ports=['3306', '3301-3307'], udp_ports=[]): self.tcp_ports = tcp_ports self.udp_ports = udp_ports class fake_Server: def __init__(self): self.id = None self.name = None self.image_id = None self.flavor_id = None self.files = None self.userdata = None self.security_groups = None self.block_device_mapping = None self.status = 'ACTIVE' class fake_ServerManager: def create(self, name, image_id, flavor_id, files, userdata, security_groups, block_device_mapping, availability_zone=None, nics=None, config_drive=False): server = fake_Server() server.id = "server_id" server.name = name server.image_id = image_id server.flavor_id = flavor_id server.files = files server.userdata = userdata server.security_groups = security_groups server.block_device_mapping = block_device_mapping server.availability_zone = availability_zone server.nics = nics return server class fake_nova_client: def __init__(self): self.servers = fake_ServerManager() class fake_InstanceServiceStatus(object): _instance = None def __init__(self): self.deleted = False self.status = None pass def set_status(self, status): self.status = status pass def get_status(self): return self.status @classmethod def find_by(cls, **kwargs): if not cls._instance: cls._instance = fake_InstanceServiceStatus() return cls._instance def save(self): pass def delete(self): self.deleted = True pass def is_deleted(self): return self.deleted class fake_DBInstance(object): _instance = None def __init__(self): self.deleted = False pass @classmethod def find_by(cls, **kwargs): if not cls._instance: cls._instance = fake_DBInstance() return cls._instance def set_task_status(self, status): self.status = status pass def get_task_status(self): return self.status def save(self): pass def delete(self): self.deleted = True pass 
def is_deleted(self): return self.deleted class FreshInstanceTasksTest(trove_testtools.TestCase): def setUp(self): super(FreshInstanceTasksTest, self).setUp() mock_instance = patch('trove.instance.models.FreshInstance') mock_instance.start() self.addCleanup(mock_instance.stop) mock_instance.id = Mock(return_value='instance_id') mock_instance.tenant_id = Mock(return_value="tenant_id") mock_instance.hostname = Mock(return_value="hostname") mock_instance.name = Mock(return_value='name') mock_instance.nova_client = Mock( return_value=fake_nova_client()) mock_datastore_v = patch( 'trove.datastore.models.DatastoreVersion') mock_datastore_v.start() self.addCleanup(mock_datastore_v.stop) mock_datastore = patch( 'trove.datastore.models.Datastore') mock_datastore.start() self.addCleanup(mock_datastore.stop) taskmanager_models.FreshInstanceTasks.nova_client = fake_nova_client() self.orig_ISS_find_by = InstanceServiceStatus.find_by self.orig_DBI_find_by = DBInstance.find_by self.userdata = "hello moto" self.guestconfig_content = "guest config" with NamedTemporaryFile(suffix=".cloudinit", delete=False) as f: self.cloudinit = f.name f.write(self.userdata) with NamedTemporaryFile(delete=False) as f: self.guestconfig = f.name f.write(self.guestconfig_content) self.freshinstancetasks = taskmanager_models.FreshInstanceTasks( None, Mock(), None, None) self.tm_sg_create_inst_patch = patch.object( trove.taskmanager.models.SecurityGroup, 'create_for_instance', Mock(return_value={'id': uuid.uuid4(), 'name': uuid.uuid4()})) self.tm_sg_create_inst_mock = self.tm_sg_create_inst_patch.start() self.addCleanup(self.tm_sg_create_inst_patch.stop) self.tm_sgr_create_sgr_patch = patch.object( trove.taskmanager.models.SecurityGroupRule, 'create_sec_group_rule') self.tm_sgr_create_sgr_mock = self.tm_sgr_create_sgr_patch.start() self.addCleanup(self.tm_sgr_create_sgr_patch.stop) self.task_models_conf_patch = patch('trove.taskmanager.models.CONF') self.task_models_conf_mock = self.task_models_conf_patch.start() self.addCleanup(self.task_models_conf_patch.stop) def tearDown(self): super(FreshInstanceTasksTest, self).tearDown() os.remove(self.cloudinit) os.remove(self.guestconfig) InstanceServiceStatus.find_by = self.orig_ISS_find_by DBInstance.find_by = self.orig_DBI_find_by def test_create_instance_userdata(self): cloudinit_location = os.path.dirname(self.cloudinit) datastore_manager = os.path.splitext(os.path.basename(self. 
cloudinit))[0] def fake_conf_getter(*args, **kwargs): if args[0] == 'cloudinit_location': return cloudinit_location else: return '' self.task_models_conf_mock.get.side_effect = fake_conf_getter server = self.freshinstancetasks._create_server( None, None, None, datastore_manager, None, None, None) self.assertEqual(server.userdata, self.userdata) def test_create_instance_guestconfig(self): def fake_conf_getter(*args, **kwargs): if args[0] == 'guest_config': return self.guestconfig if args[0] == 'guest_info': return 'guest_info.conf' if args[0] == 'injected_config_location': return '/etc/trove/conf.d' else: return '' self.task_models_conf_mock.get.side_effect = fake_conf_getter # execute files = self.freshinstancetasks._get_injected_files("test") # verify self.assertTrue( '/etc/trove/conf.d/guest_info.conf' in files) self.assertTrue( '/etc/trove/conf.d/trove-guestagent.conf' in files) self.assertEqual( self.guestconfig_content, files['/etc/trove/conf.d/trove-guestagent.conf']) def test_create_instance_guestconfig_compat(self): def fake_conf_getter(*args, **kwargs): if args[0] == 'guest_config': return self.guestconfig if args[0] == 'guest_info': return '/etc/guest_info' if args[0] == 'injected_config_location': return '/etc' else: return '' self.task_models_conf_mock.get.side_effect = fake_conf_getter # execute files = self.freshinstancetasks._get_injected_files("test") # verify self.assertTrue( '/etc/guest_info' in files) self.assertTrue( '/etc/trove-guestagent.conf' in files) self.assertEqual( self.guestconfig_content, files['/etc/trove-guestagent.conf']) def test_create_instance_with_az_kwarg(self): self.task_models_conf_mock.get.return_value = '' # execute server = self.freshinstancetasks._create_server( None, None, None, None, None, availability_zone='nova', nics=None) # verify self.assertIsNotNone(server) def test_create_instance_with_az(self): self.task_models_conf_mock.get.return_value = '' # execute server = self.freshinstancetasks._create_server( None, None, None, None, None, 'nova', None) # verify self.assertIsNotNone(server) def test_create_instance_with_az_none(self): self.task_models_conf_mock.get.return_value = '' # execute server = self.freshinstancetasks._create_server( None, None, None, None, None, None, None) # verify self.assertIsNotNone(server) @patch.object(InstanceServiceStatus, 'find_by', return_value=fake_InstanceServiceStatus.find_by()) @patch.object(DBInstance, 'find_by', return_value=fake_DBInstance.find_by()) @patch('trove.taskmanager.models.LOG') def test_update_status_of_instance_failure( self, mock_logging, dbi_find_by_mock, iss_find_by_mock): self.task_models_conf_mock.get.return_value = '' self.freshinstancetasks.update_statuses_on_time_out() self.assertEqual(ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT, fake_InstanceServiceStatus.find_by().get_status()) self.assertEqual(InstanceTasks.BUILDING_ERROR_TIMEOUT_GA, fake_DBInstance.find_by().get_task_status()) def test_create_sg_rules_success(self): datastore_manager = 'mysql' self.task_models_conf_mock.get = Mock(return_value=FakeOptGroup()) self.freshinstancetasks._create_secgroup(datastore_manager) self.assertEqual(2, taskmanager_models.SecurityGroupRule. 
create_sec_group_rule.call_count) def test_create_sg_rules_format_exception_raised(self): datastore_manager = 'mysql' self.task_models_conf_mock.get = Mock( return_value=FakeOptGroup(tcp_ports=['3306', '-3306'])) self.freshinstancetasks.update_db = Mock() self.assertRaises(MalformedSecurityGroupRuleError, self.freshinstancetasks._create_secgroup, datastore_manager) def test_create_sg_rules_greater_than_exception_raised(self): datastore_manager = 'mysql' self.task_models_conf_mock.get = Mock( return_value=FakeOptGroup(tcp_ports=['3306', '33060-3306'])) self.freshinstancetasks.update_db = Mock() self.assertRaises(MalformedSecurityGroupRuleError, self.freshinstancetasks._create_secgroup, datastore_manager) def test_create_sg_rules_success_with_duplicated_port_or_range(self): datastore_manager = 'mysql' self.task_models_conf_mock.get = Mock( return_value=FakeOptGroup( tcp_ports=['3306', '3306', '3306-3307', '3306-3307'])) self.freshinstancetasks.update_db = Mock() self.freshinstancetasks._create_secgroup(datastore_manager) self.assertEqual(2, taskmanager_models.SecurityGroupRule. create_sec_group_rule.call_count) def test_create_sg_rules_exception_with_malformed_ports_or_range(self): datastore_manager = 'mysql' self.task_models_conf_mock.get = Mock( return_value=FakeOptGroup(tcp_ports=['A', 'B-C'])) self.freshinstancetasks.update_db = Mock() self.assertRaises(MalformedSecurityGroupRuleError, self.freshinstancetasks._create_secgroup, datastore_manager) @patch.object(BaseInstance, 'update_db') @patch('trove.taskmanager.models.CONF') @patch('trove.taskmanager.models.LOG') def test_error_sec_group_create_instance(self, mock_logging, mock_conf, mock_update_db): mock_conf.get = Mock( return_value=FakeOptGroup(tcp_ports=['3306', '-3306'])) mock_flavor = {'id': 7, 'ram': 256, 'name': 'smaller_flavor'} self.assertRaisesRegexp( TroveError, 'Error creating security group for instance', self.freshinstancetasks.create_instance, mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, None, None, None, None, Mock(), None, None, None, None) @patch.object(BaseInstance, 'update_db') @patch.object(backup_models.Backup, 'get_by_id') @patch.object(taskmanager_models.FreshInstanceTasks, 'report_root_enabled') @patch.object(taskmanager_models.FreshInstanceTasks, '_get_injected_files') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_secgroup') @patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_server') @patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare') @patch.object(template, 'SingleInstanceConfigTemplate') @patch.object(template, 'OverrideConfigTemplate') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry', side_effect=TroveError) @patch('trove.taskmanager.models.LOG') def test_error_create_dns_entry_create_instance(self, *args): mock_flavor = {'id': 6, 'ram': 512, 'name': 'big_flavor'} self.assertRaisesRegexp( TroveError, 'Error creating DNS entry for instance', self.freshinstancetasks.create_instance, mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, Mock(), None, 'root_password', None, Mock(), None, None, None, None) @patch.object(BaseInstance, 'update_db') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry') @patch.object(taskmanager_models.FreshInstanceTasks, '_get_injected_files') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_server') @patch.object(taskmanager_models.FreshInstanceTasks, 
'_create_secgroup') @patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info') @patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare') @patch.object(template, 'SingleInstanceConfigTemplate') def test_create_instance(self, mock_single_instance_template, mock_guest_prepare, mock_build_volume_info, mock_create_secgroup, *args): mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} config_content = {'config_contents': 'some junk'} mock_single_instance_template.return_value.config_contents = ( config_content) overrides = Mock() self.freshinstancetasks.create_instance(mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, None, None, None, None, overrides, None, None, 'volume_type', None) mock_create_secgroup.assert_called_with('mysql') mock_build_volume_info.assert_called_with('mysql', volume_size=2, volume_type='volume_type') mock_guest_prepare.assert_called_with( 768, mock_build_volume_info(), 'mysql-server', None, None, None, config_content, None, overrides, None, None, None) @patch.object(trove.guestagent.api.API, 'attach_replication_slave') @patch.object(rpc, 'get_client') def test_attach_replication_slave(self, mock_get_client, mock_attach_replication_slave): mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} snapshot = {'replication_strategy': 'MysqlGTIDReplication', 'master': {'id': 'master-id'}} config_content = {'config_contents': 'some junk'} replica_config = MagicMock() replica_config.config_contents = config_content with patch.object(taskmanager_models.FreshInstanceTasks, '_render_replica_config', return_value=replica_config): self.freshinstancetasks.attach_replication_slave(snapshot, mock_flavor) mock_attach_replication_slave.assert_called_with(snapshot, config_content) @patch.object(BaseInstance, 'update_db') @patch.object(rpc, 'get_client') @patch.object(taskmanager_models.FreshInstanceTasks, '_render_replica_config') @patch.object(trove.guestagent.api.API, 'attach_replication_slave', side_effect=GuestError) @patch('trove.taskmanager.models.LOG') def test_error_attach_replication_slave(self, *args): mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} snapshot = {'replication_strategy': 'MysqlGTIDReplication', 'master': {'id': 'master-id'}} self.assertRaisesRegexp( TroveError, 'Error attaching instance', self.freshinstancetasks.attach_replication_slave, snapshot, mock_flavor) class ResizeVolumeTest(trove_testtools.TestCase): def setUp(self): super(ResizeVolumeTest, self).setUp() self.utils_poll_until_patch = patch.object(utils, 'poll_until') self.utils_poll_until_mock = self.utils_poll_until_patch.start() self.addCleanup(self.utils_poll_until_patch.stop) self.timeutils_isotime_patch = patch.object(timeutils, 'isotime') self.timeutils_isotime_mock = self.timeutils_isotime_patch.start() self.addCleanup(self.timeutils_isotime_patch.stop) self.instance = Mock() self.old_vol_size = 1 self.new_vol_size = 2 self.action = taskmanager_models.ResizeVolumeAction(self.instance, self.old_vol_size, self.new_vol_size) class FakeGroup(): def __init__(self): self.mount_point = 'var/lib/mysql' self.device_path = '/dev/vdb' self.taskmanager_models_CONF = patch.object(taskmanager_models, 'CONF') self.mock_conf = self.taskmanager_models_CONF.start() self.mock_conf.get = Mock(return_value=FakeGroup()) self.addCleanup(self.taskmanager_models_CONF.stop) def tearDown(self): super(ResizeVolumeTest, self).tearDown() @patch('trove.taskmanager.models.LOG') def test_resize_volume_unmount_exception(self, mock_logging): 
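        # A failed unmount must fire the restart recovery hook exactly once
        # before the GuestError propagates.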
self.instance.guest.unmount_volume = Mock( side_effect=GuestError("test exception")) self.assertRaises(GuestError, self.action._unmount_volume, recover_func=self.action._recover_restart) self.assertEqual(1, self.instance.restart.call_count) self.instance.guest.unmount_volume.side_effect = None self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_detach_exception(self, mock_logging): self.instance.nova_client.volumes.delete_server_volume = Mock( side_effect=nova_exceptions.ClientException("test exception")) self.assertRaises(nova_exceptions.ClientException, self.action._detach_volume, recover_func=self.action._recover_mount_restart) self.assertEqual(1, self.instance.guest.mount_volume.call_count) self.assertEqual(1, self.instance.restart.call_count) self.instance.nova_client.volumes.delete_server_volume.side_effect = ( None) self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_extend_exception(self, mock_logging): self.instance.volume_client.volumes.extend = Mock( side_effect=cinder_exceptions.ClientException("test exception")) self.assertRaises(cinder_exceptions.ClientException, self.action._extend, recover_func=self.action._recover_full) attach_count = ( self.instance.nova_client.volumes.create_server_volume.call_count) self.assertEqual(1, attach_count) self.assertEqual(1, self.instance.guest.mount_volume.call_count) self.assertEqual(1, self.instance.restart.call_count) self.instance.volume_client.volumes.extend.side_effect = None self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_verify_extend_no_volume(self, mock_logging): self.instance.volume_client.volumes.get = Mock( return_value=None) self.assertRaises(cinder_exceptions.ClientException, self.action._verify_extend) self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_poll_timeout(self, mock_logging): utils.poll_until = Mock(side_effect=PollTimeOut) self.assertRaises(PollTimeOut, self.action._verify_extend) self.assertEqual(2, self.instance.volume_client.volumes.get.call_count) utils.poll_until.side_effect = None self.instance.reset_mock() @patch.object(TroveInstanceModifyVolume, 'notify') def test_resize_volume_active_server_succeeds(self, *args): server = Mock(status=InstanceStatus.ACTIVE) self.instance.attach_mock(server, 'server') self.action.execute() self.assertEqual(1, self.instance.guest.stop_db.call_count) self.assertEqual(1, self.instance.guest.unmount_volume.call_count) detach_count = ( self.instance.nova_client.volumes.delete_server_volume.call_count) self.assertEqual(1, detach_count) extend_count = self.instance.volume_client.volumes.extend.call_count self.assertEqual(1, extend_count) attach_count = ( self.instance.nova_client.volumes.create_server_volume.call_count) self.assertEqual(1, attach_count) self.assertEqual(1, self.instance.guest.resize_fs.call_count) self.assertEqual(1, self.instance.guest.mount_volume.call_count) self.assertEqual(1, self.instance.restart.call_count) self.instance.reset_mock() def test_resize_volume_server_error_fails(self): server = Mock(status=InstanceStatus.ERROR) self.instance.attach_mock(server, 'server') self.assertRaises(TroveError, self.action.execute) self.instance.reset_mock() class BuiltInstanceTasksTest(trove_testtools.TestCase): def get_inst_service_status(self, status_id, statuses): answers = [] for i, status in enumerate(statuses): inst_svc_status = InstanceServiceStatus(status, id="%s-%s" % (status_id, i)) inst_svc_status.save = 
MagicMock(return_value=None) answers.append(inst_svc_status) return answers def _stub_volume_client(self): self.instance_task._volume_client = MagicMock(spec=cinderclient.Client) stub_volume_mgr = MagicMock(spec=cinderclient.volumes.VolumeManager) self.instance_task.volume_client.volumes = stub_volume_mgr stub_volume_mgr.extend = MagicMock(return_value=None) stub_new_volume = cinderclient.volumes.Volume( stub_volume_mgr, {'status': 'available', 'size': 2}, True) stub_volume_mgr.get = MagicMock(return_value=stub_new_volume) stub_volume_mgr.attach = MagicMock(return_value=None) def setUp(self): super(BuiltInstanceTasksTest, self).setUp() self.new_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} stub_nova_server = MagicMock() self.rpc_patches = patch.multiple( rpc, get_notifier=MagicMock(), get_client=MagicMock()) self.rpc_mocks = self.rpc_patches.start() self.addCleanup(self.rpc_patches.stop) db_instance = DBInstance(InstanceTasks.NONE, id=INST_ID, name='resize-inst-name', datastore_version_id='1', datastore_id='id-1', flavor_id='6', manager='mysql', created=datetime.datetime.utcnow(), updated=datetime.datetime.utcnow(), compute_instance_id='computeinst-id-1', tenant_id='testresize-tenant-id', volume_size='1', volume_id=VOLUME_ID) # this is used during the final check of whether the resize successful db_instance.server_status = 'ACTIVE' self.db_instance = db_instance self.dm_dv_load_by_uuid_patch = patch.object( datastore_models.DatastoreVersion, 'load_by_uuid', MagicMock( return_value=datastore_models.DatastoreVersion(db_instance))) self.dm_dv_load_by_uuid_mock = self.dm_dv_load_by_uuid_patch.start() self.addCleanup(self.dm_dv_load_by_uuid_patch.stop) self.dm_ds_load_patch = patch.object( datastore_models.Datastore, 'load', MagicMock( return_value=datastore_models.Datastore(db_instance))) self.dm_ds_load_mock = self.dm_ds_load_patch.start() self.addCleanup(self.dm_ds_load_patch.stop) self.instance_task = taskmanager_models.BuiltInstanceTasks( trove.common.context.TroveContext(), db_instance, stub_nova_server, InstanceServiceStatus(ServiceStatuses.RUNNING, id='inst-stat-id-0')) self.instance_task._guest = MagicMock(spec=trove.guestagent.api.API) self.instance_task._nova_client = MagicMock( spec=novaclient.client) self.stub_server_mgr = MagicMock( spec=novaclient.v2.servers.ServerManager) self.stub_running_server = MagicMock( spec=novaclient.v2.servers.Server) self.stub_running_server.status = 'ACTIVE' self.stub_running_server.flavor = {'id': 6, 'ram': 512} self.stub_verifying_server = MagicMock( spec=novaclient.v2.servers.Server) self.stub_verifying_server.status = 'VERIFY_RESIZE' self.stub_verifying_server.flavor = {'id': 8, 'ram': 768} self.stub_server_mgr.get = MagicMock( return_value=self.stub_verifying_server) self.instance_task._nova_client.servers = self.stub_server_mgr stub_flavor_manager = MagicMock( spec=novaclient.v2.flavors.FlavorManager) self.instance_task._nova_client.flavors = stub_flavor_manager nova_flavor = novaclient.v2.flavors.Flavor(stub_flavor_manager, self.new_flavor, True) stub_flavor_manager.get = MagicMock(return_value=nova_flavor) answers = (status for status in self.get_inst_service_status('inst_stat-id', [ServiceStatuses.SHUTDOWN, ServiceStatuses.RUNNING, ServiceStatuses.RUNNING, ServiceStatuses.RUNNING])) def side_effect_func(*args, **kwargs): if 'instance_id' in kwargs: return answers.next() elif ('id' in kwargs and 'deleted' in kwargs and not kwargs['deleted']): return db_instance else: return MagicMock() self.dbm_dbmb_patch = patch.object( 
trove.db.models.DatabaseModelBase, 'find_by', MagicMock(side_effect=side_effect_func)) self.dbm_dbmb_mock = self.dbm_dbmb_patch.start() self.addCleanup(self.dbm_dbmb_patch.stop) self.template_patch = patch.object( template, 'SingleInstanceConfigTemplate', MagicMock(spec=template.SingleInstanceConfigTemplate)) self.template_mock = self.template_patch.start() self.addCleanup(self.template_patch.stop) db_instance.save = MagicMock(return_value=None) self.tbmb_running_patch = patch.object( trove.backup.models.Backup, 'running', MagicMock(return_value=None)) self.tbmb_running_mock = self.tbmb_running_patch.start() self.addCleanup(self.tbmb_running_patch.stop) if 'volume' in self._testMethodName: self._stub_volume_client() stub_floating_ips_manager = MagicMock( spec=novaclient.v2.floating_ips.FloatingIPManager) self.instance_task._nova_client.floating_ips = ( stub_floating_ips_manager) floatingip = novaclient.v2.floating_ips.FloatingIP( stub_floating_ips_manager, {'ip': '192.168.10.1'}, True) stub_floating_ips_manager.list = MagicMock(return_value=[floatingip]) def tearDown(self): super(BuiltInstanceTasksTest, self).tearDown() def test_resize_flavor(self): orig_server = self.instance_task.server self.instance_task.resize_flavor({'id': 1, 'ram': 512}, self.new_flavor) # verify self.assertIsNot(self.instance_task.server, orig_server) self.instance_task._guest.stop_db.assert_any_call( do_not_start_on_reboot=True) orig_server.resize.assert_any_call(self.new_flavor['id']) self.assertThat(self.db_instance.task_status, Is(InstanceTasks.NONE)) self.assertEqual(1, self.stub_server_mgr.get.call_count) self.assertThat(self.db_instance.flavor_id, Is(self.new_flavor['id'])) @patch('trove.taskmanager.models.LOG') def test_resize_flavor_resize_failure(self, mock_logging): orig_server = self.instance_task.server self.stub_verifying_server.status = 'ERROR' with patch.object(self.instance_task._nova_client.servers, 'get', return_value=self.stub_verifying_server): # execute self.assertRaises(TroveError, self.instance_task.resize_flavor, {'id': 1, 'ram': 512}, self.new_flavor) # verify self.assertTrue(self.stub_server_mgr.get.called) self.assertIs(self.instance_task.server, self.stub_verifying_server) self.instance_task._guest.stop_db.assert_any_call( do_not_start_on_reboot=True) orig_server.resize.assert_any_call(self.new_flavor['id']) self.assertThat(self.db_instance.task_status, Is(InstanceTasks.NONE)) self.assertThat(self.db_instance.flavor_id, Is('6')) @patch.object(utils, 'poll_until') def test_reboot(self, mock_poll): self.instance_task.datastore_status_matches = Mock(return_value=True) self.instance_task._refresh_datastore_status = Mock() self.instance_task.server.reboot = Mock() self.instance_task.set_datastore_status_to_paused = Mock() self.instance_task.reboot() self.instance_task._guest.stop_db.assert_any_call() self.instance_task._refresh_datastore_status.assert_any_call() self.instance_task.server.reboot.assert_any_call() self.instance_task.set_datastore_status_to_paused.assert_any_call() @patch.object(utils, 'poll_until') @patch('trove.taskmanager.models.LOG') def test_reboot_datastore_not_ready(self, mock_logging, mock_poll): self.instance_task.datastore_status_matches = Mock(return_value=False) self.instance_task._refresh_datastore_status = Mock() self.instance_task.server.reboot = Mock() self.instance_task.set_datastore_status_to_paused = Mock() self.instance_task.reboot() self.instance_task._guest.stop_db.assert_any_call() self.instance_task._refresh_datastore_status.assert_any_call() assert not 
self.instance_task.server.reboot.called assert not self.instance_task.set_datastore_status_to_paused.called @patch.object(BaseInstance, 'update_db') def test_detach_replica(self, mock_update_db): self.instance_task.detach_replica(Mock(), True) self.instance_task._guest.detach_replica.assert_called_with(True) mock_update_db.assert_called_with(slave_of_id=None) @patch('trove.taskmanager.models.LOG') def test_error_detach_replica(self, mock_logging): with patch.object(self.instance_task._guest, 'detach_replica', side_effect=GuestError): self.assertRaises(GuestError, self.instance_task.detach_replica, Mock(), True) @patch.object(BaseInstance, 'update_db') def test_make_read_only(self, mock_update_db): read_only = MagicMock() self.instance_task.make_read_only(read_only) self.instance_task._guest.make_read_only.assert_called_with(read_only) @patch.object(BaseInstance, 'update_db') def test_attach_replica(self, mock_update_db): master = MagicMock() replica_context = trove_testtools.TroveTestContext(self) mock_guest = MagicMock() mock_guest.get_replica_context = Mock(return_value=replica_context) type(master).guest = PropertyMock(return_value=mock_guest) config_content = {'config_contents': 'some junk'} replica_config = MagicMock() replica_config.config_contents = config_content with patch.object(taskmanager_models.BuiltInstanceTasks, '_render_replica_config', return_value=replica_config): self.instance_task.attach_replica(master) self.instance_task._guest.attach_replica.assert_called_with( replica_context, config_content) mock_update_db.assert_called_with(slave_of_id=master.id) @patch('trove.taskmanager.models.LOG') def test_error_attach_replica(self, mock_logging): with patch.object(self.instance_task._guest, 'attach_replica', side_effect=GuestError): self.assertRaises(GuestError, self.instance_task.attach_replica, Mock()) def test_get_floating_ips(self): floating_ips = self.instance_task._get_floating_ips() self.assertEqual('192.168.10.1', floating_ips['192.168.10.1'].ip) @patch.object(BaseInstance, 'get_visible_ip_addresses', return_value=['192.168.10.1']) def test_detach_public_ips(self, mock_address): removed_ips = self.instance_task.detach_public_ips() self.assertEqual(['192.168.10.1'], removed_ips) def test_attach_public_ips(self): self.instance_task.attach_public_ips(['192.168.10.1']) self.stub_verifying_server.add_floating_ip.assert_called_with( '192.168.10.1') @patch.object(BaseInstance, 'update_db') def test_enable_as_master(self, mock_update_db): test_func = self.instance_task._guest.enable_as_master config_content = {'config_contents': 'some junk'} replica_source_config = MagicMock() replica_source_config.config_contents = config_content with patch.object(self.instance_task, '_render_replica_source_config', return_value=replica_source_config): self.instance_task.enable_as_master() mock_update_db.assert_called_with(slave_of_id=None) test_func.assert_called_with(config_content) def test_get_last_txn(self): self.instance_task.get_last_txn() self.instance_task._guest.get_last_txn.assert_any_call() def test_get_latest_txn_id(self): self.instance_task.get_latest_txn_id() self.instance_task._guest.get_latest_txn_id.assert_any_call() def test_wait_for_txn(self): self.instance_task.wait_for_txn(None) self.instance_task._guest.wait_for_txn.assert_not_called() txn = Mock() self.instance_task.wait_for_txn(txn) self.instance_task._guest.wait_for_txn.assert_called_with(txn) def test_cleanup_source_on_replica_detach(self): test_func = self.instance_task._guest.cleanup_source_on_replica_detach 
        replica_info = Mock()
        self.instance_task.cleanup_source_on_replica_detach(replica_info)
        test_func.assert_called_with(replica_info)

    def test_demote_replication_master(self):
        self.instance_task.demote_replication_master()
        self.instance_task._guest.demote_replication_master.assert_any_call()


class BackupTasksTest(trove_testtools.TestCase):
    def setUp(self):
        super(BackupTasksTest, self).setUp()
        self.backup = backup_models.DBBackup()
        self.backup.id = 'backup_id'
        self.backup.name = 'backup_test'
        self.backup.description = 'test desc'
        self.backup.location = 'http://xxx/z_CLOUD/12e48.xbstream.gz'
        self.backup.instance_id = 'instance id'
        self.backup.created = 'yesterday'
        self.backup.updated = 'today'
        self.backup.size = 2.0
        self.backup.state = state.BackupState.NEW
        self.container_content = (None,
                                  [{'name': 'first'},
                                   {'name': 'second'},
                                   {'name': 'third'}])
        self.bm_backup_patches = patch.multiple(
            backup_models.Backup,
            delete=MagicMock(return_value=None),
            get_by_id=MagicMock(return_value=self.backup))
        self.bm_backup_mocks = self.bm_backup_patches.start()
        self.addCleanup(self.bm_backup_patches.stop)
        self.bm_DBBackup_patch = patch.object(
            backup_models.DBBackup, 'save',
            MagicMock(return_value=self.backup))
        self.bm_DBBackup_mock = self.bm_DBBackup_patch.start()
        self.addCleanup(self.bm_DBBackup_patch.stop)
        self.backup.delete = MagicMock(return_value=None)
        self.swift_client = MagicMock()
        self.create_swift_client_patch = patch.object(
            remote, 'create_swift_client',
            MagicMock(return_value=self.swift_client))
        self.create_swift_client_mock = self.create_swift_client_patch.start()
        self.addCleanup(self.create_swift_client_patch.stop)
        self.swift_client.head_container = MagicMock(
            side_effect=ClientException("foo"))
        self.swift_client.head_object = MagicMock(
            side_effect=ClientException("foo"))
        self.swift_client.get_container = MagicMock(
            return_value=self.container_content)
        self.swift_client.delete_object = MagicMock(return_value=None)
        self.swift_client.delete_container = MagicMock(return_value=None)

    def tearDown(self):
        super(BackupTasksTest, self).tearDown()

    def test_delete_backup_nolocation(self):
        self.backup.location = ''
        taskmanager_models.BackupTasks.delete_backup('dummy context',
                                                     self.backup.id)
        self.backup.delete.assert_any_call()

    @patch('trove.taskmanager.models.LOG')
    def test_delete_backup_fail_delete_manifest(self, mock_logging):
        with patch.object(self.swift_client, 'delete_object',
                          side_effect=ClientException("foo")):
            with patch.object(self.swift_client, 'head_object',
                              return_value={}):
                self.assertRaises(
                    TroveError,
                    taskmanager_models.BackupTasks.delete_backup,
                    'dummy context', self.backup.id)
                self.assertFalse(backup_models.Backup.delete.called)
                self.assertEqual(
                    state.BackupState.DELETE_FAILED, self.backup.state,
                    "backup should be in DELETE_FAILED status")

    @patch('trove.taskmanager.models.LOG')
    def test_delete_backup_fail_delete_segment(self, mock_logging):
        with patch.object(self.swift_client, 'delete_object',
                          side_effect=ClientException("foo")):
            self.assertRaises(
                TroveError,
                taskmanager_models.BackupTasks.delete_backup,
                'dummy context', self.backup.id)
            self.assertFalse(backup_models.Backup.delete.called)
            self.assertEqual(
                state.BackupState.DELETE_FAILED, self.backup.state,
                "backup should be in DELETE_FAILED status")

    def test_parse_manifest(self):
        manifest = 'container/prefix'
        cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest)
        self.assertEqual('container', cont)
        self.assertEqual('prefix', prefix)

    def test_parse_manifest_bad(self):
        manifest = 'bad_prefix'
        cont, prefix =
taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertIsNone(cont) self.assertIsNone(prefix) def test_parse_manifest_long(self): manifest = 'container/long/path/to/prefix' cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertEqual('container', cont) self.assertEqual('long/path/to/prefix', prefix) def test_parse_manifest_short(self): manifest = 'container/' cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertEqual('container', cont) self.assertEqual('', prefix) class NotifyMixinTest(trove_testtools.TestCase): def test_get_service_id(self): id_map = { 'mysql': '123', 'percona': 'abc' } mixin = taskmanager_models.NotifyMixin() self.assertThat(mixin._get_service_id('mysql', id_map), Equals('123')) @patch('trove.taskmanager.models.LOG') def test_get_service_id_unknown(self, mock_logging): id_map = { 'mysql': '123', 'percona': 'abc' } transformer = taskmanager_models.NotifyMixin() self.assertThat(transformer._get_service_id('m0ng0', id_map), Equals('unknown-service-id-error')) class RootReportTest(trove_testtools.TestCase): def setUp(self): super(RootReportTest, self).setUp() util.init_db() def tearDown(self): super(RootReportTest, self).tearDown() def test_report_root_first_time(self): report = mysql_models.RootHistory.create( None, utils.generate_uuid(), 'root') self.assertIsNotNone(report) def test_report_root_double_create(self): uuid = utils.generate_uuid() history = mysql_models.RootHistory(uuid, 'root').save() with patch.object(mysql_models.RootHistory, 'load', Mock(return_value=history)): report = mysql_models.RootHistory.create( None, uuid, 'root') self.assertTrue(mysql_models.RootHistory.load.called) self.assertEqual(history.user, report.user) self.assertEqual(history.id, report.id) class ClusterRootTest(trove_testtools.TestCase): @patch.object(common_models.RootHistory, "create") @patch.object(common_models.Root, "create") def test_cluster_root_create(self, root_create, root_history_create): context = Mock() uuid = utils.generate_uuid() user = "root" password = "rootpassword" cluster_instances = [utils.generate_uuid(), utils.generate_uuid()] common_models.ClusterRoot.create(context, uuid, user, password, cluster_instances) root_create.assert_called_with(context, uuid, user, password, cluster_instances_list=None) self.assertEqual(2, root_history_create.call_count) calls = [ call(context, cluster_instances[0], user), call(context, cluster_instances[1], user) ] root_history_create.assert_has_calls(calls) trove-5.0.0/trove/tests/unittests/taskmanager/test_clusters.py0000664000567000056710000005400612701410316026131 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
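# A note on the _parse_manifest cases exercised in the test module above:
# they pin down a simple "container/prefix" split contract. The sketch below
# is an illustrative reconstruction of that contract from the assertions
# alone -- it is not the real
# trove.taskmanager.models.BackupTasks._parse_manifest, and the name
# _parse_manifest_sketch is invented for this example.
def _parse_manifest_sketch(manifest):
    # 'container/prefix'              -> ('container', 'prefix')
    # 'container/long/path/to/prefix' -> ('container', 'long/path/to/prefix')
    # 'container/' (empty prefix)     -> ('container', '')
    # 'bad_prefix' (no '/')           -> (None, None)
    if '/' not in manifest:
        return None, None
    container, _, prefix = manifest.partition('/')
    return container, prefix


assert _parse_manifest_sketch('container/long/path/to/prefix') == (
    'container', 'long/path/to/prefix')
assert _parse_manifest_sketch('container/') == ('container', '')
assert _parse_manifest_sketch('bad_prefix') == (None, None)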
import datetime from mock import Mock from mock import patch from trove.cluster.models import ClusterTasks as ClusterTaskStatus from trove.cluster.models import DBCluster from trove.common.strategies.cluster.experimental.mongodb.taskmanager import ( MongoDbClusterTasks as ClusterTasks) from trove.common import utils from trove.datastore import models as datastore_models from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceTasks from trove.taskmanager.models import ServiceStatuses from trove.tests.unittests import trove_testtools class MongoDbClusterTasksTest(trove_testtools.TestCase): def setUp(self): super(MongoDbClusterTasksTest, self).setUp() self.cluster_id = "1232" self.cluster_name = "Cluster-1234" self.tenant_id = "6789" self.db_cluster = DBCluster(ClusterTaskStatus.NONE, id=self.cluster_id, created=str(datetime.date), updated=str(datetime.date), name=self.cluster_name, task_id=ClusterTaskStatus.NONE._code, tenant_id=self.tenant_id, datastore_version_id="1", deleted=False) self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="member") self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2", compute_instance_id="compute-2", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-2", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="member") self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="mongos", compute_instance_id="compute-3", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-3", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="query_router") self.dbinst4 = DBInstance(InstanceTasks.NONE, id="4", name="configserver", compute_instance_id="compute-4", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-4", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="config_server") mock_ds1 = Mock() mock_ds1.name = 'mongodb' mock_dv1 = Mock() mock_dv1.name = '2.0.4' self.clustertasks = ClusterTasks(Mock(), self.db_cluster, datastore=mock_ds1, datastore_version=mock_dv1) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_bad_status(self, mock_logging, mock_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.FAILED ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(InstanceServiceStatus, 'find_by') def test_all_instances_ready(self, mock_find): (mock_find.return_value. 
get_status.return_value) = ServiceStatuses.INSTANCE_READY ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.mongodb.taskmanager.LOG') def test_init_replica_set_failure(self, mock_logging, mock_dv, mock_ds, mock_ip, mock_guest, mock_update): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) primary_member = member1 other_members = [member2] mock_ip.side_effect = ["10.0.0.3"] mock_guest().prep_primary.return_value = Mock() mock_guest().add_members.return_value = Mock() mock_guest.return_value.add_members = Mock( side_effect=Exception("Boom!")) ret_val = self.clustertasks._init_replica_set(primary_member, other_members) mock_update.assert_called_with(self.cluster_id, shard_id='shard-1') self.assertFalse(ret_val) @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_init_replica_set(self, mock_dv, mock_ds, mock_ip, mock_guest): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) primary_member = member1 other_members = [member2] mock_ip.side_effect = ["10.0.0.3"] mock_guest().prep_primary.return_value = Mock() mock_guest().add_members.return_value = Mock() ret_val = self.clustertasks._init_replica_set(primary_member, other_members) mock_guest.return_value.add_members.assert_called_with( ["10.0.0.3"] ) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, '_init_replica_set') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.mongodb.taskmanager.LOG') def test_create_shard_failure(self, mock_logging, mock_dv, mock_ds, mock_ip, mock_guest, mock_init_rs, mock_update): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) members = [member1, member2] mock_ip.side_effect = ["10.0.0.2"] query_router = [ BaseInstance(Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) ] mock_guest().get_replica_set_name.return_value = 'testrs' mock_add_shard = Mock(side_effect=Exception("Boom!")) mock_guest().add_shard = mock_add_shard ret_val = self.clustertasks._create_shard(query_router, members) mock_init_rs.assert_called_with(member1, [member2]) mock_update.assert_called_with(self.cluster_id, shard_id="shard-1") self.assertFalse(ret_val) @patch.object(ClusterTasks, '_init_replica_set') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_shard(self, mock_dv, 
mock_ds, mock_ip, mock_guest, mock_init_rs): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) members = [member1, member2] mock_ip.side_effect = ["10.0.0.2"] query_router = [ BaseInstance(Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) ] mock_guest().get_replica_set_name.return_value = 'testrs' mock_add_shard = Mock() mock_guest().add_shard = mock_add_shard ret_val = self.clustertasks._create_shard(query_router, members) mock_init_rs.assert_called_with(member1, [member2]) mock_add_shard.assert_called_with("testrs", "10.0.0.2") self.assertTrue(ret_val) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, '_create_shard') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(Instance, 'load') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(DBInstance, 'find_all') def test_add_shard_cluster(self, mock_find_all, mock_all_instances_ready, mock_load, mock_dv, mock_ds, mock_add_shard, mock_guest, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1, self.dbinst2, self.dbinst3, self.dbinst4] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_all_instances_ready.return_value = True mock_add_shard.return_value = True mock_guest.return_value.cluster_complete.return_value = Mock() self.clustertasks.add_shard_cluster(Mock(), self.cluster_id, "shard-1", "rs1") mock_guest.return_value.cluster_complete.assert_called_with() mock_reset_task.assert_called_with() @patch.object(DBCluster, 'save') @patch.object(DBCluster, 'find_by') @patch.object(DBInstance, 'find_all') def test_delete_cluster(self, mock_find_all, mock_find_by, mock_save): mock_find_all.return_value.all.return_value = [] mock_find_by.return_value = self.db_cluster self.clustertasks.delete_cluster(Mock(), self.cluster_id) self.assertEqual(ClusterTaskStatus.NONE, self.db_cluster.task_status) mock_save.assert_called_with() @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, '_create_shard') @patch.object(ClusterTasks, 'get_guest') @patch.object(utils, 'generate_random_password', return_value='pwd') @patch.object(ClusterTasks, 'get_ip') @patch.object(Instance, 'load') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster(self, mock_dv, mock_ds, mock_find_all, mock_all_instances_ready, mock_load, mock_ip, mock_password, mock_guest, mock_create_shard, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1, self.dbinst2, self.dbinst3, self.dbinst4] mock_all_instances_ready.return_value = True member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) config_server = BaseInstance( Mock(), self.dbinst4, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_load.side_effect = [member1, member2, query_router, config_server] mock_ip.side_effect = ["10.0.0.5"] mock_create_shard.return_value = True 
self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_guest().add_config_servers.assert_called_with(["10.0.0.5"]) mock_guest().create_admin_user.assert_called_with("pwd") mock_create_shard.assert_called_with( query_router, [member1, member2] ) self.assertEqual(4, mock_guest().cluster_complete.call_count) mock_reset_task.assert_called_with() @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_cluster_admin_password') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.mongodb.taskmanager.LOG') def test_add_query_routers_failure(self, mock_logging, mock_dv, mock_ds, mock_password, mock_guest, mock_update): query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_guest.side_effect = Exception("Boom!") ret_val = self.clustertasks._add_query_routers([query_router], ['10.0.0.5']) mock_update.assert_called_with(self.cluster_id) self.assertFalse(ret_val) @patch.object(ClusterTasks, 'get_guest') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_add_query_routers(self, mock_dv, mock_ds, mock_guest): password = 'pwd' query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) ret_val = self.clustertasks._add_query_routers([query_router], ['10.0.0.5'], admin_password=password) mock_guest.assert_called_with(query_router) mock_guest().add_config_servers.assert_called_with(['10.0.0.5']) mock_guest().store_admin_password.assert_called_with(password) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'get_guest') @patch.object(utils, 'generate_random_password') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_add_query_routers_new_cluster(self, mock_dv, mock_ds, mock_gen_password, mock_guest): password = 'pwd' query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_gen_password.return_value = password ret_val = self.clustertasks._add_query_routers([query_router], ['10.0.0.5']) mock_guest.assert_called_with(query_router) mock_guest().add_config_servers.assert_called_with(['10.0.0.5']) mock_guest().create_admin_user.assert_called_with(password) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(DBInstance, 'find_all') def _run_grow_cluster(self, mock_find_all, mock_all_instances_ready, mock_guest, mock_reset_task, new_instances_ids=None): mock_find_all().all.return_value = [self.dbinst1, self.dbinst2, self.dbinst3, self.dbinst4] mock_all_instances_ready.return_value = True self.clustertasks.grow_cluster(Mock(), self.cluster_id, new_instances_ids) self.assertEqual(len(new_instances_ids), mock_guest().cluster_complete.call_count) mock_reset_task.assert_called_with() @patch.object(ClusterTasks, '_add_query_routers') @patch.object(ClusterTasks, 'get_cluster_admin_password') @patch.object(ClusterTasks, 'get_ip') @patch.object(Instance, 'load') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_grow_cluster_query_router(self, mock_dv, mock_ds, mock_load, mock_ip, mock_get_password, mock_add_query_router): query_router = 
BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) config_server = BaseInstance( Mock(), self.dbinst4, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_load.side_effect = [query_router, config_server] mock_ip.return_value = '10.0.0.5' mock_add_query_router.return_value = True self._run_grow_cluster(new_instances_ids=[query_router.id]) mock_add_query_router.assert_called_with( [query_router], ['10.0.0.5'], admin_password=mock_get_password() ) @patch.object(ClusterTasks, '_create_shard') @patch.object(Instance, 'load') @patch.object(ClusterTasks, '_get_running_query_router_id') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_grow_cluster_shard(self, mock_dv, mock_ds, mock_running_qr_id, mock_load, mock_create_shard): mock_running_qr_id.return_value = '3' member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_load.side_effect = [member1, member2, query_router] mock_create_shard.return_value = True self._run_grow_cluster(new_instances_ids=[member1.id, member2.id]) mock_create_shard.assert_called_with( query_router, [member1, member2] ) trove-5.0.0/trove/tests/unittests/taskmanager/test_vertica_clusters.py0000664000567000056710000002473112701410316027650 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
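# The setUp methods in this module (and in the taskmanager test modules
# above) share one mock lifecycle idiom: start a patcher in setUp and
# register its stop() with addCleanup so the patch is unwound even when a
# later setUp step or the test itself raises. A minimal self-contained
# sketch of that idiom, assuming only the same external mock library these
# tests already import; the class and patch target below are invented for
# illustration and are not part of the Trove tree.
import unittest

from mock import patch


class PatcherLifecycleSketch(unittest.TestCase):

    def setUp(self):
        super(PatcherLifecycleSketch, self).setUp()
        # Start the patch once for the duration of each test...
        self.time_patch = patch('time.time', return_value=0.0)
        self.time_mock = self.time_patch.start()
        # ...and guarantee it is stopped no matter how the test exits.
        self.addCleanup(self.time_patch.stop)

    def test_patched(self):
        import time
        self.assertEqual(0.0, time.time())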
import datetime from mock import Mock from mock import patch from trove.cluster.models import ClusterTasks as ClusterTaskStatus from trove.cluster.models import DBCluster import trove.common.context as context from trove.common.exception import GuestError from trove.common.strategies.cluster.experimental.vertica.taskmanager import ( VerticaClusterTasks as ClusterTasks) from trove.common.strategies.cluster.experimental.vertica.taskmanager import ( VerticaTaskManagerAPI as task_api) from trove.common.strategies.cluster.experimental.vertica.taskmanager import ( VerticaTaskManagerStrategy as task_strategy) from trove.datastore import models as datastore_models from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceTasks from trove import rpc from trove.taskmanager.models import ServiceStatuses from trove.tests.unittests import trove_testtools class VerticaClusterTasksTest(trove_testtools.TestCase): def setUp(self): super(VerticaClusterTasksTest, self).setUp() self.cluster_id = "1232" self.cluster_name = "Cluster-1234" self.tenant_id = "6789" self.db_cluster = DBCluster(ClusterTaskStatus.NONE, id=self.cluster_id, created=str(datetime.date), updated=str(datetime.date), name=self.cluster_name, task_id=ClusterTaskStatus.NONE._code, tenant_id=self.tenant_id, datastore_version_id="1", deleted=False) self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, type="master") self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2", compute_instance_id="compute-2", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-2", datastore_version_id="1", cluster_id=self.cluster_id, type="member") self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3", compute_instance_id="compute-3", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-3", datastore_version_id="1", cluster_id=self.cluster_id, type="member") mock_ds1 = Mock() mock_ds1.name = 'vertica' mock_dv1 = Mock() mock_dv1.name = '7.1' self.clustertasks = ClusterTasks(Mock(), self.db_cluster, datastore=mock_ds1, datastore_version=mock_dv1) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_bad_status(self, mock_logging, mock_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.FAILED ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(InstanceServiceStatus, 'find_by') def test_all_instances_ready(self, mock_find): (mock_find.return_value. 
get_status.return_value) = ServiceStatuses.INSTANCE_READY ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, '_all_instances_ready', return_value=False) @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster_instance_not_ready(self, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_reset_task.assert_called_with() @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster(self, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_ip, mock_guest, mock_reset_task): cluster_instances = [self.dbinst1, self.dbinst2, self.dbinst3] for instance in cluster_instances: if instance['type'] == "master": mock_find_all.return_value.all.return_value = [self.dbinst1] mock_ready.return_value = True mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_ip.return_value = "10.0.0.2" self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_guest.return_value.install_cluster.assert_called_with( ['10.0.0.2']) mock_reset_task.assert_called_with() mock_guest.return_value.cluster_complete.assert_called_with() @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_ip') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.vertica.taskmanager.LOG') def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_ip, mock_reset_task, mock_update_status): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_ip.return_value = "10.0.0.2" guest_client = Mock() guest_client.install_cluster = Mock(side_effect=GuestError("Error")) with patch.object(ClusterTasks, 'get_guest', return_value=guest_client): self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_update_status.assert_called_with('1232') mock_reset_task.assert_called_with() class VerticaTaskManagerAPITest(trove_testtools.TestCase): @patch.object(rpc, 'get_client', Mock(return_value=Mock())) def setUp(self): super(VerticaTaskManagerAPITest, self).setUp() self.context = context.TroveContext() self.api = task_api(self.context) self.call_context = trove_testtools.TroveTestContext(self) self.api.client.prepare = Mock(return_value=self.call_context) self.call_context.cast = Mock() self.rpc_api_version = '1.0' def test_task_manager_api_cast(self): 
self.api._cast(method_name='test_method', version=self.rpc_api_version) self.call_context.cast.assert_called_with(self.context, 'test_method') class VerticaTaskManagerStrategyTest(trove_testtools.TestCase): def test_task_manager_cluster_tasks_class(self): vertica_strategy = task_strategy() self.assertFalse( hasattr(vertica_strategy.task_manager_cluster_tasks_class, 'rebuild_cluster')) self.assertTrue(callable( vertica_strategy.task_manager_cluster_tasks_class.create_cluster)) def test_task_manager_api_class(self): vertica_strategy = task_strategy() self.assertFalse(hasattr(vertica_strategy.task_manager_api_class, 'add_new_node')) self.assertTrue( callable(vertica_strategy.task_manager_api_class._cast)) trove-5.0.0/trove/tests/config.py0000664000567000056710000001563012701410316020134 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles configuration options for the tests. The tests are capable of running in other contexts, such as in a VM or against a real deployment. Using this configuration ensures we can run them in other environments if we choose to. """ from collections import Mapping import json import os # TODO(tim.simpson): I feel like this class already exists somewhere in core # Python. class FrozenDict(Mapping): def __init__(self, original): self.original = original def __len__(self): return self.original.__len__() def __iter__(self, *args, **kwargs): return self.original.__iter__(self, *args, **kwargs) def __getitem__(self, *args, **kwargs): return self.original.__getitem__(*args, **kwargs) def __str__(self): return self.original.__str__() USAGE_ENDPOINT = os.environ.get("USAGE_ENDPOINT", "trove.tests.util.usage.UsageVerifier") class TestConfig(object): """ Holds test configuration values which can be accessed as attributes or using the values dictionary. """ def __init__(self): """ Create TestConfig, and set default values. These will be overwritten by the "load_from" methods below. 
""" self._loaded_files = [] self._values = { 'clean_slate': os.environ.get("CLEAN_SLATE", "False") == "True", 'fake_mode': os.environ.get("FAKE_MODE", "False") == "True", 'nova_auth_url': "http://localhost:5000/v2.0", 'trove_auth_url': "http://localhost:5000/v2.0/tokens", 'dbaas_url': "http://localhost:8775/v1.0/dbaas", 'version_url': "http://localhost:8775/", 'nova_url': "http://localhost:8774/v2", 'swift_url': "http://localhost:8080/v1/AUTH_", 'dbaas_datastore': "mysql", 'dbaas_datastore_id': "a00000a0-00a0-0a00-00a0-000a000000aa", 'dbaas_datastore_name_no_versions': "Test_Datastore_1", 'dbaas_datastore_version': "5.5", 'dbaas_datastore_version_id': "b00000b0-00b0-0b00-00b0-" "000b000000bb", 'dbaas_inactive_datastore_version': "mysql_inactive_version", 'instance_create_time': 16 * 60, 'mysql_connection_method': {"type": "direct"}, 'typical_nova_image_name': None, 'white_box': os.environ.get("WHITE_BOX", "False") == "True", 'test_mgmt': False, 'use_local_ovz': False, "known_bugs": {}, "in_proc_server": True, "report_directory": os.environ.get("REPORT_DIRECTORY", None), "trove_volume_support": True, "trove_volume_size": 1, "trove_max_volumes_per_tenant": 100, "trove_max_instances_per_tenant": 55, "usage_endpoint": USAGE_ENDPOINT, "root_on_create": False, "mysql": { "configurations": { "valid_values": { "connect_timeout": 120, "local_infile": 0, "collation_server": "latin1_swedish_ci" }, "appending_values": { "join_buffer_size": 1048576, "connect_timeout": 15 }, "nondynamic_parameter": { "join_buffer_size": 1048576, "innodb_buffer_pool_size": 57671680 }, "out_of_bounds_under": { "connect_timeout": -10 }, "out_of_bounds_over": { "connect_timeout": 1000000 }, "parameters_list": [ "key_buffer_size", "connect_timeout" ] }, "volume_support": True, }, "redis": {"volume_support": False}, } self._frozen_values = FrozenDict(self._values) self._users = None def get(self, name, default_value): return self.values.get(name, default_value) def get_report(self): return PrintReporter() def load_from_line(self, line): index = line.find("=") if index >= 0: key = line[:index] value = line[index + 1:] self._values[key] = value def load_include_files(self, original_file, files): directory = os.path.dirname(original_file) for file_sub_path in files: file_full_path = os.path.join(directory, file_sub_path) self.load_from_file(file_full_path) def load_from_file(self, file_path): if file_path in self._loaded_files: return file_contents = open(file_path, "r").read() try: contents = json.loads(file_contents) except Exception as exception: raise RuntimeError("Error loading conf file \"%s\"." % file_path, exception) finally: self._loaded_files.append(file_path) if "include-files" in contents: self.load_include_files(file_path, contents['include-files']) del contents['include-files'] self._values.update(contents) def __getattr__(self, name): if name not in self._values: raise AttributeError('Configuration value "%s" not found.' % name) else: return self._values[name] def python_cmd_list(self): """The start of a command list to use when running Python scripts.""" commands = [] if self.use_venv: commands.append("%s/tools/with_venv.sh" % self.nova_code_root) return list commands.append("python") return commands @property def users(self): if self._users is None: from trove.tests.util.users import Users self._users = Users(self.values['users']) return self._users @property def values(self): return self._frozen_values class PrintReporter(object): def log(self, msg): print("[REPORT] %s" % msg) def update(self): pass # Ignore. 
This is used in other reporters. CONFIG = TestConfig() del TestConfig.__init__ trove-5.0.0/trove/tests/examples/0000775000567000056710000000000012701410521020124 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/examples/__init__.py0000664000567000056710000000000012701410316022225 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/examples/client.py0000664000567000056710000002703012701410316021760 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import re import time from urlparse import urlparse from proboscis.asserts import fail from troveclient.compat.client import TroveHTTPClient from trove.tests.config import CONFIG print_req = True def shorten_url(url): parsed = urlparse(url) if parsed.query: method_url = parsed.path + '?' + parsed.query else: method_url = parsed.path return method_url class SnippetWriter(object): def __init__(self, conf, get_replace_list): self.conf = conf self.get_replace_list = get_replace_list def output_request(self, user_details, name, url, output_headers, body, content_type, method, static_auth_token=True): headers = [] parsed = urlparse(url) method_url = shorten_url(url) headers.append("%s %s HTTP/1.1" % (method, method_url)) headers.append("User-Agent: %s" % output_headers['User-Agent']) headers.append("Host: %s" % parsed.netloc) # static_auth_token option for documentation purposes if static_auth_token: output_token = '87c6033c-9ff6-405f-943e-2deb73f278b7' else: output_token = output_headers['X-Auth-Token'] headers.append("X-Auth-Token: %s" % output_token) headers.append("Accept: %s" % output_headers['Accept']) print("OUTPUT HEADERS: %s" % output_headers) headers.append("Content-Type: %s" % output_headers['Content-Type']) self.write_file(user_details, name, "-%s.txt" % content_type, url, method, "request", output='\n'.join(headers)) pretty_body = self.format_body(body, content_type) self.write_file(user_details, name, ".%s" % content_type, url, method, "request", output=pretty_body) def output_response(self, user_details, name, content_type, url, method, resp, body): version = "1.1" # if resp.version == 11 else "1.0" lines = [ ["HTTP/%s %s %s" % (version, resp.status, resp.reason)], ["Content-Type: %s" % resp['content-type']], ] if 'via' in resp: lines.append(["Via: %s" % resp['via']]) lines.append(["Content-Length: %s" % resp['content-length']]) lines.append(["Date: Mon, 18 Mar 2013 19:09:17 GMT"]) if 'server' in resp: lines.append(["Server: %s" % resp["server"]]) new_lines = [x[0] for x in lines] joined_lines = '\n'.join(new_lines) self.write_file(user_details, name, "-%s.txt" % content_type, url, method, "response", output=joined_lines) if body: pretty_body = self.format_body(body, content_type) self.write_file(user_details, name, ".%s" % content_type, url, method, "response", output=pretty_body) def format_body(self, body, content_type): assert content_type == 'json' try: if self.conf['replace_dns_hostname']: before = r'\"hostname\": \"[a-zA-Z0-9-_\.]*\"' after = 
'\"hostname\": \"%s\"' % self.conf[ 'replace_dns_hostname'] body = re.sub(before, after, body) return json.dumps(json.loads(body), sort_keys=True, indent=4) except Exception: return body or '' def write_request_file(self, user_details, name, content_type, url, method, req_headers, request_body): if print_req: print("\t%s req url:%s" % (content_type, url)) print("\t%s req method:%s" % (content_type, method)) print("\t%s req headers:%s" % (content_type, req_headers)) print("\t%s req body:%s" % (content_type, request_body)) self.output_request(user_details, name, url, req_headers, request_body, content_type, method) def write_response_file(self, user_details, name, content_type, url, method, resp, resp_content): if print_req: print("\t%s resp:%s" % (content_type, resp)) print("\t%s resp content:%s" % (content_type, resp_content)) self.output_response(user_details, name, content_type, url, method, resp, resp_content) def write_file(self, user_details, name, content_type, url, method, in_or_out, output): output = output.replace(user_details['tenant'], '1234') if self.conf['replace_host']: output = output.replace(user_details['api_url'], self.conf['replace_host']) pre_host_port = urlparse(user_details['service_url']).netloc post_host = urlparse(self.conf['replace_host']).netloc output = output.replace(pre_host_port, post_host) output = output.replace("fake_host", "hostname") output = output.replace("FAKE_", "") for resource in self.get_replace_list(): output = output.replace(str(resource[0]), str(resource[1])) filename = "%s/db-%s-%s%s" % (self.conf['directory'], name.replace('_', '-'), in_or_out, content_type) self._write_file(filename, output) def _write_file(self, filename, output): empty = len(output.strip()) == 0 # Manipulate actual data to appease doc niceness checks actual = [line.rstrip() for line in output.split("\n")] if not empty and actual[len(actual) - 1] != '': actual.append("") def goofy_diff(a, b): diff = [] for i in range(len(a)): if i < len(b): if a[i].rstrip() != b[i].rstrip(): diff.append('Expected line %d :%s\n' ' Actual line %d :%s' % (i + 1, a[i], i + 1, b[i])) else: diff.append("Expected line %d :%s" % (i + 1, a[i])) for j in range(len(b) - len(a)): i2 = len(a) + j diff.append(" Actual line %d :%s" % (i2 + 1, b[i2])) return diff def write_actual_file(): # Always write the file. with open(filename, "w") as file: for line in actual: file.write("%s\n" % line) def assert_output_matches(): if os.path.isfile(filename): with open(filename, 'r') as original_file: original = original_file.read() if empty: fail('Error: output missing in new snippet generation ' 'for %s. Old content follows:\n"""%s"""' % (filename, original)) elif filename.endswith('.json'): assert_json_matches(original) else: assert_file_matches(original) elif not empty: fail('Error: new file necessary where there was no file ' 'before. 
Filename=%s\nContent follows:\n"""%s"""' % (filename, output)) def assert_file_matches(original): expected = original.split('\n') # Remove the last item which will look like a duplicated # file ending newline expected.pop() diff = '\n'.join(goofy_diff(expected, actual)) if diff: fail('Error: output files differ for %s:\n%s' % (filename, diff)) def order_json(json_obj): """Sort the json object so that it can be compared properly.""" if isinstance(json_obj, list): return sorted(order_json(elem) for elem in json_obj) if isinstance(json_obj, dict): return sorted( (key, order_json(value)) for key, value in json_obj.items()) else: return json_obj def assert_json_matches(original): try: expected_json = json.loads(original) actual_json = json.loads(output) except ValueError: fail('Invalid json!\nExpected: %s\nActual: %s' % (original, output)) if order_json(expected_json) != order_json(actual_json): # Re-Use the same failure output if the json is different assert_file_matches(original) if not os.environ.get('TESTS_FIX_EXAMPLES'): assert_output_matches() elif not empty: write_actual_file() # This method is mixed into the client class. # It requires the following fields: snippet_writer, content_type, and # "name," the last of which must be set before each call. def write_to_snippet(self, args, kwargs, resp, body): if self.name is None: raise RuntimeError("'name' not set before call.") url = args[0] method = args[1] request_headers = kwargs['headers'] request_body = kwargs.get('body', None) response_headers = resp response_body = body # Log request user_details = { 'api_url': self.service_url, 'service_url': self.service_url, 'tenant': self.tenant, } self.snippet_writer.write_request_file(user_details, self.name, self.content_type, url, method, request_headers, request_body) self.snippet_writer.write_response_file(user_details, self.name, self.content_type, url, method, response_headers, response_body) # Create a short url to assert against. short_url = url base_url = self.service_url for prefix in (base_url): if short_url.startswith(prefix): short_url = short_url[len(prefix):] self.old_info = { 'url': shorten_url(short_url), 'method': method, 'request_headers': request_headers, 'request_body': request_body, 'response_headers': response_headers, 'response_body': response_body } def add_fake_response_headers(headers): """ Fakes other items that would appear if you were using, just to make up an example, a proxy. """ conf = CONFIG.examples if 'via' in conf and 'via' not in headers: headers['via'] = conf['via'] if 'server' in conf and 'server' not in headers: headers['server'] = conf['server'] if 'date' not in headers: date_string = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) headers['date'] = date_string class JsonClient(TroveHTTPClient): content_type = 'json' def http_log(self, args, kwargs, resp, body): add_fake_response_headers(resp) self.pretty_log(args, kwargs, resp, body) def write_snippet(): return write_to_snippet(self, args, kwargs, resp, body) self.write_snippet = write_snippet trove-5.0.0/trove/tests/examples/snippets.py0000664000567000056710000012736712701410316022365 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import json import time from oslo_log import log as logging from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from proboscis.asserts import Check from proboscis import before_class from proboscis import SkipTest from proboscis import test from proboscis import TestProgram from troveclient.compat import client as trove_client from troveclient.compat import Dbaas from troveclient.compat import TroveHTTPClient from trove.tests.config import CONFIG from trove.tests.examples.client import JsonClient from trove.tests.examples.client import SnippetWriter trove_client._logger.setLevel(logging.CRITICAL) FAKE_INFO = {'m': 30, 's': 0, 'uuid': 'abcdef00-aaaa-aaaa-aaaa-bbbbbbbbbbbb'} EXAMPLE_BACKUP_ID = "a9832168-7541-4536-b8d9-a8a9b79cf1b4" EXAMPLE_BACKUP_INCREMENTAL_ID = "2e351a71-dd28-4bcb-a7d6-d36a5b487173" EXAMPLE_CONFIG_ID = "43a6ea86-e959-4735-9e46-a6a5d4a2d80f" EXAMPLE_INSTANCE_ID = "44b277eb-39be-4921-be31-3d61b43651d7" EXAMPLE_INSTANCE_ID_2 = "d5a9db64-7ef7-41c5-8e1e-4013166874bc" EXAMPLE_CONFIG_SERVER_ID = "271898715" def get_now(): from datetime import datetime return datetime(2014, 10, 30, hour=12, minute=FAKE_INFO['m'], second=FAKE_INFO['s']) def get_uuid(): return FAKE_INFO['uuid'] def set_fake_stuff(uuid=None, minute=None, unique_id=None): if uuid: FAKE_INFO['uuid'] = uuid if minute: FAKE_INFO['minute'] = minute if unique_id: from trove.common.template import SingleInstanceConfigTemplate def fake_calc_id(self): return unique_id SingleInstanceConfigTemplate._calculate_unique_id = fake_calc_id def monkey_patch_uuid_and_date(): import uuid uuid.uuid4 = get_uuid from trove.common import utils utils.utcnow = get_now utils.generate_uuid = get_uuid @test def load_config_file(): global conf if CONFIG.get("examples", None) is None: fail("Missing 'examples' config in test config.") conf = CONFIG.examples global normal_user normal_user = CONFIG.users.find_user_by_name(conf['normal_user_name']) global admin_user admin_user = CONFIG.users.find_user_by_name(conf['admin_user_name']) def create_client_args(user): auth_strategy = None kwargs = { 'service_type': 'trove', 'insecure': CONFIG.values['trove_client_insecure'], } def set_optional(kwargs_name, test_conf_name): value = CONFIG.values.get(test_conf_name, None) if value is not None: kwargs[kwargs_name] = value service_url = CONFIG.get('override_trove_api_url', None) if user.requirements.is_admin: service_url = CONFIG.get('override_admin_trove_api_url', service_url) if service_url: kwargs['service_url'] = service_url auth_strategy = None if user.requirements.is_admin: auth_strategy = CONFIG.get('admin_auth_strategy', CONFIG.auth_strategy) else: auth_strategy = CONFIG.auth_strategy set_optional('region_name', 'trove_client_region_name') if CONFIG.values.get('override_trove_api_url_append_tenant', False): kwargs['service_url'] += "/" + user.tenant if auth_strategy == 'fake': from troveclient.compat import auth class FakeAuth(auth.Authenticator): def authenticate(self): class FakeCatalog(object): def __init__(self, auth): self.auth = auth def get_public_url(self): return "%s/%s" % (CONFIG.dbaas_url, 
@test
def load_config_file():
    global conf
    if CONFIG.get("examples", None) is None:
        fail("Missing 'examples' config in test config.")
    conf = CONFIG.examples
    global normal_user
    normal_user = CONFIG.users.find_user_by_name(conf['normal_user_name'])
    global admin_user
    admin_user = CONFIG.users.find_user_by_name(conf['admin_user_name'])


def create_client_args(user):
    auth_strategy = None

    kwargs = {
        'service_type': 'trove',
        'insecure': CONFIG.values['trove_client_insecure'],
    }

    def set_optional(kwargs_name, test_conf_name):
        value = CONFIG.values.get(test_conf_name, None)
        if value is not None:
            kwargs[kwargs_name] = value

    service_url = CONFIG.get('override_trove_api_url', None)
    if user.requirements.is_admin:
        service_url = CONFIG.get('override_admin_trove_api_url',
                                 service_url)
    if service_url:
        kwargs['service_url'] = service_url

    auth_strategy = None
    if user.requirements.is_admin:
        auth_strategy = CONFIG.get('admin_auth_strategy',
                                   CONFIG.auth_strategy)
    else:
        auth_strategy = CONFIG.auth_strategy
    set_optional('region_name', 'trove_client_region_name')
    if CONFIG.values.get('override_trove_api_url_append_tenant', False):
        kwargs['service_url'] += "/" + user.tenant

    if auth_strategy == 'fake':
        from troveclient.compat import auth

        class FakeAuth(auth.Authenticator):

            def authenticate(self):
                class FakeCatalog(object):
                    def __init__(self, auth):
                        self.auth = auth

                    def get_public_url(self):
                        return "%s/%s" % (CONFIG.dbaas_url,
                                          self.auth.tenant)

                    def get_token(self):
                        return self.auth.tenant

                return FakeCatalog(self)

        auth_strategy = FakeAuth

    if auth_strategy:
        kwargs['auth_strategy'] = auth_strategy

    if not user.requirements.is_admin:
        auth_url = CONFIG.trove_auth_url
    else:
        auth_url = CONFIG.values.get('trove_admin_auth_url',
                                     CONFIG.trove_auth_url)

    if CONFIG.values.get('trove_client_cls'):
        cls_name = CONFIG.trove_client_cls
        kwargs['client_cls'] = import_class(cls_name)

    kwargs['tenant'] = user.tenant
    kwargs['auth_url'] = auth_url
    return (user.auth_user, user.auth_key), kwargs


def create_client(cls, user):
    args, kwargs = create_client_args(user)
    kwargs['client_cls'] = cls
    client = Dbaas(*args, **kwargs)
    return client


def make_client(user):
    args, kwargs = create_client_args(user)
    kwargs['client_cls'] = JsonClient
    client = Dbaas(*args, **kwargs)
    client.client.name = "auth"
    client.authenticate()
    return client
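# --- Illustrative sketch (not part of the original snippets.py) --------------
# How a snippet-producing client is used, assuming load_config_file has run.
# The 'name' attribute must be set before every request (write_to_snippet in
# client.py raises otherwise); write_snippet() below resets it to "junk"
# after each capture. The function name is invented for illustration.
def _example_make_and_name_client():
    client = make_client(normal_user)
    client.client.name = "list_flavors"  # names the snippet file to write
    return client.flavors.list()
# ------------------------------------------------------------------------------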
client.client.name = "junk" return results JSON_INDEX = 0 class Example(object): @classmethod def get_replace_list(cls): return [] def snippet(self, *args, **kwargs): return write_snippet(self.get_replace_list, self.client, *args, **kwargs) @test(depends_on=[load_config_file], enabled=False) class Versions(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_versions(self): self.snippet( "versions", "", "GET", 200, "OK", lambda client: client.versions.index(conf['version_url'])) @test def get_version(self): def version_call(client): return client.versions.index(conf['version_url'] + "/v1.0/") self.snippet("versions", "/v1.0", "GET", 200, "OK", get_version) @test(depends_on=[load_config_file]) class Flavors(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_flavors(self): self.snippet( "flavors", "/flavors", "GET", 200, "OK", lambda client: client.flavors.list()) @test def get_flavor_by_id(self): self.snippet( "flavors_by_id", "/flavors/1", "GET", 200, "OK", lambda client: client.flavors.get(1)) @test(depends_on=[load_config_file]) def clean_slate(): client = create_client(TroveHTTPClient, admin_user) client.client.name = "list" instances = client.instances.list() assert_equal(0, len(instances), "Instance count must be zero.") @test(depends_on=[clean_slate]) class CreateInstance(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_create_instance(self): set_fake_stuff(uuid=EXAMPLE_INSTANCE_ID) def create_instance(client, name): instance = client.instances.create( name, 1, volume={'size': 2}, databases=[ { "name": "sampledb", "character_set": "utf8", "collate": "utf8_general_ci" }, { "name": "nextround" } ], users=[ { "databases": [{"name": "sampledb"}], "name": "demouser", "password": "demopassword" } ]) assert_equal(instance.status, "BUILD") return instance self.instances = self.snippet( "create_instance", "/instances", "POST", 200, "OK", create_instance, "json_rack_instance") def an_instance_is_not_active(self): for instance in self.instances: instance = self.client.instances.get(instance.id) if instance.status != "ACTIVE": assert_equal(instance.status, "BUILD") return True return False @test(depends_on=[post_create_instance]) def wait_for_instances(self): while self.an_instance_is_not_active(): time.sleep(1) global json_instance json_instance = self.instances[0] @test(depends_on=[CreateInstance], groups=['uses_instances']) class Databases(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_create_databases(self): self.snippet( "create_databases", "/instances/%s/databases" % json_instance.id, "POST", 202, "Accepted", lambda client: client.databases.create( json_instance.id, databases=[ { "name": "testingdb", "character_set": "utf8", "collate": "utf8_general_ci" }, { "name": "anotherdb" }, { "name": "oneMoreDB" }])) @test(depends_on=[post_create_databases]) def get_list_databases(self): self.snippet( "list_databases", "/instances/%s/databases" % json_instance.id, "GET", 200, "OK", lambda client: client.databases.list(json_instance.id)) @test(depends_on=[post_create_databases]) def get_list_databases_limit_two(self): results = self.snippet( "list_databases_pagination", "/instances/%s/databases?limit=1" % json_instance.id, "GET", 200, "OK", lambda client: client.databases.list(json_instance.id, limit=1)) assert_equal(1, len(results[JSON_INDEX])) assert_equal("anotherdb", results[JSON_INDEX].next) 
@test(depends_on=[post_create_databases], runs_after=[get_list_databases, get_list_databases_limit_two]) def delete_databases(self): self.snippet( "delete_databases", "/instances/%s/databases/testingdb" % json_instance.id, "DELETE", 202, "Accepted", lambda client: client.databases.delete(json_instance.id, 'testingdb')) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Users(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_create_users(self): self.snippet( "create_users", "/instances/%s/users" % json_instance.id, "POST", 202, "Accepted", lambda client: client.users.create( json_instance.id, [{ "name": "dbuser1", "password": "password", "databases": [ { "name": "databaseA" } ] }, { "name": "dbuser2", "password": "password", "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ] }, { "name": "dbuser3", "password": "password", "databases": [ { "name": "databaseD" } ] }])) @test(depends_on=[post_create_users]) def get_list_users(self): self.snippet( "list_users", "/instances/%s/users" % json_instance.id, "GET", 200, "OK", lambda client: client.users.list(json_instance.id)) @test(depends_on=[post_create_users]) def get_list_users_limit_two(self): self.snippet( "list_users_pagination", "/instances/%s/users?limit=2" % json_instance.id, "GET", 200, "OK", lambda client: client.users.list(json_instance.id, limit=2)) @test(depends_on=[post_create_users], runs_after=[get_list_users, get_list_users_limit_two]) def delete_users(self): user_name = "demouser" self.snippet( "delete_users", "/instances/%s/users/%s" % (json_instance.id, user_name), "DELETE", 202, "Accepted", lambda client: client.users.delete(json_instance.id, username=user_name)) @test(depends_on=[post_create_users]) def modify_user_attributes(self): old_user_name = "dbuser1" self.snippet( "change_user_attributes", "/instances/%s/users/%s" % (json_instance.id, old_user_name), "PUT", 202, "Accepted", lambda client: client.users.update_attributes( json_instance.id, username=old_user_name, newuserattr={ "name": "new_username", "password": "new_password" } ) ) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Root(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_enable_root_access(self): self.snippet( "enable_root_user", "/instances/%s/root" % json_instance.id, "POST", 200, "OK", lambda client: client.root.create(json_instance.id)) @test(depends_on=[post_enable_root_access]) def get_check_root_access(self): results = self.snippet( "check_root_user", "/instances/%s/root" % json_instance.id, "GET", 200, "OK", lambda client: client.root.is_root_enabled(json_instance.id)) assert_equal(results[JSON_INDEX].rootEnabled, True) @test(depends_on=[get_check_root_access]) def delete_disable_root_access(self): self.snippet( "disable_root_user", "/instances/%s/root" % json_instance.id, "DELETE", 200, "OK", lambda client: client.root.delete(json_instance.id)) # restore root for subsequent tests self.post_enable_root_access() class ActiveMixin(Example): """Adds a method to wait for instance status to become ACTIVE.""" def _wait_for_active(self, *acceptable_states): global json_instance json_instance = self.client.instances.get(json_instance.id) print('instance.status=%s' % json_instance.status) while json_instance.status != "ACTIVE": assert_true( json_instance.status in acceptable_states, "Instance status == %s; expected it to be one of: %s" % (json_instance.status, acceptable_states)) time.sleep(0.1) json_instance = 
self.client.instances.get(json_instance.id) def _wait_for_restore_active(self, *acceptable_states): for instance in (self.json_restore, ): instance = self.client.instances.get(instance.id) print('instance.status=%s' % instance.status) while instance.status != "ACTIVE": assert_true( instance.status in acceptable_states, "Instance status == %s; expected it to be one of: %s" % (instance.status, acceptable_states)) time.sleep(0.1) instance = self.client.instances.get(instance.id) STATE = { "CONFIGURATION": None, "DATASTORE_ID": None, "DATASTORE_VERSION_ID": None, } @test(depends_on=[CreateInstance], groups=['uses_instances']) class Datastores(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_datastores_list(self): self.datastores = self.snippet( "datastores_list", "/datastores", "GET", 200, "OK", lambda client: client.datastores.list()) for result in self.datastores: assert_equal(1, len(result)) @test(depends_on=[get_datastores_list]) def get_datastore_by_id(self): ds, = self.datastores mysql_ds = [x for x in ds if x.name == 'mysql'] if not mysql_ds: fail('no mysql datastore found in list') ds_id = STATE["DATASTORE_ID"] = mysql_ds[JSON_INDEX].id self.datastore = self.snippet( "datastore_by_id", "/datastores/%s" % ds_id, "GET", 200, "OK", lambda client: client.datastores.get(ds_id)) @test(depends_on=[get_datastore_by_id]) def get_datastore_versions_list(self): ds_id = STATE["DATASTORE_ID"] self.datastore_versions = self.snippet( "datastore_versions_list", "/datastores/%s/versions" % ds_id, "GET", 200, "OK", lambda client: client.datastore_versions.list(ds_id)) @test(depends_on=[get_datastore_versions_list]) def get_datastore_version_by_id(self): ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] = ( self.datastore_versions[JSON_INDEX][0].id ) self.datastore_version = self.snippet( "datastore_version_by_id", "/datastores/%s/versions/%s" % (ds_id, ds_v_id), "GET", 200, "OK", lambda client: client.datastore_versions.get(ds_id, ds_v_id)) @test(depends_on=[Datastores], groups=['uses_instances']) class Configurations(ActiveMixin): @before_class def setup(self): self.client = make_client(normal_user) @test def get_configuration_parameters_for_datastore_version(self): ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] self.snippet( "configuration_parameters_for_datastore_version", "/datastores/%s/versions/%s/parameters" % (ds_id, ds_v_id), "GET", 200, "OK", lambda client: client.configuration_parameters.parameters( ds_id, ds_v_id ) ) @test def get_configuration_parameters_without_datastore_version(self): ds_v_id = STATE["DATASTORE_VERSION_ID"] self.params = self.snippet( "configuration_parameters_without_datastore_version", "/datastores/versions/%s/parameters" % (ds_v_id), "GET", 200, "OK", lambda client: ( client.configuration_parameters.parameters_by_version(ds_v_id) ) ) assert_true(self.params) @test(depends_on=[get_configuration_parameters_without_datastore_version]) def get_configuration_parameter_for_datastore_version(self): ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] param = self.params[JSON_INDEX][0].name self.snippet( "configuration_parameter_for_datastore_version", "/datastores/%s/versions/%s/parameters/%s" % (ds_id, ds_v_id, param), "GET", 200, "OK", lambda client: client.configuration_parameters.get_parameter( ds_id, ds_v_id, param)) @test(depends_on=[get_configuration_parameters_without_datastore_version]) def get_configuration_parameter_without_datastore_version(self): ds_v_id = 
STATE["DATASTORE_VERSION_ID"] param = self.params[JSON_INDEX][0].name def get_param(client): return client.configuration_parameters.get_parameter_by_version( ds_v_id, param ) self.params = self.snippet( "configuration_parameter_without_datastore_version", "/datastores/versions/%s/parameters/%s" % (ds_v_id, param), "GET", 200, "OK", get_param ) @test(depends_on=[get_configuration_parameter_without_datastore_version]) def create_configuration(self): set_fake_stuff(uuid=EXAMPLE_CONFIG_ID) ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] values = { "connect_timeout": 120, "collation_server": "latin1_swedish_ci" } def create(client): config = client.configurations.create( 'example-configuration-name', json.dumps(values), 'example description', ds_id, ds_v_id) return config self.configurations = self.snippet( "configuration_create", "/configurations", "POST", 200, "OK", create) STATE["CONFIGURATION"] = self.configurations[JSON_INDEX] @test(depends_on=[create_configuration]) def get_configuration(self): config = STATE["CONFIGURATION"] self.config = self.snippet( "configuration_details", "/configurations/%s" % config.id, "GET", 200, "OK", lambda client: client.configurations.get(config.id)) @test(depends_on=[create_configuration]) def list_configurations(self): self.configs = self.snippet( "configuration_list", "/configurations", "GET", 200, "OK", lambda client: client.configurations.list()) @test(depends_on=[list_configurations, get_configuration]) def edit_configuration(self): config = STATE["CONFIGURATION"] values = { 'connect_timeout': 300 } self.snippet( "configuration_edit_parameters", "/configurations/%s" % config.id, "PATCH", 200, "OK", lambda client: client.configurations.edit( config.id, json.dumps(values))) @test(depends_on=[edit_configuration]) def update_configuration(self): config = STATE["CONFIGURATION"] values = { 'connect_timeout': 150, 'collation_server': 'utf8_unicode_ci' } self.snippet( "configuration_update_parameters", "/configurations/%s" % config.id, "PUT", 202, "Accepted", lambda client: client.configurations.update( config.id, json.dumps(values), 'example-updated-name', 'example updated description')) @test(depends_on=[update_configuration]) def attach_configuration_to_instance(self): config = STATE["CONFIGURATION"] self.snippet( "configuration_attach_to_instance", "/instances/%s" % json_instance.id, "PUT", 202, "Accepted", lambda client: client.instances.modify( json_instance.id, config.id ) ) @test(depends_on=[attach_configuration_to_instance]) def list_configurations_instances(self): config = STATE["CONFIGURATION"] self.config_instances = self.snippet( "configuration_list_instances", "/configurations/%s/instances" % config.id, "GET", 200, "OK", lambda client: client.configurations.instances(config.id)) @test(depends_on=[list_configurations_instances]) def detach_configuration_from_instance(self): self.snippet( "configuration_detach_from_instance", "/instances/%s" % json_instance.id, "PUT", 202, "Accepted", lambda client: client.instances.modify( json_instance.id, "")) @test(depends_on=[detach_configuration_from_instance]) def instance_restart_after_configration_change(self): self.client.instances.restart(json_instance.id) self._wait_for_active("REBOOT") @test(depends_on=[CreateInstance], groups=['uses_instances']) class InstanceList(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_list_instance_index(self): results = self.snippet( "instances_index", "/instances", "GET", 200, "OK", lambda client: 
client.instances.list()) for result in results: assert_equal(1, len(result)) @test def get_instance_details(self): results = self.snippet( "instance_status_detail", "/instances/%s" % json_instance.id, "GET", 200, "OK", lambda client: client.instances.get(json_instance.id)) assert_equal(results[JSON_INDEX].id, json_instance.id) @test def get_default_instance_configuration(self): set_fake_stuff(unique_id=EXAMPLE_CONFIG_SERVER_ID) self.snippet( "get_default_instance_configuration", "/instances/%s/configuration" % json_instance.id, "GET", 200, "OK", lambda client: client.instances.configuration(json_instance.id)) @test def get_list_instance_index_limit_two(self): third_instance = self.client.instances.create( "The Third Instance", 1, volume={'size': 2}) third_instance = self.client.instances.get(third_instance.id) while third_instance.status != "ACTIVE": time.sleep(0.1) third_instance = self.client.instances.get(third_instance.id) results = self.snippet( "instances_index_pagination", "/instances?limit=2", "GET", 200, "OK", lambda client: client.instances.list(limit=2)) for result in results: assert_equal(2, len(result)) self.client.instances.delete(third_instance.id) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Backups(ActiveMixin): @before_class def setup(self): self.client = make_client(normal_user) @test def create_backup(self): set_fake_stuff(uuid=EXAMPLE_BACKUP_ID) results = self.snippet( "backup_create", "/backups", "POST", 202, "Accepted", lambda client: client.backups.create( name='snapshot', instance=json_instance.id, description="My Backup" ) ) self._wait_for_active("BACKUP") assert_equal(len(results), 1) self.json_backup = results[JSON_INDEX] @test(depends_on=[create_backup]) def create_incremental_backup(self): set_fake_stuff(uuid=EXAMPLE_BACKUP_INCREMENTAL_ID) results = self.snippet( "backup_create_incremental", "/backups", "POST", 202, "Accepted", lambda client: client.backups.create( name='Incremental Snapshot', instance=json_instance.id, parent_id=EXAMPLE_BACKUP_ID, description="My Incremental Backup" ) ) self._wait_for_active("BACKUP") assert_equal(len(results), 1) self.json_backup2 = results[JSON_INDEX] @test(depends_on=[create_incremental_backup]) def get_backup(self): results = self.snippet( "backup_get", "/backups/%s" % self.json_backup.id, "GET", 200, "OK", lambda client: client.backups.get(self.json_backup.id)) assert_equal(len(results), 1) @test(depends_on=[create_incremental_backup]) def get_backups_for_instance(self): results = self.snippet( "backups_by_instance", "/instances/%s/backups" % json_instance.id, "GET", 200, "OK", lambda client: client.instances.backups(json_instance.id)) assert_equal(len(results), 1) @test(depends_on=[create_incremental_backup]) def list_backups(self): results = self.snippet( "backup_list", "/backups", "GET", 200, "OK", lambda client: client.backups.list()) assert_equal(len(results), 1) @test(depends_on=[create_backup]) def restore(self): set_fake_stuff(uuid=EXAMPLE_INSTANCE_ID_2) def create_instance(client, name, backup): instance = client.instances.create( name, 1, volume={'size': 2}, restorePoint={'backupRef': backup}) assert_equal(instance.status, "BUILD") return instance results = self.snippet( "backup_restore", "/instances", "POST", 200, "OK", lambda client: create_instance( client, "backup_instance", self.json_backup.id)) assert_equal(len(results), 1) self.json_restore = results[JSON_INDEX] self._wait_for_restore_active("BUILD") self.json_restore = self.client.instances.get(self.json_restore.id) 
assert_equal(self.json_restore.status, "ACTIVE") @test(depends_on=[restore]) def delete_restores(self): self.snippet( "restore_delete", "/instances/%s" % self.json_restore.id, "DELETE", 202, "Accepted", lambda client: client.instances.delete(self.json_restore.id)) self.json_restore = self.client.instances.get(self.json_restore.id) assert_equal(self.json_restore.status, "SHUTDOWN") @test(depends_on=[create_backup], runs_after=[get_backup, list_backups, restore, get_backups_for_instance]) def delete_backup(self): results = self.snippet( "backup_delete", "/backups/%s" % self.json_backup.id, "DELETE", 202, "Accepted", lambda client: client.backups.delete(self.json_backup.id)) assert_equal(len(results), 1) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Actions(ActiveMixin): @before_class def setup(self): self.client = make_client(normal_user) @test def instance_restart(self): self.snippet( "instance_restart", "/instances/%s/action" % json_instance.id, "POST", 202, "Accepted", lambda client: client.instances.restart(json_instance.id)) self._wait_for_active("REBOOT") @test def instance_resize_volume(self): self.snippet( "instance_resize_volume", "/instances/%s/action" % json_instance.id, "POST", 202, "Accepted", lambda client: client.instances.resize_volume(json_instance.id, 4)) self._wait_for_active("RESIZE") assert_equal(json_instance.volume['size'], 4) @test def instance_resize_flavor(self): self.snippet( "instance_resize_flavor", ("/instances/%s/action" % json_instance.id), "POST", 202, "Accepted", lambda client: client.instances.resize_instance( json_instance.id, 3)) self._wait_for_active("RESIZE") # TODO(imsplitbit): remove coercion when troveclient fixes are in assert_equal(int(json_instance.flavor['id']), 3) @test(depends_on=[CreateInstance], groups=['uses_instances', "MgmtHosts"]) class MgmtHosts(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_list_hosts(self): results = self.snippet( "mgmt_list_hosts", "/mgmt/hosts", "GET", 200, "OK", lambda client: client.mgmt.hosts.index()) with Check() as check: for hosts in results: check.equal(2, len(hosts)) check.true("fake_host_1" == hosts[0].name or "fake_host_1" == hosts[1].name) check.true("fake_host_2" == hosts[0].name or "fake_host_2" == hosts[1].name) check.true(1 == results[0][1].instanceCount or 1 == results[0][0].instanceCount) @test def mgmt_get_host_detail(self): results = self.snippet( "mgmt_get_host_detail", "/mgmt/hosts/fake_host_1", "GET", 200, "OK", lambda client: client.mgmt.hosts.get("fake_host_1")) with Check() as check: for host in results: check.equal(results[0].name, "fake_host_1") # XML entries won't come back as these types. :( check.true(isinstance(results[0].percentUsed, int)), check.true(isinstance(results[0].totalRAM, int)), check.true(isinstance(results[0].usedRAM, int)), with Check() as check: for host in results: check.equal(1, len(host.instances)) for instance in host.instances: check.equal(instance['status'], 'ACTIVE') check.true(isinstance(instance['name'], basestring)) check.true(isinstance(instance['id'], basestring)) check.true(isinstance(instance['server_id'], basestring)) check.true(isinstance(instance['tenant_id'], basestring)) @test def mgmt_host_update_all(self): raise SkipTest("This isn't working... 
:(") self.snippet( "mgmt_host_update", "/mgmt/hosts/fake_host_1/instances/action", "POST", 202, "Accepted", lambda client: client.mgmt.hosts.update_all("fake_host_1")) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtStorage(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_storage(self): results = self.snippet( "mgmt_get_storage", "/mgmt/storage", "GET", 200, "OK", lambda client: client.mgmt.storage.index()) for index, devices in enumerate(results): with Check() as check: check.equal(1, len(devices)) device = devices[0] check.equal(int(device.capacity['available']), 90) check.equal(int(device.capacity['total']), 100) check.equal(device.name, "fake_storage") check.equal(int(device.provision['available']), 40) check.equal(int(device.provision['percent']), 10) check.equal(int(device.provision['total']), 50) check.equal(device.type, "test_type") check.equal(int(device.used), 10) if index == JSON_INDEX: check.true(isinstance(device.capacity['available'], int)) check.true(isinstance(device.capacity['total'], int)) check.true(isinstance(device.provision['available'], int)) check.true(isinstance(device.provision['percent'], int)) check.true(isinstance(device.provision['total'], int)) check.true(isinstance(device.used, int)) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtAccount(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_account_details(self): results = self.snippet( "mgmt_get_account_details", "/mgmt/accounts/%s" % conf['normal_user_tenant'], "GET", 200, "OK", lambda client: client.mgmt.accounts.show( conf['normal_user_tenant'], )) with Check() as check: for account_info in results: check.equal(conf['normal_user_tenant'], account_info.id) @test def mgmt_get_account_list(self): results = self.snippet( "mgmt_list_accounts", "/mgmt/accounts", "GET", 200, "OK", lambda client: client.mgmt.accounts.index()) matches = {conf['normal_user_tenant']: 2, conf['admin_user_tenant']: 0} for index, result in enumerate(results): for account in result.accounts: if account['id'] not in matches: fail("Did not expect this account ID: %s" % account['id']) expected_count = matches[account['id']] if index == JSON_INDEX: assert_equal(2, expected_count) else: assert_equal(2, expected_count) def for_both(func): @functools.wraps(func) def both(self): for result in self.results: func(self, result) return both @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstance(Example): @before_class def mgmt_get_instance_details(self): self.client = make_client(admin_user) self.results = self.snippet( "mgmt_get_instance_details", ("/mgmt/instances/%s" % json_instance.id), "GET", 200, "OK", lambda client: client.mgmt.instances.show(json_instance.id)) @test @for_both def created(self, result): assert_true(isinstance(result.created, basestring)) @test def deleted(self): assert_equal(self.results[JSON_INDEX].deleted, False) @test @for_both def flavor(self, result): # TODO(imsplitbit): remove the coercion when python-troveclient fixes # land in the public. 
assert_true( int(result.flavor['id']) == 1 or int(result.flavor['id']) == 3) assert_equal(len(result.flavor['links']), 2) @test @for_both def guest_status(self, result): assert_equal(result.guest_status['state_description'], 'running') @test(enabled=False) @for_both def host(self, result): assert_equal(result.host, 'fake_host_1') @test def id(self): assert_equal(self.results[JSON_INDEX].id, json_instance.id) @test @for_both def links(self, result): assert_true(isinstance(result.links, list)) for link in result.links: assert_true(isinstance(link, dict)) assert_true(isinstance(link['href'], basestring)) assert_true(isinstance(link['rel'], basestring)) @test def local_id(self): assert_true(isinstance(self.results[JSON_INDEX].server['local_id'], int)) @test @for_both def name(self, result): assert_true(isinstance(result.name, basestring)) @test @for_both def server_id(self, result): assert_true(isinstance(result.server['id'], basestring)) @test @for_both def status(self, result): assert_equal("ACTIVE", result.status) @test @for_both def task_description(self, result): assert_equal(result.task_description, "No tasks for the instance.") @test @for_both def tenant_id(self, result): assert_equal(result.tenant_id, conf['normal_user_tenant']) @test @for_both def updated(self, result): assert_true(isinstance(result.updated, basestring)) @test @for_both def volume(self, result): assert_true(isinstance(result.volume, dict)) assert_true('id' in result.volume) assert_true('size' in result.volume) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstanceIndex(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_instance_index(self, deleted=False): self.snippet( "mgmt_instance_index", "/mgmt/instances?deleted=false", "GET", 200, "OK", lambda client: client.mgmt.instances.index(deleted=False)) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstanceDiagnostics(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_instance_diagnostics(self): self.snippet( "mgmt_instance_diagnostics", ("/mgmt/instances/%s/diagnostics" % json_instance.id), "GET", 200, "OK", lambda client: client.diagnostics.get(json_instance.id)) @test(depends_on=[CreateInstance]) class MgmtInstanceRoot(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_root_details(self): self.snippet( "mgmt_get_root_details", ("/mgmt/instances/%s/root" % json_instance.id), "GET", 200, "OK", lambda client: client.mgmt.instances.root_enabled_history( json_instance.id) ) @test(depends_on=[CreateInstance], enabled=False) class MgmtInstanceHWInfo(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_hw_info(self): self.snippet( "mgmt_get_hw_info", ("/mgmt/instances/%s/hwinfo" % json_instance.id), "GET", 200, "OK", lambda client, id: client.hw_info.get(id), ([json_instance.id], )) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstanceReboot(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_instance_reboot(self): self.snippet( "instance_reboot", ("/mgmt/instances/%s/action" % json_instance.id), "POST", 202, "Accepted", lambda client: client.mgmt.instances.reboot(json_instance.id)) @test(depends_on=[CreateInstance], groups=['uses_instances'], enabled=False) class MgmtInstanceGuestUpdate(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def 
mgmt_instance_guest_update(self): self.snippet( "guest_update", ("/mgmt/instances/%s/action" % json_instance.id), "POST", 202, "Accepted", lambda client: client.mgmt.instances.update(json_instance.id)) @test(depends_on=[CreateInstance], runs_after_groups=['uses_instances']) class ZzzDeleteInstance(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def zzz_delete_instance(self): global json_instance self.snippet( "delete_instance", "/instances/%s" % json_instance.id, "DELETE", 202, "Accepted", lambda client: client.instances.delete(json_instance.id)) json_instance = self.client.instances.get(json_instance.id) assert_equal(json_instance.status, "SHUTDOWN") @test(depends_on=[zzz_delete_instance]) def delete_configuration(self): config = STATE["CONFIGURATION"] self.configs = self.snippet( "configuration_delete", ("/configurations/%s" % config.id), "DELETE", 202, "Accepted", lambda client: client.configurations.delete(config.id)) if __name__ == "__main__": CONFIG.load_from_file("etc/tests/localhost.test.conf") TestProgram().run_and_exit() trove-5.0.0/trove/tests/db/0000775000567000056710000000000012701410521016673 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/db/__init__.py0000664000567000056710000000000012701410316020774 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/db/migrations.py0000664000567000056710000001726312701410316021434 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests database migration scripts for mysql. To run the tests, you'll need to set up db user named 'openstack_citest' with password 'openstack_citest' on localhost. This user needs db admin rights (i.e. 
create/drop database) """ import glob import os import migrate.versioning.api as migration_api from migrate.versioning import repository from oslo_concurrency import processutils from oslo_log import log as logging from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from proboscis import before_class from proboscis import SkipTest from proboscis import test import sqlalchemy import sqlalchemy.exc from trove.common.i18n import _ import trove.db.sqlalchemy.migrate_repo from trove.tests.util import event_simulator GROUP = "dbaas.db.migrations" LOG = logging.getLogger(__name__) @test(groups=[GROUP]) class ProjectTestCase(object): """Test migration scripts integrity.""" @test def test_all_migrations_have_downgrade(self): topdir = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)) py_glob = os.path.join(topdir, "trove", "db", "sqlalchemy", "migrate_repo", "versions", "*.py") missing_downgrade = [] for path in glob.iglob(py_glob): has_upgrade = False has_downgrade = False with open(path, "r") as f: for line in f: if 'def upgrade(' in line: has_upgrade = True if 'def downgrade(' in line: has_downgrade = True if has_upgrade and not has_downgrade: fname = os.path.basename(path) missing_downgrade.append(fname) helpful_msg = (_("The following migration scripts are missing a " "downgrade implementation:\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) assert_true(not missing_downgrade, helpful_msg) @test(depends_on_classes=[ProjectTestCase], groups=[GROUP]) class TestTroveMigrations(object): """Test sqlalchemy-migrate migrations.""" USER = "openstack_citest" PASSWD = "openstack_citest" DATABASE = "openstack_citest" @before_class def setUp(self): event_simulator.allowable_empty_sleeps = 1 @after_class def tearDown(self): event_simulator.allowable_empty_sleeps = 0 def __init__(self): self.MIGRATE_FILE = trove.db.sqlalchemy.migrate_repo.__file__ self.REPOSITORY = repository.Repository( os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) self.INIT_VERSION = 0 def _get_connect_string(self, backend, database=None): """Get database connection string.""" args = {'backend': backend, 'user': self.USER, 'passwd': self.PASSWD} template = "%(backend)s://%(user)s:%(passwd)s@localhost" if database is not None: args['database'] = database template += "/%(database)s" return template % args def _is_backend_avail(self, backend): """Check database backend availability.""" connect_uri = self._get_connect_string(backend) engine = sqlalchemy.create_engine(connect_uri) try: connection = engine.connect() except Exception: # any error here means the database backend is not available return False else: connection.close() return True finally: if engine is not None: engine.dispose() def _execute_cmd(self, cmd=None): """Shell out and run the given command.""" out, err = processutils.trycmd(cmd, shell=True) assert_equal('', err, "Failed to run: '%(cmd)s' " "Output: '%(stdout)s' " "Error: '%(stderr)s'" % {'cmd': cmd, 'stdout': out, 'stderr': err}) def _reset_mysql(self): """Reset the MySQL test database Drop the MySQL test database if it already exists and create a new one. 
""" sql = ("drop database if exists %(database)s; " "create database %(database)s;" % {'database': self.DATABASE}) cmd = ("mysql -u \"%(user)s\" -p%(password)s -h %(host)s " "-e \"%(sql)s\"" % {'user': self.USER, 'password': self.PASSWD, 'host': 'localhost', 'sql': sql}) self._execute_cmd(cmd) @test def test_mysql_migration(self): db_backend = "mysql+mysqldb" # Gracefully skip this test if the developer do not have # MySQL running. MySQL should always be available on # the infrastructure if not self._is_backend_avail(db_backend): raise SkipTest("MySQL is not available.") self._reset_mysql() connect_string = self._get_connect_string(db_backend, self.DATABASE) engine = sqlalchemy.create_engine(connect_string) self._walk_versions(engine) engine.dispose() def _walk_versions(self, engine=None): """Walk through and test the migration scripts Determine latest version script from the repo, then upgrade from 1 through to the latest, then downgrade from the latest back to 1, with no data in the databases. This just checks that the schema itself upgrades and downgrades successfully. """ # Place the database under version control migration_api.version_control(engine, self.REPOSITORY, self.INIT_VERSION) assert_equal(self.INIT_VERSION, migration_api.db_version(engine, self.REPOSITORY)) LOG.debug('Latest version is %s' % self.REPOSITORY.latest) versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) # Snake walk from version 1 to the latest, testing the upgrade paths. # upgrade -> downgrade -> upgrade for version in versions: self._migrate_up(engine, version) self._migrate_down(engine, version - 1) self._migrate_up(engine, version) # Now snake walk back down to version 1 from the latest, testing the # downgrade paths. # downgrade -> upgrade -> downgrade for version in reversed(versions): self._migrate_down(engine, version - 1) self._migrate_up(engine, version) self._migrate_down(engine, version - 1) def _migrate_down(self, engine, version): """Migrate down to an old version of database.""" migration_api.downgrade(engine, self.REPOSITORY, version) assert_equal(version, migration_api.db_version(engine, self.REPOSITORY)) def _migrate_up(self, engine, version): """Migrate up to a new version of database.""" migration_api.upgrade(engine, self.REPOSITORY, version) assert_equal(version, migration_api.db_version(engine, self.REPOSITORY)) trove-5.0.0/trove/tests/api/0000775000567000056710000000000012701410521017057 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/api/limits.py0000664000567000056710000001261212701410316020736 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
trove-5.0.0/trove/tests/api/0000775000567000056710000000000012701410521017057 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/api/limits.py0000664000567000056710000001261212701410316020736 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from datetime import datetime

from nose.tools import assert_equal
from nose.tools import assert_true
from oslo_utils import timeutils
from proboscis import before_class
from proboscis import test
from troveclient.compat import exceptions

from trove.common import cfg
from trove.tests.fakes import limits as fake_limits
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Users

CONF = cfg.CONF

GROUP = "dbaas.api.limits"
DEFAULT_RATE = CONF.http_get_rate
DEFAULT_MAX_VOLUMES = CONF.max_volumes_per_tenant
DEFAULT_MAX_INSTANCES = CONF.max_instances_per_tenant
DEFAULT_MAX_BACKUPS = CONF.max_backups_per_tenant


def ensure_limits_are_not_faked(func):
    def _cd(*args, **kwargs):
        fake_limits.ENABLED = True
        try:
            return func(*args, **kwargs)
        finally:
            fake_limits.ENABLED = False
    # The decorator must hand back the wrapper; without this return the
    # decorated tests would silently become no-ops.
    return _cd


@test(groups=[GROUP])
class Limits(object):

    @before_class
    def setUp(self):
        users = [
            {
                "auth_user": "rate_limit",
                "auth_key": "password",
                "tenant": "4000",
                "requirements": {
                    "is_admin": False,
                    "services": ["trove"]
                }
            },
            {
                "auth_user": "rate_limit_exceeded",
                "auth_key": "password",
                "tenant": "4050",
                "requirements": {
                    "is_admin": False,
                    "services": ["trove"]
                }
            }]

        self._users = Users(users)

        rate_user = self._get_user('rate_limit')
        self.rd_client = create_dbaas_client(rate_user)

    def _get_user(self, name):
        return self._users.find_user_by_name(name)

    def __is_available(self, next_available):
        dt_next = timeutils.parse_isotime(next_available)
        dt_now = datetime.now()
        return dt_next.time() < dt_now.time()

    def _get_limits_as_dict(self, limits):
        d = {}
        for l in limits:
            d[l.verb] = l
        return d

    @test
    @ensure_limits_are_not_faked
    def test_limits_index(self):
        """Test_limits_index."""
        limits = self.rd_client.limits.list()
        d = self._get_limits_as_dict(limits)

        # remove the abs_limits from the rate limits
        abs_limits = d.pop("ABSOLUTE", None)
        assert_equal(abs_limits.verb, "ABSOLUTE")
        assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
        assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
        assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)

        for k in d:
            assert_equal(d[k].verb, k)
            assert_equal(d[k].unit, "MINUTE")
            assert_true(int(d[k].remaining) <= DEFAULT_RATE)
            assert_true(d[k].nextAvailable is not None)

    @test
    @ensure_limits_are_not_faked
    def test_limits_get_remaining(self):
        """Test_limits_get_remaining."""
        limits = ()
        for i in range(5):
            limits = self.rd_client.limits.list()

        d = self._get_limits_as_dict(limits)
        abs_limits = d["ABSOLUTE"]
        get = d["GET"]

        assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
        assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
        assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
        assert_equal(get.verb, "GET")
        assert_equal(get.unit, "MINUTE")
        assert_true(int(get.remaining) <= DEFAULT_RATE - 5)
        assert_true(get.nextAvailable is not None)
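    # --- Illustrative sketch (not part of the original test class) -----------
    # What ensure_limits_are_not_faked guarantees: fake_limits.ENABLED is
    # set only for the duration of the wrapped call, even when it raises.
    # This helper is invented for illustration and is not wired into the run.
    def _example_decorator_restores_flag(self):
        @ensure_limits_are_not_faked
        def boom():
            raise ValueError("raised inside the guarded region")

        try:
            boom()
        except ValueError:
            pass
        assert fake_limits.ENABLED is False  # reset by the finally clause
    # --------------------------------------------------------------------------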
    @test
    @ensure_limits_are_not_faked
    def test_limits_exception(self):
        """Test_limits_exception."""
        # use a different user to avoid throttling tests run out of order
        rate_user_exceeded = self._get_user('rate_limit_exceeded')
        rd_client = create_dbaas_client(rate_user_exceeded)

        get = None
        encountered = False
        for i in range(DEFAULT_RATE + 50):
            try:
                limits = rd_client.limits.list()
                d = self._get_limits_as_dict(limits)
                get = d["GET"]
                abs_limits = d["ABSOLUTE"]

                assert_equal(get.verb, "GET")
                assert_equal(get.unit, "MINUTE")
                assert_equal(int(abs_limits.max_instances),
                             DEFAULT_MAX_INSTANCES)
                assert_equal(int(abs_limits.max_backups),
                             DEFAULT_MAX_BACKUPS)
                assert_equal(int(abs_limits.max_volumes),
                             DEFAULT_MAX_VOLUMES)
            except exceptions.OverLimit:
                encountered = True
        assert_true(encountered)
        assert_true(int(get.remaining) <= 50)
trove-5.0.0/trove/tests/api/instances_mysql_down.py0000664000567000056710000001065112701410316023701 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Extra tests to create an instance, shut down MySQL, and delete it."""

import time
import uuid

from proboscis import asserts
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import test
from troveclient.compat import exceptions

from trove.common.utils import poll_until
from trove.tests.api.instances import EPHEMERAL_SUPPORT
from trove.tests.api.instances import VOLUME_SUPPORT
from trove.tests.util import create_client
from trove.tests.util import test_config


@test(groups=["dbaas.api.instances.down"])
class TestBase(object):
    """Base class for instance-down tests."""

    @before_class
    def set_up(self):
        self.client = create_client(is_admin=False)
        self.mgmt_client = create_client(is_admin=True)

        if EPHEMERAL_SUPPORT:
            flavor_name = test_config.values.get('instance_eph_flavor_name',
                                                 'eph.rd-tiny')
            flavor2_name = test_config.values.get(
                'instance_bigger_eph_flavor_name', 'eph.rd-smaller')
        else:
            flavor_name = test_config.values.get('instance_flavor_name',
                                                 'm1.tiny')
            flavor2_name = test_config.values.get(
                'instance_bigger_flavor_name', 'm1.small')

        flavors = self.client.find_flavors_by_name(flavor_name)
        self.flavor_id = flavors[0].id
        self.name = "TEST_" + str(uuid.uuid4())
        # Get the resize to flavor.
flavors2 = self.client.find_flavors_by_name(flavor2_name) self.new_flavor_id = flavors2[0].id asserts.assert_not_equal(self.flavor_id, self.new_flavor_id) def _wait_for_active(self): poll_until(lambda: self.client.instances.get(self.id), lambda instance: instance.status == "ACTIVE", time_out=(60 * 8)) @test def create_instance(self): volume = None if VOLUME_SUPPORT: volume = {'size': 1} initial = self.client.instances.create(self.name, self.flavor_id, volume, [], []) self.id = initial.id self._wait_for_active() def _shutdown_instance(self): self.client.instances.get(self.id) self.mgmt_client.management.stop(self.id) @test(depends_on=[create_instance]) def put_into_shutdown_state(self): self._shutdown_instance() @test(depends_on=[put_into_shutdown_state]) @time_out(60 * 5) def resize_instance_in_shutdown_state(self): self.client.instances.resize_instance(self.id, self.new_flavor_id) self._wait_for_active() @test(depends_on=[create_instance], runs_after=[resize_instance_in_shutdown_state]) def put_into_shutdown_state_2(self): self._shutdown_instance() @test(depends_on=[put_into_shutdown_state_2], enabled=VOLUME_SUPPORT) @time_out(60 * 5) def resize_volume_in_shutdown_state(self): self.client.instances.resize_volume(self.id, 2) poll_until(lambda: self.client.instances.get(self.id), lambda instance: instance.volume['size'] == 2, time_out=(60 * 8)) @test(depends_on=[create_instance], runs_after=[resize_volume_in_shutdown_state]) def put_into_shutdown_state_3(self): self._shutdown_instance() @test(depends_on=[create_instance], runs_after=[put_into_shutdown_state_3]) @time_out(2 * 60) def delete_instances(self): instance = self.client.instances.get(self.id) instance.delete() while True: try: instance = self.client.instances.get(self.id) asserts.assert_equal("SHUTDOWN", instance.status) except exceptions.NotFound: break time.sleep(0.25) trove-5.0.0/trove/tests/api/user_access.py0000664000567000056710000005232412701410316021740 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
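# --- Illustrative sketch (not part of the original user_access.py) -----------
# The access tests below pass an `expected_response` HTTP code and translate
# troveclient exceptions back to statuses. A minimal, self-contained version
# of that translation pattern (names invented for illustration):
def _example_expect_status(call, expected_response):
    from troveclient.compat import exceptions
    try:
        call()          # any zero-argument callable hitting the API
        status = 202
    except exceptions.BadRequest:
        status = 400
    except exceptions.NotFound:
        status = 404
    return status == expected_response
# ------------------------------------------------------------------------------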
from random import choice from proboscis import after_class from proboscis import asserts from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.instances import instance_info from trove.tests.api.users import TestUsers from trove.tests import util from trove.tests.util import test_config GROUP = "dbaas.api.useraccess" GROUP_POSITIVE = GROUP + ".positive" GROUP_NEGATIVE = GROUP + ".negative" FAKE = test_config.values['fake_mode'] class UserAccessBase(object): """ Base class for Positive and Negative TestUserAccess classes """ users = [] databases = [] def set_up(self): self.dbaas = util.create_dbaas_client(instance_info.user) self.users = ["test_access_user"] self.databases = [("test_access_db%02i" % i) for i in range(4)] def _user_list_from_names(self, usernames): return [{"name": name, "password": "password", "databases": []} for name in usernames] def _grant_access_singular(self, user, databases, expected_response=202): """Grant a single user access to the databases listed. Potentially, expect an exception in the process. """ try: self.dbaas.users.grant(instance_info.id, user, databases) except exceptions.BadRequest: asserts.assert_equal(400, expected_response) except exceptions.NotFound: asserts.assert_equal(404, expected_response) except exceptions.ClientException: asserts.assert_equal(500, expected_response) finally: asserts.assert_equal(expected_response, self.dbaas.last_http_code) def _grant_access_plural(self, users, databases, expected_response=202): """Grant each user in the list access to all the databases listed. Potentially, expect an exception in the process. """ for user in users: self._grant_access_singular(user, databases, expected_response) def _revoke_access_singular(self, user, database, expected_response=202): """Revoke from a user access to the given database . Potentially, expect an exception in the process. """ try: self.dbaas.users.revoke(instance_info.id, user, database) asserts.assert_true(expected_response, self.dbaas.last_http_code) except exceptions.BadRequest: asserts.assert_equal(400, self.dbaas.last_http_code) except exceptions.NotFound: asserts.assert_equal(404, self.dbaas.last_http_code) def _revoke_access_plural(self, users, databases, expected_response=202): """Revoke from each user access to each database. Potentially, expect an exception in the process. """ for user in users: for database in databases: self._revoke_access_singular(user, database, expected_response) def _test_access(self, users, databases, expected_response=200): """Verify that each user in the list has access to each database in the list. """ for user in users: access = self.dbaas.users.list_access(instance_info.id, user) asserts.assert_equal(expected_response, self.dbaas.last_http_code) access = [db.name for db in access] asserts.assert_equal(set(access), set(databases)) def _test_ignore_access(self, users, databases, expected_response=200): databases = [d for d in databases if d not in ['lost+found', 'mysql', 'information_schema']] self._test_access(users, databases, expected_response) def _reset_access(self): for user in self.users: for database in self.databases + self.ghostdbs: try: self.dbaas.users.revoke(instance_info.id, user, database) asserts.assert_true(self.dbaas.last_http_code in [202, 404] ) except exceptions.NotFound: # This is all right here, since we're resetting. 
pass self._test_access(self.users, []) @test(depends_on_classes=[TestUsers], groups=[tests.DBAAS_API, GROUP, tests.INSTANCES], runs_after=[TestUsers]) class TestUserAccessPasswordChange(UserAccessBase): """ Test that change_password works. """ @before_class def setUp(self): super(TestUserAccessPasswordChange, self).set_up() def _check_mysql_connection(self, username, password, success=True): # This can only test connections for users with the host %. # Much more difficult to simulate connection attempts from other hosts. if FAKE: # "Fake mode; cannot test mysql connection." return conn = util.mysql_connection() if success: conn.create(instance_info.get_address(), username, password) else: conn.assert_fails(instance_info.get_address(), username, password) def _pick_a_user(self): users = self._user_list_from_names(self.users) return choice(users) # Pick one, it doesn't matter. @test() def test_change_password_bogus_user(self): user = self._pick_a_user() user["name"] = "thisuserhasanamethatstoolong" asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.change_passwords, instance_info.id, [user]) asserts.assert_equal(400, self.dbaas.last_http_code) @test() def test_change_password_nonexistent_user(self): user = self._pick_a_user() user["name"] = "thisuserDNE" asserts.assert_raises(exceptions.NotFound, self.dbaas.users.change_passwords, instance_info.id, [user]) asserts.assert_equal(404, self.dbaas.last_http_code) @test() def test_create_user_and_dbs(self): users = self._user_list_from_names(self.users) # Default password for everyone is 'password'. self.dbaas.users.create(instance_info.id, users) asserts.assert_equal(202, self.dbaas.last_http_code) databases = [{"name": db} for db in self.databases] self.dbaas.databases.create(instance_info.id, databases) asserts.assert_equal(202, self.dbaas.last_http_code) @test(depends_on=[test_create_user_and_dbs]) def test_initial_connection(self): user = self._pick_a_user() self._check_mysql_connection(user["name"], "password") @test(depends_on=[test_initial_connection]) def test_change_password(self): # Doesn't actually change anything, just tests that the call doesn't # have any problems. As an aside, also checks that a user can # change its password to the same thing again. user = self._pick_a_user() password = user["password"] self.dbaas.users.change_passwords(instance_info.id, [user]) self._check_mysql_connection(user["name"], password) @test(depends_on=[test_change_password]) def test_change_password_back(self): user = self._pick_a_user() old_password = user["password"] new_password = "NEWPASSWORD" user["password"] = new_password self.dbaas.users.change_passwords(instance_info.id, [user]) self._check_mysql_connection(user["name"], new_password) user["password"] = old_password self.dbaas.users.change_passwords(instance_info.id, [user]) self._check_mysql_connection(user["name"], old_password) @test(depends_on=[test_change_password_back]) def test_change_password_twice(self): # Changing the password twice isn't a problem. 
user = self._pick_a_user() password = "NEWPASSWORD" user["password"] = password self.dbaas.users.change_passwords(instance_info.id, [user]) self.dbaas.users.change_passwords(instance_info.id, [user]) self._check_mysql_connection(user["name"], password) @after_class(always_run=True) def tearDown(self): for database in self.databases: self.dbaas.databases.delete(instance_info.id, database) asserts.assert_equal(202, self.dbaas.last_http_code) for username in self.users: self.dbaas.users.delete(instance_info.id, username) @test(depends_on_classes=[TestUsers], groups=[tests.DBAAS_API, GROUP, GROUP_POSITIVE, tests.INSTANCES], runs_after=[TestUsers]) class TestUserAccessPositive(UserAccessBase): """ Test the creation and deletion of user grants. """ @before_class def setUp(self): super(TestUserAccessPositive, self).set_up() # None of the ghosts are real databases or users. self.ghostdbs = ["test_user_access_ghost_db"] self.ghostusers = ["test_ghostuser"] self.revokedbs = self.databases[:1] self.remainingdbs = self.databases[1:] def _ensure_nothing_else_created(self): # Make sure grants and revokes do not create users or databases. databases = self.dbaas.databases.list(instance_info.id) database_names = [db.name for db in databases] for ghost in self.ghostdbs: asserts.assert_true(ghost not in database_names) users = self.dbaas.users.list(instance_info.id) user_names = [user.name for user in users] for ghost in self.ghostusers: asserts.assert_true(ghost not in user_names) @test() def test_create_user_and_dbs(self): users = self._user_list_from_names(self.users) self.dbaas.users.create(instance_info.id, users) asserts.assert_equal(202, self.dbaas.last_http_code) databases = [{"name": db} for db in self.databases] self.dbaas.databases.create(instance_info.id, databases) asserts.assert_equal(202, self.dbaas.last_http_code) @test(depends_on=[test_create_user_and_dbs]) def test_no_access(self): # No users have any access to any database. self._reset_access() self._test_access(self.users, []) @test(depends_on=[test_no_access]) def test_grant_full_access(self): # The users are granted access to all test databases. self._reset_access() self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) @test(depends_on=[test_no_access]) def test_grant_full_access_ignore_databases(self): # The users are granted access to all test databases. all_dbs = [] all_dbs.extend(self.databases) all_dbs.extend(['lost+found', 'mysql', 'information_schema']) self._reset_access() self._grant_access_plural(self.users, self.databases) self._test_ignore_access(self.users, self.databases) @test(depends_on=[test_grant_full_access]) def test_grant_idempotence(self): # Grant operations can be repeated with no ill effects. self._reset_access() for repeat in range(3): self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) @test(depends_on=[test_grant_full_access]) def test_revoke_one_database(self): # Revoking permission removes that database from a user's list. self._reset_access() self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) self._revoke_access_plural(self.users, self.revokedbs) self._test_access(self.users, self.remainingdbs) @test(depends_on=[test_grant_full_access]) def test_revoke_non_idempotence(self): # Revoking access cannot be repeated. 
self._reset_access() self._grant_access_plural(self.users, self.databases) self._revoke_access_plural(self.users, self.revokedbs) self._revoke_access_plural(self.users, self.revokedbs, 404) self._test_access(self.users, self.remainingdbs) @test(depends_on=[test_grant_full_access]) def test_revoke_all_access(self): # Revoking access to all databases will leave their access empty. self._reset_access() self._grant_access_plural(self.users, self.databases) self._revoke_access_plural(self.users, self.revokedbs) self._test_access(self.users, self.remainingdbs) @test(depends_on=[test_grant_full_access]) def test_grant_ghostdbs(self): # Grants to imaginary databases are acceptable, and are honored. self._reset_access() self._ensure_nothing_else_created() self._grant_access_plural(self.users, self.ghostdbs) self._ensure_nothing_else_created() @test(depends_on=[test_grant_full_access]) def test_revoke_ghostdbs(self): # Revokes to imaginary databases are acceptable, and are honored. self._reset_access() self._ensure_nothing_else_created() self._grant_access_plural(self.users, self.ghostdbs) self._revoke_access_plural(self.users, self.ghostdbs) self._ensure_nothing_else_created() @test(depends_on=[test_grant_full_access]) def test_grant_ghostusers(self): # You cannot grant permissions to imaginary users, as imaginary users # don't have passwords we can pull from mysql.users self._reset_access() self._grant_access_plural(self.ghostusers, self.databases, 404) @test(depends_on=[test_grant_full_access]) def test_revoke_ghostusers(self): # You cannot revoke permissions from imaginary users, as imaginary # users don't have passwords we can pull from mysql.users self._reset_access() self._revoke_access_plural(self.ghostusers, self.databases, 404) @after_class(always_run=True) def tearDown(self): self._reset_access() for database in self.databases: self.dbaas.databases.delete(instance_info.id, database) asserts.assert_equal(202, self.dbaas.last_http_code) for username in self.users: self.dbaas.users.delete(instance_info.id, username) @test(depends_on_classes=[TestUserAccessPositive], groups=[tests.DBAAS_API, GROUP, GROUP_NEGATIVE, tests.INSTANCES], depends_on=[TestUserAccessPositive]) class TestUserAccessNegative(UserAccessBase): """ Negative tests for the creation and deletion of user grants. """ @before_class def setUp(self): super(TestUserAccessNegative, self).set_up() self.users = ["qe_user?neg3F", "qe_user#neg23"] self.databases = [("qe_user_neg_db%02i" % i) for i in range(2)] self.ghostdbs = [] def _add_users(self, users, expected_response=202): user_list = self._user_list_from_names(users) try: self.dbaas.users.create(instance_info.id, user_list) asserts.assert_equal(self.dbaas.last_http_code, 202) except exceptions.BadRequest: asserts.assert_equal(self.dbaas.last_http_code, 400) asserts.assert_equal(expected_response, self.dbaas.last_http_code) @test() def test_create_duplicate_user_and_dbs(self): """ Create the same user to the first DB - allowed, not part of change """ users = self._user_list_from_names(self.users) self.dbaas.users.create(instance_info.id, users) asserts.assert_equal(202, self.dbaas.last_http_code) databases = [{"name": db} for db in self.databases] self.dbaas.databases.create(instance_info.id, databases) asserts.assert_equal(202, self.dbaas.last_http_code) @test(depends_on=[test_create_duplicate_user_and_dbs]) def test_neg_duplicate_useraccess(self): """ Grant duplicate users access to all database. 
""" username = "qe_user.neg2E" self._add_users([username]) self._add_users([username], 400) for repeat in range(3): self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) @test() def test_re_create_user(self): user_list = ["re_create_user"] # create, grant, then check a new user self._add_users(user_list) self._test_access(user_list, []) self._grant_access_singular(user_list[0], self.databases) self._test_access(user_list, self.databases) # drop the user temporarily self.dbaas.users.delete(instance_info.id, user_list[0]) # check his access - user should not be found asserts.assert_raises(exceptions.NotFound, self.dbaas.users.list_access, instance_info.id, user_list[0]) # re-create the user self._add_users(user_list) # check his access - should not exist self._test_access(user_list, []) # grant user access to all database. self._grant_access_singular(user_list[0], self.databases) # check his access - user should exist self._test_access(user_list, self.databases) # revoke users access self._revoke_access_plural(user_list, self.databases) def _negative_user_test(self, username, databases, create_response=202, grant_response=202, access_response=200, revoke_response=202): # Try and fail to create the user. self._add_users([username], create_response) self._grant_access_singular(username, databases, grant_response) access = None try: access = self.dbaas.users.list_access(instance_info.id, username) asserts.assert_equal(200, self.dbaas.last_http_code) except exceptions.BadRequest: asserts.assert_equal(400, self.dbaas.last_http_code) except exceptions.NotFound: asserts.assert_equal(404, self.dbaas.last_http_code) finally: asserts.assert_equal(access_response, self.dbaas.last_http_code) if access is not None: access = [db.name for db in access] asserts.assert_equal(set(access), set(self.databases)) self._revoke_access_plural([username], databases, revoke_response) @test def test_user_withperiod(self): # This is actually fine; we escape dots in the user-host pairing. self._negative_user_test("test.user", self.databases) @test def test_user_empty_no_host(self): # This creates a request to ...//users//databases, # which is parsed to mean "show me user 'databases', which in this # case is a valid username, but not one of an extant user. self._negative_user_test("", self.databases, 400, 500, 404, 404) @test def test_user_empty_with_host(self): # self._negative_user_test("", self.databases, 400, 400, 400, 400) # Try and fail to create the user. empty_user = {"name": "", "host": "%", "password": "password", "databases": []} asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, [empty_user]) asserts.assert_equal(400, self.dbaas.last_http_code) asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.grant, instance_info.id, "", [], "%") asserts.assert_equal(400, self.dbaas.last_http_code) asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.list_access, instance_info.id, "", "%") asserts.assert_equal(400, self.dbaas.last_http_code) asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.revoke, instance_info.id, "", "db", "%") asserts.assert_equal(400, self.dbaas.last_http_code) @test def test_user_nametoolong(self): # You cannot create a user with this name. # Grant revoke, and access filter this username as invalid. 
self._negative_user_test("exceed_limit_user", self.databases, 400, 400, 400, 400) @test def test_user_allspaces(self): self._negative_user_test(" ", self.databases, 400, 400, 400, 400) @after_class(always_run=True) def tearDown(self): self._reset_access() for database in self.databases: self.dbaas.databases.delete(instance_info.id, database) asserts.assert_equal(202, self.dbaas.last_http_code) for username in self.users: self.dbaas.users.delete(instance_info.id, username) trove-5.0.0/trove/tests/api/versions.py0000664000567000056710000000634012701410316021306 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis.asserts import assert_equal from proboscis import before_class from proboscis import SkipTest from proboscis import test from troveclient.compat.exceptions import ClientException from trove import tests from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.versions" @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES, 'DBAAS_VERSIONS'], depends_on_groups=["services.initialize"]) class Versions(object): """Test listing all versions and verify the current version.""" @before_class def setUp(self): """Sets up the client.""" user = test_config.users.find_user(Requirements(is_admin=False)) self.client = create_dbaas_client(user) @test def test_list_versions_index(self): versions = self.client.versions.index(test_config.version_url) assert_equal(1, len(versions)) assert_equal("CURRENT", versions[0].status, message="Version status: %s" % versions[0].status) expected_version = test_config.values['trove_version'] assert_equal(expected_version, versions[0].id, message="Version ID: %s" % versions[0].id) expected_api_updated = test_config.values['trove_api_updated'] assert_equal(expected_api_updated, versions[0].updated, message="Version updated: %s" % versions[0].updated) def _request(self, url, method='GET', response='200'): resp, body = None, None full_url = test_config.version_url + url try: resp, body = self.client.client.request(full_url, method) assert_equal(resp.get('status', ''), response) except ClientException as ce: assert_equal(str(ce.http_status), response) return body @test def test_no_slash_no_version(self): self._request('') @test def test_no_slash_with_version(self): if test_config.auth_strategy == "fake": raise SkipTest("Skipping this test since auth is faked.") self._request('/v1.0', response='401') @test def test_with_slash_no_version(self): self._request('/') @test def test_with_slash_with_version(self): if test_config.auth_strategy == "fake": raise SkipTest("Skipping this test since auth is faked.") self._request('/v1.0/', response='401') @test def test_request_no_version(self): self._request('/dbaas/instances', response='404') @test def test_request_bogus_version(self): self._request('/0.0/', response='404') trove-5.0.0/trove/tests/api/mgmt/0000775000567000056710000000000012701410521020023 5ustar 
jenkinsjenkins00000000000000trove-5.0.0/trove/tests/api/mgmt/quotas.py0000664000567000056710000001551512701410316021722 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from proboscis import after_class from proboscis import asserts from proboscis.asserts import Check from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove.tests.config import CONFIG from trove.tests.util import create_client from trove.tests.util import create_dbaas_client from trove.tests.util import get_standby_instance_flavor from trove.tests.util.users import Requirements class QuotasBase(object): def setUp(self): self.user1 = CONFIG.users.find_user(Requirements(is_admin=False)) self.user2 = CONFIG.users.find_user(Requirements(is_admin=False)) asserts.assert_not_equal(self.user1.tenant, self.user2.tenant, "Not enough users to run QuotasTest." + " Needs >=2.") self.client1 = create_dbaas_client(self.user1) self.client2 = create_dbaas_client(self.user2) self.mgmt_client = create_client(is_admin=True) ''' Orig quotas from config "trove_max_instances_per_tenant": 55, "trove_max_volumes_per_tenant": 100, ''' self.original_quotas1 = self.mgmt_client.quota.show(self.user1.tenant) self.original_quotas2 = self.mgmt_client.quota.show(self.user2.tenant) def tearDown(self): self.mgmt_client.quota.update(self.user1.tenant, self.original_quotas1) self.mgmt_client.quota.update(self.user2.tenant, self.original_quotas2) @test(groups=["dbaas.api.mgmt.quotas"]) class DefaultQuotasTest(QuotasBase): @before_class def setUp(self): super(DefaultQuotasTest, self).setUp() @after_class def tearDown(self): super(DefaultQuotasTest, self).tearDown() @test def check_quotas_are_set_to_defaults(self): quotas = self.mgmt_client.quota.show(self.user1.tenant) with Check() as check: check.equal(CONFIG.trove_max_instances_per_tenant, quotas["instances"]) check.equal(CONFIG.trove_max_volumes_per_user, quotas["volumes"]) asserts.assert_equal(len(quotas), 2) @test(groups=["dbaas.api.mgmt.quotas"]) class ChangeInstancesQuota(QuotasBase): @before_class def setUp(self): super(ChangeInstancesQuota, self).setUp() self.mgmt_client.quota.update(self.user1.tenant, {"instances": 0}) asserts.assert_equal(200, self.mgmt_client.last_http_code) @after_class def tearDown(self): super(ChangeInstancesQuota, self).tearDown() @test def check_user2_is_not_affected_on_instances_quota_change(self): user2_current_quota = self.mgmt_client.quota.show(self.user2.tenant) asserts.assert_equal(self.original_quotas2, user2_current_quota, "Changing one user's quota affected another" + "user's quota." + " Original: %s. 
After Quota Change: %s" % (self.original_quotas2, user2_current_quota)) @test def verify_correct_update(self): quotas = self.mgmt_client.quota.show(self.user1.tenant) with Check() as check: check.equal(0, quotas["instances"]) check.equal(CONFIG.trove_max_volumes_per_tenant, quotas["volumes"]) asserts.assert_equal(len(quotas), 2) @test def create_too_many_instances(self): flavor, flavor_href = get_standby_instance_flavor(self.client1) asserts.assert_raises(exceptions.OverLimit, self.client1.instances.create, "too_many_instances", flavor_href, {'size': 1}) asserts.assert_equal(413, self.client1.last_http_code) @test(groups=["dbaas.api.mgmt.quotas"]) class ChangeVolumesQuota(QuotasBase): @before_class def setUp(self): super(ChangeVolumesQuota, self).setUp() self.mgmt_client.quota.update(self.user1.tenant, {"volumes": 0}) asserts.assert_equal(200, self.mgmt_client.last_http_code) @after_class def tearDown(self): super(ChangeVolumesQuota, self).tearDown() @test def check_volumes_overlimit(self): flavor, flavor_href = get_standby_instance_flavor(self.client1) asserts.assert_raises(exceptions.OverLimit, self.client1.instances.create, "too_large_volume", flavor_href, {'size': CONFIG.trove_max_accepted_volume_size + 1}) asserts.assert_equal(413, self.client1.last_http_code) @test def check_user2_is_not_affected_on_volumes_quota_change(self): user2_current_quota = self.mgmt_client.quota.show(self.user2.tenant) asserts.assert_equal(self.original_quotas2, user2_current_quota, "Changing one user's quota affected another" + "user's quota." + " Original: %s. After Quota Change: %s" % (self.original_quotas2, user2_current_quota)) @test def verify_correct_update(self): quotas = self.mgmt_client.quota.show(self.user1.tenant) with Check() as check: check.equal(CONFIG.trove_max_instances_per_tenant, quotas["instances"]) check.equal(0, quotas["volumes"]) asserts.assert_equal(len(quotas), 2) @test def create_too_large_volume(self): flavor, flavor_href = get_standby_instance_flavor(self.client1) asserts.assert_raises(exceptions.OverLimit, self.client1.instances.create, "too_large_volume", flavor_href, {'size': CONFIG.trove_max_accepted_volume_size + 1}) asserts.assert_equal(413, self.client1.last_http_code) # create an instance when I set the limit back to # multiple updates to the quota and it should do what you expect trove-5.0.0/trove/tests/api/mgmt/instances.py0000664000567000056710000002572512701410316022401 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from proboscis.asserts import assert_equal from proboscis.asserts import assert_raises from proboscis import before_class from proboscis.check import Check from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common.utils import poll_until from trove.tests.api.instances import CreateInstance from trove.tests.api.instances import GROUP_START from trove.tests.api.instances import GROUP_TEST from trove.tests.api.instances import instance_info from trove.tests.config import CONFIG from trove.tests.util.check import CollectionCheck from trove.tests.util.check import TypeCheck from trove.tests.util import create_client from trove.tests.util import create_dbaas_client from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.instances" @test(groups=[GROUP]) def mgmt_index_requires_admin_account(): """Verify that an admin context is required to call this function.""" client = create_client(is_admin=False) assert_raises(exceptions.Unauthorized, client.management.index) # These functions check some dictionaries in the returned response. def flavor_check(flavor): with CollectionCheck("flavor", flavor) as check: check.has_element("id", basestring) check.has_element("links", list) def datastore_check(datastore): with CollectionCheck("datastore", datastore) as check: check.has_element("type", basestring) check.has_element("version", basestring) def guest_status_check(guest_status): with CollectionCheck("guest_status", guest_status) as check: check.has_element("state_description", basestring) def volume_check(volume): with CollectionCheck("volume", volume) as check: check.has_element("id", basestring) check.has_element("size", int) check.has_element("used", float) check.has_element("total", float) @test(depends_on_groups=[GROUP_START], groups=[GROUP, GROUP_TEST]) def mgmt_instance_get(): """Tests the mgmt instances index method.""" reqs = Requirements(is_admin=True) user = CONFIG.users.find_user(reqs) client = create_dbaas_client(user) mgmt = client.management # Grab the info.id created by the main instance test which is stored in # a global. id = instance_info.id api_instance = mgmt.show(id) datastore = getattr(api_instance, 'datastore') datastore_type = datastore.get('type') # Print out all fields for extra info if the test fails. for name in dir(api_instance): print(str(name) + "=" + str(getattr(api_instance, name))) with TypeCheck("instance", api_instance) as instance: instance.has_field('created', basestring) instance.has_field('deleted', bool) # If the instance hasn't been deleted, this should be false... but # lets avoid creating more ordering work. instance.has_field('deleted_at', (basestring, None)) instance.has_field('flavor', dict, flavor_check) instance.has_field('datastore', dict, datastore_check) instance.has_field('guest_status', dict, guest_status_check) instance.has_field('id', basestring) instance.has_field('links', list) instance.has_field('name', basestring) # instance.has_field('server_status', basestring) instance.has_field('status', basestring) instance.has_field('tenant_id', basestring) instance.has_field('updated', basestring) # Can be None if no volume is given on this instance. volume_support = CONFIG.get(datastore_type, 'mysql')['volume_support'] if volume_support: instance.has_field('volume', dict, volume_check) else: instance.has_field('volume', None) # TODO(tim-simpson): Validate additional fields, assert # no extra fields exist. 
    if api_instance.server is not None:
        print("the real content of server: %s" % dir(api_instance.server))
        print("the type of server: %s" % type(api_instance.server))
        print("the real content of api_instance: %s" % dir(api_instance))
        print("the type of api_instance: %s" % type(api_instance))
        print(hasattr(api_instance, "server"))
        with CollectionCheck("server", api_instance.server) as server:
            server.has_element("addresses", dict)
            server.has_element("deleted", bool)
            server.has_element("deleted_at", (basestring, None))
            server.has_element("host", basestring)
            server.has_element("id", basestring)
            server.has_element("local_id", int)
            server.has_element("name", basestring)
            server.has_element("status", basestring)
            server.has_element("tenant_id", basestring)
    if (volume_support and CONFIG.trove_main_instance_has_volume):
        with CollectionCheck("volume", api_instance.volume) as volume:
            volume.has_element("attachments", list)
            volume.has_element("availability_zone", basestring)
            volume.has_element("created_at", (basestring, None))
            volume.has_element("id", basestring)
            volume.has_element("size", int)
            volume.has_element("status", basestring)


@test(groups=["fake." + GROUP])
class WhenMgmtInstanceGetIsCalledButServerIsNotReady(object):

    @before_class
    def set_up(self):
        """Create client for mgmt instance test (2)."""
        if not CONFIG.fake_mode:
            raise SkipTest("This test only works in fake mode.")
        self.client = create_client(is_admin=True)
        self.mgmt = self.client.management
        # Fake nova will fail a server ending with 'test_SERVER_ERROR'.
        # Fake volume will fail if the size is 13.
        # TODO(tim.simpson): This would be a lot nicer looking if we used a
        #                    traditional mock framework.
        datastore = {'type': 'mysql', 'version': '5.5'}
        body = {'datastore': datastore}
        vol_support = CONFIG.get(datastore['type'], 'mysql')['volume_support']
        if vol_support:
            body.update({'size': 13})
        response = self.client.instances.create(
            'test_SERVER_ERROR',
            instance_info.dbaas_flavor_href,
            body,
            [])
        poll_until(lambda: self.client.instances.get(response.id),
                   lambda instance: instance.status == 'ERROR',
                   time_out=10)
        self.id = response.id

    @test
    def mgmt_instance_get(self):
        """Tests the mgmt get call works when the Nova server isn't ready."""
        api_instance = self.mgmt.show(self.id)
        # Print out all fields for extra info if the test fails.
        for name in dir(api_instance):
            print(str(name) + "=" + str(getattr(api_instance, name)))
        with TypeCheck("instance", api_instance) as instance:
            instance.has_field('created', basestring)
            instance.has_field('deleted', bool)
            # If the instance hasn't been deleted, this should be false...
            # but let's avoid creating more ordering work.
            instance.has_field('deleted_at', (basestring, None))
            instance.has_field('flavor', dict, flavor_check)
            instance.has_field('datastore', dict, datastore_check)
            instance.has_field('guest_status', dict, guest_status_check)
            instance.has_field('id', basestring)
            instance.has_field('links', list)
            instance.has_field('name', basestring)
            # instance.has_field('server_status', basestring)
            instance.has_field('status', basestring)
            instance.has_field('tenant_id', basestring)
            instance.has_field('updated', basestring)
            # Can be None if no volume is given on this instance.
            instance.has_field('server', None)
            instance.has_field('volume', None)
            # TODO(tim-simpson): Validate additional fields,
            #                    assert no extra fields exist.
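# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original suite): set_up() above waits for
# the fake instance to reach ERROR via poll_until(retriever, condition,
# time_out). For readers unfamiliar with that helper, a minimal stand-in
# with the same calling convention; the sleep interval is an assumption.
import time


def _poll_until_sketch(retriever, condition=bool, time_out=60, sleep_time=1):
    """Call retriever() until condition(result) is true, or raise on timeout."""
    deadline = time.time() + time_out
    while time.time() < deadline:
        if condition(retriever()):
            return
        time.sleep(sleep_time)
    raise AssertionError("Condition not met within %s seconds." % time_out)
# ---------------------------------------------------------------------------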
@test(depends_on_classes=[CreateInstance], groups=[GROUP]) class MgmtInstancesIndex(object): """Tests the mgmt instances index method.""" @before_class def setUp(self): """Create client for mgmt instance test.""" reqs = Requirements(is_admin=True) self.user = CONFIG.users.find_user(reqs) self.client = create_dbaas_client(self.user) @test def test_mgmt_instance_index_fields_present(self): """ Verify that all the expected fields are returned by the index method. """ expected_fields = [ 'created', 'deleted', 'deleted_at', 'flavor', 'datastore', 'id', 'links', 'name', 'server', 'status', 'task_description', 'tenant_id', 'updated', ] if CONFIG.trove_volume_support: expected_fields.append('volume') index = self.client.management.index() if not hasattr(index, "deleted"): raise SkipTest("instance index must have a " "deleted label for this test") for instance in index: with Check() as check: for field in expected_fields: check.true(hasattr(instance, field), "Index lacks field %s" % field) @test def test_mgmt_instance_index_check_filter(self): """ Make sure that the deleted= filter works as expected, and no instances are excluded. """ if not hasattr(self.client.management.index, 'deleted'): raise SkipTest("instance index must have a deleted " "label for this test") instance_counts = [] for deleted_filter in (True, False): filtered_index = self.client.management.index( deleted=deleted_filter) instance_counts.append(len(filtered_index)) for instance in filtered_index: # Every instance listed here should have the proper value # for 'deleted'. assert_equal(deleted_filter, instance.deleted) full_index = self.client.management.index() # There should be no instances that are neither deleted or not-deleted. assert_equal(len(full_index), sum(instance_counts)) trove-5.0.0/trove/tests/api/mgmt/__init__.py0000664000567000056710000000000012701410316022124 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/api/mgmt/datastore_versions.py0000664000567000056710000001511512701410316024320 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
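# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original module): the tests
# below exercise the full datastore-version lifecycle through the compat
# client, roughly:
#
#     versions = client.mgmt_datastore_versions.list()
#     client.mgmt_datastore_versions.create(
#         'test_version1', 'test_ds', 'test_mgr', image_id, ['vertica-7.1'])
#     client.mgmt_datastore_versions.edit(version_id,
#                                         image=new_image_id,
#                                         packages=['pkg1'])
#     client.mgmt_datastore_versions.delete(version_id)
#
# Each mutating call is expected to return HTTP 202, and the final delete
# restores the original version count.
# ---------------------------------------------------------------------------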
from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis.check import Check from proboscis import test from troveclient.compat import exceptions from trove.tests.config import CONFIG from trove.tests.util import create_client from trove.tests.util import create_dbaas_client from trove.tests.util import create_nova_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.ds_versions" @test(groups=[GROUP]) def mgmt_datastore_version_list_requires_admin_account(): """Verify that an admin context is required to call this function.""" client = create_client(is_admin=False) assert_raises(exceptions.Unauthorized, client.mgmt_datastore_versions.list) @test(groups=[GROUP]) class MgmtDataStoreVersion(object): """Tests the mgmt datastore version methods.""" @before_class def setUp(self): """Create client for tests.""" reqs = Requirements(is_admin=True) self.user = CONFIG.users.find_user(reqs) self.client = create_dbaas_client(self.user) if test_config.nova_client is not None: nova_user = test_config.users.find_user( Requirements(services=["nova"])) self.nova_client = create_nova_client(nova_user) self.images = self.nova_client.images.list() def _find_ds_version_by_name(self, ds_version_name): ds_versions = self.client.mgmt_datastore_versions.list() for ds_version in ds_versions: if ds_version_name == ds_version.name: return ds_version @test def test_mgmt_ds_version_list_original_count(self): """Tests the mgmt datastore version list method.""" self.ds_versions = self.client.mgmt_datastore_versions.list() # By default we create two datastore-versions for mysql assert_equal(2, len(self.ds_versions.items)) @test(depends_on=[test_mgmt_ds_version_list_original_count]) def test_mgmt_ds_version_list_fields_present(self): """Verify that all expected fields are returned by list method.""" expected_fields = [ 'id', 'name', 'datastore_id', 'datastore_name', 'datastore_manager', 'image', 'packages', 'active', 'default', ] for ds_version in self.ds_versions: with Check() as check: for field in expected_fields: check.true(hasattr(ds_version, field), "List lacks field %s." 
% field) @test(depends_on=[test_mgmt_ds_version_list_original_count]) def test_mgmt_ds_version_get(self): """Tests the mgmt datastore version get method.""" test_version = self.ds_versions[0] found_ds_version = self.client.mgmt_datastore_versions.get( test_version.id) assert_equal(test_version.name, found_ds_version.name) assert_equal(test_version.datastore_id, found_ds_version.datastore_id) assert_equal(test_version.datastore_name, found_ds_version.datastore_name) assert_equal(test_version.datastore_manager, found_ds_version.datastore_manager) assert_equal(test_version.image, found_ds_version.image) assert_equal(test_version.packages, found_ds_version.packages) assert_equal(test_version.active, found_ds_version.active) assert_equal(test_version.default, found_ds_version.default) @test(depends_on=[test_mgmt_ds_version_list_original_count]) def test_mgmt_ds_version_create(self): """Tests the mgmt datastore version create method.""" response = self.client.mgmt_datastore_versions.create( 'test_version1', 'test_ds', 'test_mgr', self.images[0].id, ['vertica-7.1']) assert_equal(None, response) assert_equal(202, self.client.last_http_code) # Since we created one more ds_version # lets check count of total ds_versions, it should be increased by 1 new_ds_versions = self.client.mgmt_datastore_versions.list() assert_equal(len(self.ds_versions.items) + 1, len(new_ds_versions.items)) # Match the contents of newly created ds_version. self.created_version = self._find_ds_version_by_name('test_version1') assert_equal('test_version1', self.created_version.name) assert_equal('test_ds', self.created_version.datastore_name) assert_equal('test_mgr', self.created_version.datastore_manager) assert_equal(self.images[0].id, self.created_version.image) assert_equal(['vertica-7.1'], self.created_version.packages) assert_true(self.created_version.active) assert_false(self.created_version.default) @test(depends_on=[test_mgmt_ds_version_create]) def test_mgmt_ds_version_patch(self): """Tests the mgmt datastore version edit method.""" self.client.mgmt_datastore_versions.edit( self.created_version.id, image=self.images[1].id, packages=['pkg1']) assert_equal(202, self.client.last_http_code) # Lets match the content of patched datastore patched_ds_version = self._find_ds_version_by_name('test_version1') assert_equal(self.images[1].id, patched_ds_version.image) assert_equal(['pkg1'], patched_ds_version.packages) @test(depends_on=[test_mgmt_ds_version_patch]) def test_mgmt_ds_version_delete(self): """Tests the mgmt datastore version delete method.""" self.client.mgmt_datastore_versions.delete(self.created_version.id) assert_equal(202, self.client.last_http_code) # Lets match the total count of ds_version, # it should get back to original ds_versions = self.client.mgmt_datastore_versions.list() assert_equal(len(self.ds_versions.items), len(ds_versions.items)) trove-5.0.0/trove/tests/api/mgmt/admin_required.py0000664000567000056710000000650112701410316023371 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from proboscis.asserts import assert_raises from proboscis import before_class from proboscis import test from troveclient.compat.exceptions import Unauthorized from trove import tests from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.admin" @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class TestAdminRequired(object): """ These tests verify that admin privileges are checked when calling management level functions. """ @before_class def setUp(self): """Create the user and client for use in the subsequent tests.""" self.user = test_config.users.find_user(Requirements(is_admin=False)) self.dbaas = create_dbaas_client(self.user) @test def test_accounts_show(self): """A regular user may not view the details of any account.""" assert_raises(Unauthorized, self.dbaas.accounts.show, 0) @test def test_hosts_index(self): """A regular user may not view the list of hosts.""" assert_raises(Unauthorized, self.dbaas.hosts.index) @test def test_hosts_get(self): """A regular user may not view the details of any host.""" assert_raises(Unauthorized, self.dbaas.hosts.get, 0) @test def test_mgmt_show(self): """ A regular user may not view the management details of any instance. """ assert_raises(Unauthorized, self.dbaas.management.show, 0) @test def test_mgmt_root_history(self): """ A regular user may not view the root access history of any instance. """ assert_raises(Unauthorized, self.dbaas.management.root_enabled_history, 0) @test def test_mgmt_instance_reboot(self): """A regular user may not perform an instance reboot.""" assert_raises(Unauthorized, self.dbaas.management.reboot, 0) @test def test_mgmt_instance_reset_task_status(self): """A regular user may not perform an instance task status reset.""" assert_raises(Unauthorized, self.dbaas.management.reset_task_status, 0) @test def test_storage_index(self): """A regular user may not view the list of storage available.""" assert_raises(Unauthorized, self.dbaas.storage.index) @test def test_diagnostics_get(self): """A regular user may not view the diagnostics.""" assert_raises(Unauthorized, self.dbaas.diagnostics.get, 0) @test def test_hwinfo_get(self): """A regular user may not view the hardware info.""" assert_raises(Unauthorized, self.dbaas.hwinfo.get, 0) trove-5.0.0/trove/tests/api/mgmt/configurations.py0000664000567000056710000001763312701410316023443 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
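# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original module): the tests
# below drive configuration-parameter management through
# admin_client.mgmt_configs, whose calls, as used in this module, are:
#
#     client.create(datastore_version_id, name, restart_required,
#                   data_type, max_size, min_size)
#     client.modify(datastore_version_id, name, restart_required,
#                   data_type, max_size, min_size)
#     client.delete(datastore_version_id, name)
#     client.parameters_by_version(datastore_version_id)          # live only
#     client.list_all_parameter_by_version(datastore_version_id)  # + deleted
#     client.get_parameter_by_version(datastore_version_id, name)
#     client.get_any_parameter_by_version(datastore_version_id, name)
# ---------------------------------------------------------------------------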
from proboscis import asserts from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.configurations" @test(groups=[GROUP, tests.DBAAS_API, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class ConfigGroupsSetupBeforeInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.admin_client = create_dbaas_client(self.user) self.datastore_version_id = self.admin_client.datastore_versions.get( "mysql", "5.5").id @test def test_valid_config_create_type(self): name = "testconfig-create" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs param_list = client.parameters_by_version( self.datastore_version_id) asserts.assert_true(name not in [p.name for p in param_list]) client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) param_list = client.parameters_by_version( self.datastore_version_id) asserts.assert_true(name in [p.name for p in param_list]) param = client.get_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(name, param.name) asserts.assert_equal(restart_required, param.restart_required) asserts.assert_equal(data_type, param.type) # test the modify restart_required = 0 data_type = "integer" max_size = "10" min_size = "1" client.modify( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) param = client.get_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(name, param.name) asserts.assert_equal(restart_required, param.restart_required) asserts.assert_equal(data_type, param.type) asserts.assert_equal(max_size, param.max) asserts.assert_equal(min_size, param.min) client.delete(self.datastore_version_id, name) # test show deleted params work param_list = client.list_all_parameter_by_version( self.datastore_version_id) asserts.assert_true(name in [p.name for p in param_list]) param = client.get_any_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(name, param.name) asserts.assert_equal(restart_required, param.restart_required) asserts.assert_equal(data_type, param.type) asserts.assert_equal(int(max_size), int(param.max)) asserts.assert_equal(int(min_size), int(param.min)) asserts.assert_equal(True, param.deleted) asserts.assert_true(param.deleted_at) def test_create_config_type_twice_fails(self): name = "test-delete-config-types" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) asserts.assert_raises(exceptions.BadRequest, client.create, self.datastore_version_id, name, restart_required, data_type, max_size, min_size) client.delete(self.datastore_version_id, name) config_list = client.parameters_by_version(self.datastore_version_id) asserts.assert_true(name not in [conf.name for conf in config_list]) # testing that recreate of a deleted parameter works. 
client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) config_list = client.parameters_by_version(self.datastore_version_id) asserts.assert_false(name not in [conf.name for conf in config_list]) @test def test_delete_config_type(self): name = "test-delete-config-types" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) client.delete(self.datastore_version_id, name) config_list = client.parameters_by_version(self.datastore_version_id) asserts.assert_true(name not in [conf.name for conf in config_list]) @test def test_delete_config_type_fail(self): asserts.assert_raises( exceptions.BadRequest, self.admin_client.mgmt_configs.delete, self.datastore_version_id, "test-delete-config-types") @test def test_invalid_config_create_type(self): name = "testconfig_invalid_type" restart_required = 1 data_type = "other" max_size = None min_size = None asserts.assert_raises( exceptions.BadRequest, self.admin_client.mgmt_configs.create, self.datastore_version_id, name, restart_required, data_type, max_size, min_size) @test def test_invalid_config_create_restart_required(self): name = "testconfig_invalid_restart_required" restart_required = 5 data_type = "string" max_size = None min_size = None asserts.assert_raises( exceptions.BadRequest, self.admin_client.mgmt_configs.create, self.datastore_version_id, name, restart_required, data_type, max_size, min_size) @test def test_config_parameter_was_deleted_then_recreate_updates_it(self): name = "test-delete-and-recreate-param" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) client.delete(self.datastore_version_id, name) client.create( self.datastore_version_id, name, 0, data_type, max_size, min_size) param_list = client.list_all_parameter_by_version( self.datastore_version_id) asserts.assert_true(name in [p.name for p in param_list]) param = client.get_any_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(False, param.deleted) trove-5.0.0/trove/tests/api/mgmt/hosts.py0000664000567000056710000002112612701410316021541 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
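# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original module):
# percent_boundary() below pads the integer RAM-usage percentage by +/- 2
# to absorb rounding errors. A worked example with assumed values:
#
#     percent_boundary(512, 2048)
#     # calc = int((1.0 * 512 / 2048) * 100) = 25  ->  returns (23, 27)
# ---------------------------------------------------------------------------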
from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis.check import Check from proboscis import test from troveclient.compat import exceptions from trove.tests.api.instances import create_new_instance from trove.tests.api.instances import CreateInstance from trove.tests.config import CONFIG from trove.tests import DBAAS_API from trove.tests import INSTANCES from trove.tests import PRE_INSTANCES from trove.tests.util import create_dbaas_client from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.hosts" def percent_boundary(used_ram, total_ram): """Return a upper and lower bound for percent ram used.""" calc = int((1.0 * used_ram / total_ram) * 100) # return calculated percent +/- 2 to account for rounding errors lower_boundary = calc - 2 upper_boundary = calc + 2 return lower_boundary, upper_boundary @test(groups=[DBAAS_API, GROUP, PRE_INSTANCES], depends_on_groups=["services.initialize"], enabled=create_new_instance()) class HostsBeforeInstanceCreation(object): @before_class def setUp(self): self.user = CONFIG.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) self.host = None @test def test_empty_index_host_list(self): host_index_result = self.client.hosts.index() assert_not_equal(host_index_result, None, "list hosts call should not be empty: %s" % str(host_index_result)) assert_true(len(host_index_result) > 0, "list hosts length should be greater than zero: %r" % host_index_result) self.host = host_index_result[0] assert_true(self.host is not None, "Expected to find a host.") @test(depends_on=[test_empty_index_host_list]) def test_empty_index_host_list_single(self): self.host.name = self.host.name.replace(".", "\.") result = self.client.hosts.get(self.host) assert_not_equal(result, None, "Get host should not be empty for: %s" % self.host) with Check() as check: used_ram = int(result.usedRAM) total_ram = int(result.totalRAM) percent_used = int(result.percentUsed) lower, upper = percent_boundary(used_ram, total_ram) check.true(percent_used > lower, "percentUsed %r is below the lower boundary %r" % (percent_used, lower)) check.true(percent_used < upper, "percentUsed %r is above the upper boundary %r" % (percent_used, upper)) check.true(used_ram < total_ram, "usedRAM %r should be less than totalRAM %r" % (used_ram, total_ram)) check.true(percent_used < 100, "percentUsed should be less than 100 but was %r" % percent_used) check.true(total_ram > 0, "totalRAM should be greater than 0 but was %r" % total_ram) check.true(used_ram < total_ram, "usedRAM %r should be less than totalRAM %r" % (used_ram, total_ram)) @test(groups=[INSTANCES, GROUP], depends_on=[CreateInstance], enabled=create_new_instance()) class HostsMgmtCommands(object): @before_class def setUp(self): self.user = CONFIG.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) self.host = None @test def test_index_host_list(self): result = self.client.hosts.index() assert_not_equal(len(result), 0, "list hosts should not be empty: %s" % str(result)) hosts = [] # Find a host with an instanceCount > 0 for host in result: msg = 'Host: %s, Count: %s' % (host.name, host.instanceCount) hosts.append(msg) if int(host.instanceCount) > 0: self.host = host break msg = "Unable to find a host with instances: %r" % hosts assert_not_equal(self.host, None, msg) @test(depends_on=[test_index_host_list]) def 
test_index_host_list_single(self): self.host.name = self.host.name.replace(".", "\.") result = self.client.hosts.get(self.host) assert_not_equal(result, None, "list hosts should not be empty: %s" % str(result)) assert_true(len(result.instances) > 0, "instance list on the host should not be empty: %r" % result.instances) with Check() as check: used_ram = int(result.usedRAM) total_ram = int(result.totalRAM) percent_used = int(result.percentUsed) lower, upper = percent_boundary(used_ram, total_ram) check.true(percent_used > lower, "percentUsed %r is below the lower boundary %r" % (percent_used, lower)) check.true(percent_used < upper, "percentUsed %r is above the upper boundary %r" % (percent_used, upper)) check.true(used_ram < total_ram, "usedRAM %r should be less than totalRAM %r" % (used_ram, total_ram)) check.true(percent_used < 100, "percentUsed should be less than 100 but was %r" % percent_used) check.true(total_ram > 0, "totalRAM should be greater than 0 but was %r" % total_ram) check.true(used_ram < total_ram, "usedRAM %r should be less than totalRAM %r" % (used_ram, total_ram)) # Check all active instances and validate all the fields exist active_instance = None for instance in result.instances: print("instance: %s" % instance) if instance['status'] != 'ACTIVE': continue active_instance = instance check.is_not_none(instance['id']) check.is_not_none(instance['name']) check.is_not_none(instance['status']) check.is_not_none(instance['server_id']) check.is_not_none(instance['tenant_id']) check.true(active_instance is not None, "No active instances") def _get_ids(self): """Get all the ids of instances that are ACTIVE.""" ids = [] results = self.client.hosts.index() for host in results: result = self.client.hosts.get(host) for instance in result.instances: if instance['status'] == 'ACTIVE': ids.append(instance['id']) return ids @test def test_update_hosts(self): ids = self._get_ids() assert_not_equal(ids, [], "No active instances found") before_versions = {} for _id in ids: diagnostics = self.client.diagnostics.get(_id) before_versions[_id] = diagnostics.version hosts = self.client.hosts.index() for host in hosts: self.client.hosts.update_all(host.name) after_versions = {} for _id in ids: diagnostics = self.client.diagnostics.get(_id) after_versions[_id] = diagnostics.version assert_not_equal(before_versions, {}, "No versions found before update") assert_not_equal(after_versions, {}, "No versions found after update") if CONFIG.fake_mode: for _id in after_versions: assert_not_equal(before_versions[_id], after_versions[_id]) @test def test_host_not_found(self): hostname = "host@$%3dne" assert_raises(exceptions.NotFound, self.client.hosts.get, hostname) trove-5.0.0/trove/tests/api/mgmt/malformed_json.py0000664000567000056710000002765212701410316023412 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
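# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original module): every test
# below follows the same pattern -- send a deliberately malformed request
# body, then assert on the HTTP status captured by the compat client:
#
#     try:
#         self.dbaas.instances.create("bad_instance", 3, 3,
#                                     databases="foo", users="bar")
#     except Exception as e:
#         resp, body = self.dbaas.client.last_response
#         asserts.assert_equal(resp.status, 400)
#
# The exception message is then matched against the expected validation
# errors with assert_contains.
# ---------------------------------------------------------------------------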
# from collections import deque from proboscis import after_class from proboscis import asserts from proboscis import before_class from proboscis import test from trove.common.utils import poll_until from trove.tests.api.instances import instance_info from trove.tests.api.instances import VOLUME_SUPPORT from trove.tests.config import CONFIG from trove.tests.util import assert_contains from trove.tests.util import create_dbaas_client from trove.tests.util.users import Requirements @test(groups=["dbaas.api.mgmt.malformed_json"]) class MalformedJson(object): @before_class def setUp(self): self.reqs = Requirements(is_admin=False) self.user = CONFIG.users.find_user(self.reqs) self.dbaas = create_dbaas_client(self.user) volume = None if VOLUME_SUPPORT: volume = {"size": 1} self.instance = self.dbaas.instances.create( name="qe_instance", flavor_id=instance_info.dbaas_flavor_href, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, volume=volume, databases=[{"name": "firstdb", "character_set": "latin2", "collate": "latin2_general_ci"}]) @after_class def tearDown(self): self.dbaas.instances.delete(self.instance) @test def test_bad_instance_data(self): databases = "foo" users = "bar" try: self.dbaas.instances.create("bad_instance", 3, 3, databases=databases, users=users) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s," " exception %s" % (httpCode, e)) databases = "u'foo'" users = "u'bar'" assert_contains( e.message, ["Validation error:", "instance['databases'] %s is not of type 'array'" % databases, "instance['users'] %s is not of type 'array'" % users, "instance['volume'] 3 is not of type 'object'"]) @test def test_bad_database_data(self): _bad_db_data = "{foo}" try: self.dbaas.databases.create(self.instance.id, _bad_db_data) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create database failed with code %s, " "exception %s" % (httpCode, e)) _bad_db_data = "u'{foo}'" asserts.assert_equal(e.message, "Validation error: " "databases %s is not of type 'array'" % _bad_db_data) @test def test_bad_user_data(self): def format_path(values): values = list(values) msg = "%s%s" % (values[0], ''.join(['[%r]' % i for i in values[1:]])) return msg _user = [] _user_name = "F343jasdf" _user.append({"name12": _user_name, "password12": "password"}) try: self.dbaas.users.create(self.instance.id, _user) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create user failed with code %s, " "exception %s" % (httpCode, e)) err_1 = format_path(deque(('users', 0))) assert_contains( e.message, ["Validation error:", "%(err_1)s 'name' is a required property" % {'err_1': err_1}, "%(err_1)s 'password' is a required property" % {'err_1': err_1}]) @test def test_bad_resize_instance_data(self): def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.instances.resize_instance(self.instance.id, "") except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Resize instance failed with code %s, " "exception %s" % (httpCode, e)) @test def test_bad_resize_vol_data(self): def _check_instance_status(): inst = 
self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) data = "bad data" try: self.dbaas.instances.resize_volume(self.instance.id, data) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Resize instance failed with code %s, " "exception %s" % (httpCode, e)) data = "u'bad data'" assert_contains( e.message, ["Validation error:", "resize['volume']['size'] %s is not valid under " "any of the given schemas" % data, "%s is not of type 'integer'" % data, "%s does not match '^[0-9]+$'" % data]) @test def test_bad_change_user_password(self): password = "" users = [{"name": password}] def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.users.change_passwords(self.instance, users) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Change usr/passwd failed with code %s, " "exception %s" % (httpCode, e)) password = "u''" assert_contains( e.message, ["Validation error: users[0] 'password' " "is a required property", "users[0]['name'] %s is too short" % password, "users[0]['name'] %s does not match " "'^.*[0-9a-zA-Z]+.*$'" % password]) @test def test_bad_grant_user_access(self): dbs = [] def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.users.grant(self.instance, self.user, dbs) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Grant user access failed with code %s, " "exception %s" % (httpCode, e)) @test def test_bad_revoke_user_access(self): db = "" def _check_instance_status(): inst = self.dbaas.instances.get(self.instance) if inst.status == "ACTIVE": return True else: return False poll_until(_check_instance_status) try: self.dbaas.users.revoke(self.instance, self.user, db) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 404, "Revoke user access failed w/code %s, " "exception %s" % (httpCode, e)) asserts.assert_equal(e.message, "The resource could not be found.") @test def test_bad_body_flavorid_create_instance(self): flavorId = ["?"] try: self.dbaas.instances.create("test_instance", flavorId, 2) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s, " "exception %s" % (httpCode, e)) flavorId = [u'?'] assert_contains( e.message, ["Validation error:", "instance['flavorRef'] %s is not valid " "under any of the given schemas" % flavorId, "%s is not of type 'string'" % flavorId, "%s is not of type 'string'" % flavorId, "%s is not of type 'integer'" % flavorId, "instance['volume'] 2 is not of type 'object'"]) @test def test_bad_body_datastore_create_instance(self): datastore = "*" datastore_version = "*" try: self.dbaas.instances.create("test_instance", 3, {"size": 2}, datastore=datastore, datastore_version=datastore_version) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s, " "exception %s" % (httpCode, e)) assert_contains( e.message, 
["Validation error:", "instance['datastore']['type']" " u'%s' does not match" " '^.*[0-9a-zA-Z]+.*$'" % datastore, "instance['datastore']['version'] u'%s' " "does not match '^.*[0-9a-zA-Z]+.*$'" % datastore_version]) @test def test_bad_body_volsize_create_instance(self): volsize = "h3ll0" try: self.dbaas.instances.create("test_instance", "1", volsize) except Exception as e: resp, body = self.dbaas.client.last_response httpCode = resp.status asserts.assert_equal(httpCode, 400, "Create instance failed with code %s, " "exception %s" % (httpCode, e)) volsize = "u'h3ll0'" asserts.assert_equal(e.message, "Validation error: " "instance['volume'] %s is not of " "type 'object'" % volsize) trove-5.0.0/trove/tests/api/mgmt/instances_actions.py0000664000567000056710000001666312701410316024122 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mox3 import mox from novaclient.v2.servers import Server from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_raises from proboscis import before_class from proboscis import test from trove.backup import models as backup_models from trove.backup import state from trove.common.context import TroveContext from trove.common import exception import trove.common.instance as tr_instance from trove.extensions.mgmt.instances.models import MgmtInstance from trove.extensions.mgmt.instances.service import MgmtInstanceController from trove.instance import models as imodels from trove.instance.models import DBInstance from trove.instance.tasks import InstanceTasks from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.action.reset-task-status" class MgmtInstanceBase(object): def setUp(self): self.mock = mox.Mox() self._create_instance() self.controller = MgmtInstanceController() def tearDown(self): self.db_info.delete() def _create_instance(self): self.context = TroveContext(is_admin=True) self.tenant_id = 999 self.db_info = DBInstance.create( id="inst-id-1", name="instance", flavor_id=1, datastore_version_id=test_config.dbaas_datastore_version_id, tenant_id=self.tenant_id, volume_size=None, task_status=InstanceTasks.NONE) self.server = self.mock.CreateMock(Server) self.instance = imodels.Instance( self.context, self.db_info, self.server, datastore_status=imodels.InstanceServiceStatus( tr_instance.ServiceStatuses.RUNNING)) def _make_request(self, path='/', context=None, **kwargs): from webob import Request path = '/' print("path: %s" % path) return Request.blank(path=path, environ={'trove.context': context}, **kwargs) def _reload_db_info(self): self.db_info = DBInstance.find_by(id=self.db_info.id, deleted=False) @test(groups=[GROUP]) class RestartTaskStatusTests(MgmtInstanceBase): @before_class def setUp(self): super(RestartTaskStatusTests, self).setUp() self.backups_to_clear = [] @after_class def tearDown(self): super(RestartTaskStatusTests, 
self).tearDown() def _change_task_status_to(self, new_task_status): self.db_info.task_status = new_task_status self.db_info.save() def _make_request(self, path='/', context=None, **kwargs): req = super(RestartTaskStatusTests, self)._make_request(path, context, **kwargs) req.method = 'POST' body = {'reset-task-status': {}} return req, body def reset_task_status(self): self.mock.StubOutWithMock(MgmtInstance, 'load') MgmtInstance.load(context=self.context, id=self.db_info.id).AndReturn(self.instance) self.mock.ReplayAll() req, body = self._make_request(context=self.context) self.controller = MgmtInstanceController() resp = self.controller.action(req, body, self.tenant_id, self.db_info.id) self.mock.UnsetStubs() self.mock.VerifyAll() return resp @test def mgmt_restart_task_requires_admin_account(self): context = TroveContext(is_admin=False) req, body = self._make_request(context=context) self.controller = MgmtInstanceController() assert_raises(exception.Forbidden, self.controller.action, req, body, self.tenant_id, self.db_info.id) @test def mgmt_restart_task_returns_json(self): resp = self.reset_task_status() out = resp.data("application/json") assert_equal(out, None) @test def mgmt_restart_task_changes_status_to_none(self): self._change_task_status_to(InstanceTasks.BUILDING) self.reset_task_status() self._reload_db_info() assert_equal(self.db_info.task_status, InstanceTasks.NONE) @test def mgmt_reset_task_status_clears_backups(self): self.reset_task_status() self._reload_db_info() assert_equal(self.db_info.task_status, InstanceTasks.NONE) user = test_config.users.find_user(Requirements(is_admin=False)) dbaas = create_dbaas_client(user) admin = test_config.users.find_user(Requirements(is_admin=True)) admin_dbaas = create_dbaas_client(admin) result = dbaas.instances.backups(self.db_info.id) assert_equal(0, len(result)) # Create some backups. backup_models.DBBackup.create( name="forever_new", description="forever new", tenant_id=self.tenant_id, state=state.BackupState.NEW, instance_id=self.db_info.id, deleted=False) backup_models.DBBackup.create( name="forever_build", description="forever build", tenant_id=self.tenant_id, state=state.BackupState.BUILDING, instance_id=self.db_info.id, deleted=False) backup_models.DBBackup.create( name="forever_completed", description="forever completed", tenant_id=self.tenant_id, state=state.BackupState.COMPLETED, instance_id=self.db_info.id, deleted=False) # List the backups for this instance. # There ought to be three in the admin tenant, but # none in a different user's tenant. result = dbaas.instances.backups(self.db_info.id) assert_equal(0, len(result)) result = admin_dbaas.instances.backups(self.db_info.id) assert_equal(3, len(result)) self.backups_to_clear = result # Reset the task status. 
self.reset_task_status() self._reload_db_info() result = admin_dbaas.instances.backups(self.db_info.id) assert_equal(3, len(result)) for backup in result: if backup.name == 'forever_completed': assert_equal(backup.status, state.BackupState.COMPLETED) else: assert_equal(backup.status, state.BackupState.FAILED) @test(runs_after=[mgmt_reset_task_status_clears_backups]) def clear_test_backups(self): for backup in self.backups_to_clear: found_backup = backup_models.DBBackup.find_by(id=backup.id) found_backup.delete() admin = test_config.users.find_user(Requirements(is_admin=True)) admin_dbaas = create_dbaas_client(admin) result = admin_dbaas.instances.backups(self.db_info.id) assert_equal(0, len(result)) trove-5.0.0/trove/tests/api/mgmt/accounts.py0000664000567000056710000001763712701410316022234 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nose.plugins.skip import SkipTest from proboscis import after_class from proboscis import asserts from proboscis import before_class from proboscis.decorators import time_out from proboscis import test from troveclient.compat import exceptions from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import instance_info from trove.tests.config import CONFIG from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.accounts" @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class AccountsBeforeInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_invalid_account_fails(self): account_info = self.client.accounts.show("badaccount") asserts.assert_not_equal(self.user.tenant_id, account_info.id) @test(groups=[tests.INSTANCES, GROUP], depends_on_groups=["dbaas.listing"]) class AccountsAfterInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_account_details_available(self): if CONFIG.fake_mode: raise SkipTest("Skipping this as auth is faked anyway.") account_info = self.client.accounts.show(instance_info.user.tenant_id) # Now check the results. 
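# NOTE: a sketch of the shape this mgmt call is expected to return,
# with field names taken from the assertions below (values
# illustrative only):
#
#     account_info = self.client.accounts.show(tenant_id)
#     account_info.id            # the tenant id that was queried
#     account_info.instance_ids  # UUIDs of instances owned by it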
expected = instance_info.user.tenant_id if expected is None: expected = "None" print("account_id.id = '%s'" % account_info.id) print("expected = '%s'" % expected) asserts.assert_equal(expected, account_info.id) # Instances: there should at least be one instance asserts.assert_true(len(account_info.instance_ids) > 0) # The instance id should be one of the instances for the account asserts.assert_true(instance_info.id in account_info.instance_ids) @test def test_list_accounts(self): if CONFIG.fake_mode: raise SkipTest("Skipping this as auth is faked anyway.") accounts_info = self.client.accounts.index() asserts.assert_equal(1, len(accounts_info.accounts)) account = accounts_info.accounts[0] asserts.assert_true(account['num_instances'] > 0) asserts.assert_equal(instance_info.user.tenant_id, account['id']) @test(groups=[tests.POST_INSTANCES, GROUP], depends_on_groups=["dbaas.guest.shutdown"]) class AccountsAfterInstanceDeletion(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_instance_id_removed_from_account(self): account_info = self.client.accounts.show(instance_info.user.tenant_id) asserts.assert_true(instance_info.id not in account_info.instance_ids) @test(groups=["fake.dbaas.api.mgmt.allaccounts"], depends_on_groups=["services.initialize"]) class AllAccounts(object): max = 5 def _delete_instances_for_users(self): for user in self.users: user_client = create_dbaas_client(user) while True: deleted_count = 0 user_instances = user_client.instances.list() for instance in user_instances: try: instance.delete() except exceptions.NotFound: deleted_count += 1 except Exception: print("Failed to delete instance") if deleted_count == len(user_instances): break def _create_instances_for_users(self): for user in self.users: user_client = create_dbaas_client(user) for index in range(self.max): name = "instance-%s-%03d" % (user.auth_user, index) user_client.instances.create(name, 1, {'size': 1}, [], []) @before_class def setUp(self): admin_req = Requirements(is_admin=True) self.admin_user = test_config.users.find_user(admin_req) self.admin_client = create_dbaas_client(self.admin_user) user_req = Requirements(is_admin=False) self.users = test_config.users.find_all_users_who_satisfy(user_req) self.user_tenant_ids = [user.tenant_id for user in self.users] self._create_instances_for_users() @test def test_list_accounts_with_multiple_users(self): accounts_info = self.admin_client.accounts.index() for account in accounts_info.accounts: asserts.assert_true(account['id'] in self.user_tenant_ids) asserts.assert_equal(self.max, account['num_instances']) @after_class(always_run=True) @time_out(60) def tear_down(self): self._delete_instances_for_users() @test(groups=["fake.%s.broken" % GROUP], depends_on_groups=["services.initialize"], runs_after_groups=["dbaas.guest.shutdown"]) class AccountWithBrokenInstance(object): @before_class def setUpACCR(self): from trove.taskmanager.models import CONF self.old_dns_support = CONF.trove_dns_support CONF.trove_dns_support = False self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) self.name = 'test_SERVER_ERROR' # Create an instance with a broken compute instance. 
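# NOTE: the instance name 'test_SERVER_ERROR' chosen above is
# presumed to be the hook the fake Nova driver keys on to fail the
# server build, which is what drives this instance into ERROR below.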
volume = None if CONFIG.trove_volume_support: volume = {'size': 1} self.response = self.client.instances.create( self.name, instance_info.dbaas_flavor_href, volume, []) poll_until(lambda: self.client.instances.get(self.response.id), lambda instance: instance.status == 'ERROR', time_out=10) self.instance = self.client.instances.get(self.response.id) print("Status: %s" % self.instance.status) msg = "Instance did not drop to error after server prov failure." asserts.assert_equal(self.instance.status, "ERROR", msg) @test def no_compute_instance_no_problem(self): """Get account by ID shows even instances lacking computes.""" if test_config.auth_strategy == "fake": raise SkipTest("Skipping this as auth is faked anyway.") account_info = self.client.accounts.show(self.user.tenant_id) # All we care about is that accounts.show doesn't 500 on us # for having a broken instance in the roster. asserts.assert_equal(len(account_info.instances), 1) instance = account_info.instances[0] asserts.assert_true(isinstance(instance['id'], basestring)) asserts.assert_equal(len(instance['id']), 36) asserts.assert_equal(instance['name'], self.name) asserts.assert_equal(instance['status'], "ERROR") asserts.assert_is_none(instance['host']) @after_class def tear_down(self): self.client.instances.delete(self.response.id) @after_class def restore_dns(self): from trove.taskmanager.models import CONF CONF.trove_dns_support = self.old_dns_support trove-5.0.0/trove/tests/api/mgmt/storage.py0000664000567000056710000001162512701410316022050 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
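# NOTE: a sketch of the device document the mgmt storage API is
# expected to return, with keys inferred from the assertions in this
# module (values illustrative):
#
#     {'name': 'vol-1', 'type': 'iscsi', 'used': 2.0,
#      'provision': {'available': 10, 'percent': 20, 'total': 12},
#      'capacity': {'available': 10, 'total': 12}}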
from nose.plugins.skip import SkipTest from proboscis import asserts from proboscis import before_class from proboscis import test from trove import tests from trove.tests.api.instances import CheckInstance from trove.tests.api.instances import instance_info from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements FAKE_MODE = test_config.values['fake_mode'] GROUP = "dbaas.api.mgmt.storage" @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class StorageBeforeInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_storage_on_host(self): if not FAKE_MODE: raise SkipTest("Volume driver currently not working.") storage = self.client.storage.index() print("storage : %r" % storage) for device in storage: asserts.assert_true(hasattr(device, 'name'), "device.name: %r" % device.name) asserts.assert_true(hasattr(device, 'type'), "device.type: %r" % device.type) asserts.assert_true(hasattr(device, 'used'), "device.used: %r" % device.used) asserts.assert_true(hasattr(device, 'provision'), "device.provision: %r" % device.provision) provision = device.provision asserts.assert_true('available' in provision, "provision.available: " + "%r" % provision['available']) asserts.assert_true('percent' in provision, "provision.percent: %r" % provision['percent']) asserts.assert_true('total' in provision, "provision.total: %r" % provision['total']) asserts.assert_true(hasattr(device, 'capacity'), "device.capacity: %r" % device.capacity) capacity = device.capacity asserts.assert_true('available' in capacity, "capacity.available: " + "%r" % capacity['available']) asserts.assert_true('total' in capacity, "capacity.total: %r" % capacity['total']) instance_info.storage = storage @test(groups=[tests.INSTANCES, GROUP], depends_on_groups=["dbaas.listing"]) class StorageAfterInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_storage_on_host(self): if not FAKE_MODE: raise SkipTest("Volume driver currently not working.") storage = self.client.storage.index() print("storage : %r" % storage) print("instance_info.storage : %r" % instance_info.storage) allowed_attrs = ['name', 'type', 'used', 'provision', 'capacity'] for index, device in enumerate(storage): CheckInstance(None).contains_allowed_attrs( device._info, allowed_attrs, msg="Storage") asserts.assert_equal(device.name, instance_info.storage[index].name) asserts.assert_equal(device.used, instance_info.storage[index].used) asserts.assert_equal(device.type, instance_info.storage[index].type) provision = instance_info.storage[index].provision asserts.assert_equal(device.provision['available'], provision['available']) asserts.assert_equal(device.provision['percent'], provision['percent']) asserts.assert_equal(device.provision['total'], provision['total']) capacity = instance_info.storage[index].capacity asserts.assert_equal(device.capacity['available'], capacity['available']) asserts.assert_equal(device.capacity['total'], capacity['total']) trove-5.0.0/trove/tests/api/databases.py0000664000567000056710000002046012701410316021364 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis.decorators import time_out from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.instances import GROUP_START from trove.tests.api.instances import instance_info from trove.tests import util from trove.tests.util import test_config GROUP = "dbaas.api.databases" FAKE = test_config.values['fake_mode'] @test(depends_on_groups=[GROUP_START], groups=[tests.INSTANCES, "dbaas.guest.mysql"], enabled=not test_config.values['fake_mode']) class TestMysqlAccess(object): """ Make sure that MySQL server was secured. """ @time_out(60 * 2) @test def test_mysql_admin(self): """Ensure we aren't allowed access with os_admin and wrong password.""" util.mysql_connection().assert_fails( instance_info.get_address(), "os_admin", "asdfd-asdf234") @test def test_mysql_root(self): """Ensure we aren't allowed access with root and wrong password.""" util.mysql_connection().assert_fails( instance_info.get_address(), "root", "dsfgnear") @test(depends_on_groups=[GROUP_START], depends_on_classes=[TestMysqlAccess], groups=[tests.DBAAS_API, GROUP, tests.INSTANCES]) class TestDatabases(object): """ Test the creation and deletion of additional MySQL databases """ dbname = "third #?@some_-" dbname_urlencoded = "third%20%23%3F%40some_-" dbname2 = "seconddb" created_dbs = [dbname, dbname2] system_dbs = ['information_schema', 'mysql', 'lost+found'] @before_class def setUp(self): self.dbaas = util.create_dbaas_client(instance_info.user) self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user) @test def test_cannot_create_taboo_database_names(self): for name in self.system_dbs: databases = [{"name": name, "character_set": "latin2", "collate": "latin2_general_ci"}] assert_raises(exceptions.BadRequest, self.dbaas.databases.create, instance_info.id, databases) assert_equal(400, self.dbaas.last_http_code) @test def test_create_database(self): databases = [] databases.append({"name": self.dbname, "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": self.dbname2}) self.dbaas.databases.create(instance_info.id, databases) assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) @test(depends_on=[test_create_database]) def test_create_database_list(self): databases = self.dbaas.databases.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) found = False for db in self.created_dbs: for result in databases: if result.name == db: found = True assert_true(found, "Database '%s' not found in result" % db) found = False @test(depends_on=[test_create_database]) def test_fails_when_creating_a_db_twice(self): databases = [] databases.append({"name": self.dbname, "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": self.dbname2}) assert_raises(exceptions.BadRequest, self.dbaas.databases.create, 
instance_info.id, databases) assert_equal(400, self.dbaas.last_http_code) @test def test_create_database_list_system(self): # Databases that should not be returned in the list databases = self.dbaas.databases.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) found = False for db in self.system_dbs: found = any(result.name == db for result in databases) msg = "Database '%s' SHOULD NOT be found in result" % db assert_false(found, msg) found = False @test def test_create_database_on_missing_instance(self): databases = [{"name": "invalid_db", "character_set": "latin2", "collate": "latin2_general_ci"}] assert_raises(exceptions.NotFound, self.dbaas.databases.create, -1, databases) assert_equal(404, self.dbaas.last_http_code) @test(runs_after=[test_create_database]) def test_delete_database(self): self.dbaas.databases.delete(instance_info.id, self.dbname_urlencoded) assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) dbs = self.dbaas.databases.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) found = any(result.name == self.dbname_urlencoded for result in dbs) assert_false(found, "Database '%s' SHOULD NOT be found in result" % self.dbname_urlencoded) @test(runs_after=[test_delete_database]) def test_cannot_delete_taboo_database_names(self): for name in self.system_dbs: assert_raises(exceptions.BadRequest, self.dbaas.databases.delete, instance_info.id, name) assert_equal(400, self.dbaas.last_http_code) @test(runs_after=[test_delete_database]) def test_delete_database_on_missing_instance(self): assert_raises(exceptions.NotFound, self.dbaas.databases.delete, -1, self.dbname_urlencoded) assert_equal(404, self.dbaas.last_http_code) @test def test_database_name_too_long(self): databases = [] name = ("aasdlkhaglkjhakjdkjgfakjgadgfkajsg" "34523dfkljgasldkjfglkjadsgflkjagsdd") databases.append({"name": name}) assert_raises(exceptions.BadRequest, self.dbaas.databases.create, instance_info.id, databases) assert_equal(400, self.dbaas.last_http_code) @test def test_invalid_database_name(self): databases = [] databases.append({"name": "sdfsd,"}) assert_raises(exceptions.BadRequest, self.dbaas.databases.create, instance_info.id, databases) assert_equal(400, self.dbaas.last_http_code) @test def test_pagination(self): databases = [] databases.append({"name": "Sprockets", "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": "Cogs"}) databases.append({"name": "Widgets"}) self.dbaas.databases.create(instance_info.id, databases) assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) limit = 2 databases = self.dbaas.databases.list(instance_info.id, limit=limit) assert_equal(200, self.dbaas.last_http_code) marker = databases.next # Better get only as many as we asked for assert_true(len(databases) <= limit) assert_true(databases.next is not None) assert_equal(marker, databases[-1].name) marker = databases.next # I better get new databases if I use the marker I was handed. databases = self.dbaas.databases.list(instance_info.id, limit=limit, marker=marker) assert_equal(200, self.dbaas.last_http_code) assert_true(marker not in [database.name for database in databases]) # Now fetch again with a larger limit. 
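# NOTE: a minimal sketch of walking the listing to exhaustion with
# the same limit/marker protocol this test exercises (page size
# illustrative; `next` is the marker attribute asserted above):
#
#     names, marker = [], None
#     while True:
#         page = self.dbaas.databases.list(instance_info.id,
#                                          limit=2, marker=marker)
#         names.extend(db.name for db in page)
#         marker = page.next
#         if marker is None:
#             break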
databases = self.dbaas.databases.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) assert_true(databases.next is None) trove-5.0.0/trove/tests/api/flavors.py0000664000567000056710000002467512701410316021125 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from nose.tools import assert_equal from nose.tools import assert_false from nose.tools import assert_true from proboscis.asserts import assert_raises from proboscis import before_class from proboscis.decorators import time_out from proboscis import test from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.util.check import AttrCheck from trove.tests.util import create_dbaas_client from trove.tests.util import create_nova_client from trove.tests.util import test_config from trove.tests.util.users import Requirements from troveclient.compat import exceptions from troveclient.v1.flavors import Flavor GROUP = "dbaas.api.flavors" GROUP_DS = "dbaas.api.datastores" FAKE_MODE = test_config.values['fake_mode'] servers_flavors = None dbaas_flavors = None user = None def assert_attributes_equal(name, os_flavor, dbaas_flavor): """Given an attribute name and two objects, ensures the attribute is equal. 
""" assert_true(hasattr(os_flavor, name), "open stack flavor did not have attribute %s" % name) assert_true(hasattr(dbaas_flavor, name), "dbaas flavor did not have attribute %s" % name) expected = getattr(os_flavor, name) actual = getattr(dbaas_flavor, name) assert_equal(expected, actual, 'DBaas flavor differs from Open Stack on attribute ' + name) def assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor): assert_attributes_equal('name', os_flavor, dbaas_flavor) assert_attributes_equal('ram', os_flavor, dbaas_flavor) assert_false(hasattr(dbaas_flavor, 'disk'), "The attribute 'disk' s/b absent from the dbaas API.") def assert_link_list_is_equal(flavor): assert_true(hasattr(flavor, 'links')) assert_true(flavor.links) if flavor.id: flavor_id = str(flavor.id) else: flavor_id = flavor.str_id for link in flavor.links: href = link['href'] if "self" in link['rel']: expected_href = os.path.join(test_config.dbaas_url, "flavors", str(flavor.id)) url = test_config.dbaas_url.replace('http:', 'https:', 1) msg = ("REL HREF %s doesn't start with %s" % (href, test_config.dbaas_url)) assert_true(href.startswith(url), msg) url = os.path.join("flavors", flavor_id) msg = "REL HREF %s doesn't end in '%s'" % (href, url) assert_true(href.endswith(url), msg) elif "bookmark" in link['rel']: base_url = test_config.version_url.replace('http:', 'https:', 1) expected_href = os.path.join(base_url, "flavors", flavor_id) msg = 'bookmark "href" must be %s, not %s' % (expected_href, href) assert_equal(href, expected_href, msg) else: assert_false(True, "Unexpected rel - %s" % link['rel']) @test(groups=[tests.DBAAS_API, GROUP, GROUP_DS, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class Flavors(object): @before_class def setUp(self): rd_user = test_config.users.find_user( Requirements(is_admin=False, services=["trove"])) self.rd_client = create_dbaas_client(rd_user) if test_config.nova_client is not None: nova_user = test_config.users.find_user( Requirements(services=["nova"])) self.nova_client = create_nova_client(nova_user) def get_expected_flavors(self): # If we have access to the client, great! Let's use that as the flavors # returned by Trove should be identical. if test_config.nova_client is not None: return self.nova_client.flavors.list() # If we don't have access to the client the flavors need to be spelled # out in the config file. flavors = [Flavor(Flavors, flavor_dict, loaded=True) for flavor_dict in test_config.flavors] return flavors @test def confirm_flavors_lists_nearly_identical(self): os_flavors = self.get_expected_flavors() dbaas_flavors = self.rd_client.flavors.list() print("Open Stack Flavors:") print(os_flavors) print("DBaaS Flavors:") print(dbaas_flavors) # Length of both flavors list should be identical. assert_equal(len(os_flavors), len(dbaas_flavors)) for os_flavor in os_flavors: found_index = None for index, dbaas_flavor in enumerate(dbaas_flavors): if os_flavor.name == dbaas_flavor.name: msg = ("Flavor ID '%s' appears in elements #%s and #%d." % (dbaas_flavor.id, str(found_index), index)) assert_true(found_index is None, msg) assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor) found_index = index msg = "Some flavors from OS list were missing in DBAAS list." 
assert_false(found_index is None, msg) for flavor in dbaas_flavors: assert_link_list_is_equal(flavor) @test def test_flavor_list_attrs(self): allowed_attrs = ['id', 'name', 'ram', 'links', 'local_storage', 'str_id'] flavors = self.rd_client.flavors.list() attrcheck = AttrCheck() for flavor in flavors: flavor_dict = flavor._info attrcheck.contains_allowed_attrs( flavor_dict, allowed_attrs, msg="Flavors list") attrcheck.links(flavor_dict['links']) @test def test_flavor_get_attrs(self): allowed_attrs = ['id', 'name', 'ram', 'links', 'local_storage', 'str_id'] flavor = self.rd_client.flavors.get(1) attrcheck = AttrCheck() flavor_dict = flavor._info attrcheck.contains_allowed_attrs( flavor_dict, allowed_attrs, msg="Flavor Get 1") attrcheck.links(flavor_dict['links']) @test def test_flavor_not_found(self): assert_raises(exceptions.NotFound, self.rd_client.flavors.get, "foo") @test def test_flavor_list_datastore_version_associated_flavors(self): datastore = self.rd_client.datastores.get( test_config.dbaas_datastore) dbaas_flavors = (self.rd_client.flavors. list_datastore_version_associated_flavors( datastore=test_config.dbaas_datastore, version_id=datastore.default_version)) os_flavors = self.get_expected_flavors() assert_equal(len(dbaas_flavors), len(os_flavors)) # verify flavor lists are identical for os_flavor in os_flavors: found_index = None for index, dbaas_flavor in enumerate(dbaas_flavors): if os_flavor.name == dbaas_flavor.name: msg = ("Flavor ID '%s' appears in elements #%s and #%d." % (dbaas_flavor.id, str(found_index), index)) assert_true(found_index is None, msg) assert_flavors_roughly_equivalent(os_flavor, dbaas_flavor) found_index = index msg = "Some flavors from OS list were missing in DBAAS list." assert_false(found_index is None, msg) for flavor in dbaas_flavors: assert_link_list_is_equal(flavor) @test(runs_after=[Flavors], groups=[tests.DBAAS_API, GROUP, GROUP_DS], depends_on_groups=["services.initialize"], enabled=FAKE_MODE) class DatastoreFlavorAssociation(object): @before_class def setUp(self): rd_user = test_config.users.find_user( Requirements(is_admin=False, services=["trove"])) self.rd_client = create_dbaas_client(rd_user) self.datastore = self.rd_client.datastores.get( test_config.dbaas_datastore) self.name1 = "test_instance1" self.name2 = "test_instance2" self.volume = {'size': 2} self.instance_id = None @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_create_instance_with_valid_flavor_association(self): # all the nova flavors are associated with the default datastore result = self.rd_client.instances.create( name=self.name1, flavor_id='1', volume=self.volume, datastore=self.datastore.id) self.instance_id = result.id assert_equal(200, self.rd_client.last_http_code) def result_is_active(): instance = self.rd_client.instances.get(self.instance_id) if instance.status == "ACTIVE": return True else: # If its not ACTIVE, anything but BUILD must be # an error. assert_equal("BUILD", instance.status) return False poll_until(result_is_active) self.rd_client.instances.delete(self.instance_id) @test(runs_after=[test_create_instance_with_valid_flavor_association]) def test_create_instance_with_invalid_flavor_association(self): dbaas_flavors = (self.rd_client.flavors. 
list_datastore_version_associated_flavors( datastore=test_config.dbaas_datastore, version_id=self.datastore.default_version)) self.flavor_not_associated = None os_flavors = Flavors().get_expected_flavors() for os_flavor in os_flavors: if os_flavor not in dbaas_flavors: self.flavor_not_associated = os_flavor.id break if self.flavor_not_associated is not None: assert_raises(exceptions.BadRequest, self.rd_client.instances.create, self.name2, self.flavor_not_associated, self.volume, datastore=self.datastore.id) trove-5.0.0/trove/tests/api/replication.py0000664000567000056710000003332612701410320021746 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from time import sleep from proboscis.asserts import assert_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove.tests.api.instances import CheckInstance from trove.tests.api.instances import instance_info from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.api.instances import WaitForGuestInstallationToFinish from trove.tests.config import CONFIG from trove.tests.util.server_connection import create_server_connection class SlaveInstanceTestInfo(object): """Stores slave instance information.""" def __init__(self): self.id = None self.replicated_db = generate_uuid() GROUP = "dbaas.api.replication" slave_instance = SlaveInstanceTestInfo() existing_db_on_master = generate_uuid() backup_count = None def _get_user_count(server_info): cmd = ('mysql -BNq -e \\\'select count\\(*\\) from mysql.user' ' where user like \\\"slave_%\\\"\\\'') server = create_server_connection(server_info.id) stdout, stderr = server.execute(cmd) return int(stdout.rstrip()) def slave_is_running(running=True): def check_slave_is_running(): server = create_server_connection(slave_instance.id) cmd = ("mysqladmin extended-status " "| awk '/Slave_running/{print $4}'") stdout, stderr = server.execute(cmd) expected = "ON" if running else "OFF" return stdout.rstrip() == expected return check_slave_is_running def backup_count_matches(count): def check_backup_count_matches(): backup = instance_info.dbaas.instances.backups(instance_info.id) return count == len(backup) return check_backup_count_matches def instance_is_active(id): instance = instance_info.dbaas.instances.get(id) if instance.status == "ACTIVE": return True else: assert_true(instance.status in ['PROMOTE', 'EJECT', 'BUILD', 'BACKUP']) return False def create_slave(): result = instance_info.dbaas.instances.create( instance_info.name + "_slave", instance_info.dbaas_flavor_href, instance_info.volume, nics=instance_info.nics, datastore=instance_info.dbaas_datastore, 
datastore_version=instance_info.dbaas_datastore_version, slave_of=instance_info.id) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) return result.id def validate_slave(master, slave): new_slave = instance_info.dbaas.instances.get(slave.id) assert_equal(200, instance_info.dbaas.last_http_code) ns_dict = new_slave._info CheckInstance(ns_dict).slave_of() assert_equal(master.id, ns_dict['replica_of']['id']) def validate_master(master, slaves): new_master = instance_info.dbaas.instances.get(master.id) assert_equal(200, instance_info.dbaas.last_http_code) nm_dict = new_master._info CheckInstance(nm_dict).slaves() master_ids = set([replica['id'] for replica in nm_dict['replicas']]) asserted_ids = set([slave.id for slave in slaves]) assert_true(asserted_ids.issubset(master_ids)) @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP]) class CreateReplicationSlave(object): @test def test_replica_provisioning_with_missing_replica_source(self): assert_raises(exceptions.NotFound, instance_info.dbaas.instances.create, instance_info.name + "_slave", instance_info.dbaas_flavor_href, instance_info.volume, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, slave_of="Missing replica source") assert_equal(404, instance_info.dbaas.last_http_code) @test def test_create_db_on_master(self): databases = [{'name': existing_db_on_master}] # Ensure that the auth_token in the dbaas client is not stale instance_info.dbaas.authenticate() instance_info.dbaas.databases.create(instance_info.id, databases) assert_equal(202, instance_info.dbaas.last_http_code) @test(runs_after=['test_create_db_on_master']) def test_create_slave(self): global backup_count backup_count = len( instance_info.dbaas.instances.backups(instance_info.id)) slave_instance.id = create_slave() @test(groups=[GROUP]) class WaitForCreateSlaveToFinish(object): """Wait until the instance is created and set up as slave.""" @test(depends_on=[CreateReplicationSlave.test_create_slave]) @time_out(TIMEOUT_INSTANCE_CREATE) def test_slave_created(self): poll_until(lambda: instance_is_active(slave_instance.id)) @test(enabled=(not CONFIG.fake_mode), depends_on=[WaitForCreateSlaveToFinish], groups=[GROUP]) class VerifySlave(object): def db_is_found(self, database_to_find): def find_database(): databases = instance_info.dbaas.databases.list(slave_instance.id) return (database_to_find in [d.name for d in databases]) return find_database @test @time_out(5 * 60) def test_correctly_started_replication(self): poll_until(slave_is_running()) @test(runs_after=[test_correctly_started_replication]) @time_out(60) def test_backup_deleted(self): poll_until(backup_count_matches(backup_count)) @test(depends_on=[test_correctly_started_replication]) def test_slave_is_read_only(self): cmd = "mysql -BNq -e \\\'select @@read_only\\\'" server = create_server_connection(slave_instance.id) stdout, stderr = server.execute(cmd) assert_equal(stdout, "1\n") @test(depends_on=[test_slave_is_read_only]) def test_create_db_on_master(self): databases = [{'name': slave_instance.replicated_db}] instance_info.dbaas.databases.create(instance_info.id, databases) assert_equal(202, instance_info.dbaas.last_http_code) @test(depends_on=[test_create_db_on_master]) @time_out(5 * 60) def test_database_replicated_on_slave(self): poll_until(self.db_is_found(slave_instance.replicated_db)) @test(runs_after=[test_database_replicated_on_slave]) @time_out(5 * 60) def test_existing_db_exists_on_slave(self): 
poll_until(self.db_is_found(existing_db_on_master)) @test(depends_on=[test_existing_db_exists_on_slave]) def test_slave_user_exists(self): assert_equal(_get_user_count(slave_instance), 1) assert_equal(_get_user_count(instance_info), 1) @test(groups=[GROUP], depends_on=[WaitForCreateSlaveToFinish], runs_after=[VerifySlave]) class TestInstanceListing(object): """Test replication information in instance listing.""" @test def test_get_slave_instance(self): validate_slave(instance_info, slave_instance) @test def test_get_master_instance(self): validate_master(instance_info, [slave_instance]) @test(groups=[GROUP], depends_on=[WaitForCreateSlaveToFinish], runs_after=[TestInstanceListing]) class TestReplicationFailover(object): """Test replication failover functionality.""" @staticmethod def promote(master, slave): if CONFIG.fake_mode: raise SkipTest("promote_replica_source not supported in fake mode") instance_info.dbaas.instances.promote_to_replica_source(slave) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(lambda: instance_is_active(slave.id)) validate_master(slave, [master]) validate_slave(slave, master) @test def test_promote_master(self): if CONFIG.fake_mode: raise SkipTest("promote_master not supported in fake mode") assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.promote_to_replica_source, instance_info.id) @test def test_eject_slave(self): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.eject_replica_source, slave_instance.id) @test def test_eject_valid_master(self): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.eject_replica_source, instance_info.id) @test(depends_on=[test_promote_master, test_eject_slave, test_eject_valid_master]) def test_promote_to_replica_source(self): TestReplicationFailover.promote(instance_info, slave_instance) @test(depends_on=[test_promote_to_replica_source]) def test_promote_back_to_replica_source(self): TestReplicationFailover.promote(slave_instance, instance_info) @test(depends_on=[test_promote_back_to_replica_source], enabled=False) def add_second_slave(self): if CONFIG.fake_mode: raise SkipTest("three site promote not supported in fake mode") self._third_slave = SlaveInstanceTestInfo() self._third_slave.id = create_slave() poll_until(lambda: instance_is_active(self._third_slave.id)) poll_until(slave_is_running()) sleep(30) validate_master(instance_info, [slave_instance, self._third_slave]) validate_slave(instance_info, self._third_slave) @test(depends_on=[add_second_slave], enabled=False) def test_three_site_promote(self): if CONFIG.fake_mode: raise SkipTest("three site promote not supported in fake mode") TestReplicationFailover.promote(instance_info, self._third_slave) validate_master(self._third_slave, [slave_instance, instance_info]) validate_slave(self._third_slave, instance_info) @test(depends_on=[test_three_site_promote], enabled=False) def disable_master(self): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") cmd = "sudo service trove-guestagent stop" server = create_server_connection(self._third_slave.id) stdout, stderr = server.execute(cmd) assert_equal(stdout, "1\n") @test(depends_on=[disable_master], enabled=False) def test_eject_replica_master(self): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") sleep(90) 
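# NOTE: the 90-second pause above is presumed to let the stopped
# guestagent's heartbeat go stale, since ejecting a replica source is
# only permitted once that source no longer looks alive to the
# taskmanager.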
instance_info.dbaas.instances.eject_replica_source(self._third_slave) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(lambda: instance_is_active(self._third_slave.id)) validate_master(instance_info, [slave_instance]) validate_slave(instance_info, slave_instance) @test(groups=[GROUP], depends_on=[WaitForCreateSlaveToFinish], runs_after=[TestReplicationFailover]) class DetachReplica(object): @test def delete_before_detach_replica(self): assert_raises(exceptions.Forbidden, instance_info.dbaas.instances.delete, instance_info.id) @test @time_out(5 * 60) def test_detach_replica(self): if CONFIG.fake_mode: raise SkipTest("Detach replica not supported in fake mode") instance_info.dbaas.instances.edit(slave_instance.id, detach_replica_source=True) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(slave_is_running(False)) @test(depends_on=[test_detach_replica]) @time_out(5 * 60) def test_slave_is_not_read_only(self): if CONFIG.fake_mode: raise SkipTest("Test not_read_only not supported in fake mode") # wait until replica is no longer read only def check_not_read_only(): cmd = "mysql -BNq -e \\\'select @@read_only\\\'" server = create_server_connection(slave_instance.id) stdout, stderr = server.execute(cmd) if (stdout.rstrip() != "0"): return False else: return True poll_until(check_not_read_only) @test(groups=[GROUP], depends_on=[WaitForCreateSlaveToFinish], runs_after=[DetachReplica]) class DeleteSlaveInstance(object): @test @time_out(TIMEOUT_INSTANCE_DELETE) def test_delete_slave_instance(self): instance_info.dbaas.instances.delete(slave_instance.id) assert_equal(202, instance_info.dbaas.last_http_code) def instance_is_gone(): try: instance_info.dbaas.instances.get(slave_instance.id) return False except exceptions.NotFound: return True poll_until(instance_is_gone) assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get, slave_instance.id) trove-5.0.0/trove/tests/api/instances.py0000664000567000056710000020245212701410320021422 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
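# NOTE: a condensed sketch of the create call that
# CreateInstance.test_create below is built around (volume size and
# database/user payloads are illustrative):
#
#     result = dbaas.instances.create(
#         instance_info.name,                # display name
#         instance_info.dbaas_flavor_href,   # flavor
#         {'size': 1},                       # volume (None if unsupported)
#         [{'name': 'firstdb'}],             # databases
#         [{'name': 'lite', 'password': 'litepass',
#           'databases': [{'name': 'firstdb'}]}],
#         nics=instance_info.nics,
#         availability_zone='nova',
#         datastore=instance_info.dbaas_datastore,
#         datastore_version=instance_info.dbaas_datastore_version)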
import os import re import time from time import sleep import unittest import uuid from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_is_not_none from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import fail from proboscis import before_class from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common import exception as rd_exceptions from trove.common.utils import poll_until from trove.datastore import models as datastore_models from trove import tests from trove.tests.config import CONFIG from trove.tests.util.check import AttrCheck from trove.tests.util.check import TypeCheck from trove.tests.util import create_dbaas_client from trove.tests.util import create_nova_client from trove.tests.util import dns_checker from trove.tests.util import event_simulator from trove.tests.util import iso_time from trove.tests.util import test_config from trove.tests.util.usage import create_usage_verifier from trove.tests.util.users import Requirements FAKE = test_config.values['fake_mode'] GROUP = "dbaas.guest" GROUP_NEUTRON = "dbaas.neutron" GROUP_START = "dbaas.guest.initialize" GROUP_START_SIMPLE = "dbaas.guest.initialize.simple" GROUP_TEST = "dbaas.guest.test" GROUP_STOP = "dbaas.guest.shutdown" GROUP_USERS = "dbaas.api.users" GROUP_ROOT = "dbaas.api.root" GROUP_GUEST = "dbaas.guest.start.test" GROUP_DATABASES = "dbaas.api.databases" GROUP_SECURITY_GROUPS = "dbaas.api.security_groups" GROUP_CREATE_INSTANCE_FAILURE = "dbaas.api.failures" GROUP_QUOTAS = "dbaas.quotas" TIMEOUT_INSTANCE_CREATE = 60 * 32 TIMEOUT_INSTANCE_DELETE = 120 class InstanceTestInfo(object): """Stores new instance information used by dependent tests.""" def __init__(self): self.dbaas = None # The rich client instance used by these tests. self.dbaas_admin = None # The rich client with admin access. self.dbaas_flavor = None # The flavor object of the instance. self.dbaas_flavor_href = None # The flavor of the instance. self.dbaas_datastore = None # The datastore id self.dbaas_datastore_version = None # The datastore version id self.dbaas_inactive_datastore_version = None # The DS inactive id self.id = None # The ID of the instance in the database. self.local_id = None self.address = None self.nics = None # The dict of type/id for nics used on the instance. self.initial_result = None # The initial result from the create call. self.user_ip = None # The IP address of the instance, given to user. self.infra_ip = None # The infrastructure network IP address. self.result = None # The instance info returned by the API self.nova_client = None # The instance of novaclient. self.volume_client = None # The instance of the volume client. self.name = None # Test name, generated each test run. self.pid = None # The process ID of the instance. self.user = None # The user instance who owns the instance. self.admin_user = None # The admin user for the management interfaces. self.volume = None # The volume the instance will have. self.volume_id = None # Id for the attached volume self.storage = None # The storage device info for the volumes. self.databases = None # The databases created on the instance. 
self.host_info = None # Host Info before creating instances self.user_context = None # A regular user context self.users = None # The users created on the instance. self.consumer = create_usage_verifier() def find_default_flavor(self): if EPHEMERAL_SUPPORT: flavor_name = CONFIG.values.get('instance_eph_flavor_name', 'eph.rd-tiny') else: flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny') flavors = self.dbaas.find_flavors_by_name(flavor_name) assert_equal(len(flavors), 1, "Number of flavors with name '%s' " "found was '%d'." % (flavor_name, len(flavors))) flavor = flavors[0] assert_true(flavor is not None, "Flavor '%s' not found!" % flavor_name) flavor_href = self.dbaas.find_flavor_self_href(flavor) assert_true(flavor_href is not None, "Flavor href '%s' not found!" % flavor_name) return flavor, flavor_href def get_address(self): result = self.dbaas_admin.mgmt.instances.show(self.id) if not hasattr(result, 'hostname'): return result.ip[0] else: return result.server['addresses'] def get_local_id(self): mgmt_instance = self.dbaas_admin.management.show(self.id) return mgmt_instance.server["local_id"] def get_volume_filesystem_size(self): mgmt_instance = self.dbaas_admin.management.show(self.id) return mgmt_instance.volume["total"] # The two variables are used below by tests which depend on an instance # existing. instance_info = InstanceTestInfo() dbaas = None # Rich client used throughout this test. dbaas_admin = None # Same as above, with admin privs. ROOT_ON_CREATE = CONFIG.get('root_on_create', False) VOLUME_SUPPORT = CONFIG.get('trove_volume_support', False) EPHEMERAL_SUPPORT = not VOLUME_SUPPORT and CONFIG.get('device_path', '/dev/vdb') is not None ROOT_PARTITION = not VOLUME_SUPPORT and CONFIG.get('device_path', None) is None # This is like a cheat code which allows the tests to skip creating a new # instance and use an old one. def existing_instance(): return os.environ.get("TESTS_USE_INSTANCE_ID", None) def do_not_delete_instance(): return os.environ.get("TESTS_DO_NOT_DELETE_INSTANCE", None) is not None def create_new_instance(): return existing_instance() is None @test(groups=['dbaas.usage', 'dbaas.usage.init']) def clear_messages_off_queue(): instance_info.consumer.clear_events() @test(groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, 'dbaas.setup'], depends_on_groups=["services.initialize"]) class InstanceSetup(object): """Makes sure the client can hit the ReST service. This test also uses the API to find the flavor to use. """ @before_class def setUp(self): """Sets up the client.""" reqs = Requirements(is_admin=True) instance_info.admin_user = CONFIG.users.find_user(reqs) instance_info.dbaas_admin = create_dbaas_client( instance_info.admin_user) global dbaas_admin dbaas_admin = instance_info.dbaas_admin # Make sure we create the client as the correct user if we're using # a pre-built instance. 
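# NOTE: existing_instance() above reads TESTS_USE_INSTANCE_ID from
# the environment; when that variable is set, the suite reuses the
# given instance and this branch rebinds the client to that
# instance's owning tenant instead of provisioning anything new.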
if existing_instance(): mgmt_inst = dbaas_admin.mgmt.instances.show(existing_instance()) t_id = mgmt_inst.tenant_id instance_info.user = CONFIG.users.find_user_by_tenant_id(t_id) else: reqs = Requirements(is_admin=False) instance_info.user = CONFIG.users.find_user(reqs) instance_info.dbaas = create_dbaas_client(instance_info.user) global dbaas dbaas = instance_info.dbaas @test def test_find_flavor(self): flavor, flavor_href = instance_info.find_default_flavor() instance_info.dbaas_flavor = flavor instance_info.dbaas_flavor_href = flavor_href @test def create_instance_name(self): id = existing_instance() if id is None: instance_info.name = "TEST_" + str(uuid.uuid4()) else: instance_info.name = dbaas.instances.get(id).name @test(depends_on_classes=[InstanceSetup], groups=[GROUP]) def test_delete_instance_not_found(): """Deletes an instance that does not exist.""" # Looks for a random UUID that (most probably) does not exist. assert_raises(exceptions.NotFound, dbaas.instances.delete, "7016efb6-c02c-403e-9628-f6f57d0920d0") @test(depends_on_classes=[InstanceSetup], groups=[GROUP, GROUP_QUOTAS], runs_after_groups=[tests.PRE_INSTANCES]) class CreateInstanceQuotaTest(unittest.TestCase): def setUp(self): import copy self.test_info = copy.deepcopy(instance_info) self.test_info.dbaas_datastore = CONFIG.dbaas_datastore def tearDown(self): quota_dict = {'instances': CONFIG.trove_max_instances_per_tenant, 'volumes': CONFIG.trove_max_volumes_per_tenant} dbaas_admin.quota.update(self.test_info.user.tenant_id, quota_dict) def test_instance_size_too_big(self): if ('trove_max_accepted_volume_size' in CONFIG.values and VOLUME_SUPPORT): too_big = CONFIG.trove_max_accepted_volume_size self.test_info.volume = {'size': too_big + 1} self.test_info.name = "way_too_large" assert_raises(exceptions.OverLimit, dbaas.instances.create, self.test_info.name, self.test_info.dbaas_flavor_href, self.test_info.volume) def test_update_quota_invalid_resource_should_fail(self): quota_dict = {'invalid_resource': 100} assert_raises(exceptions.NotFound, dbaas_admin.quota.update, self.test_info.user.tenant_id, quota_dict) def test_update_quota_volume_should_fail_volume_not_supported(self): if VOLUME_SUPPORT: raise SkipTest("Volume support needs to be disabled") quota_dict = {'volumes': 100} assert_raises(exceptions.NotFound, dbaas_admin.quota.update, self.test_info.user.tenant_id, quota_dict) def test_create_too_many_instances(self): instance_quota = 0 quota_dict = {'instances': instance_quota} new_quotas = dbaas_admin.quota.update(self.test_info.user.tenant_id, quota_dict) verify_quota = dbaas_admin.quota.show(self.test_info.user.tenant_id) assert_equal(new_quotas['instances'], quota_dict['instances']) assert_equal(0, verify_quota['instances']) self.test_info.volume = None if VOLUME_SUPPORT: assert_equal(CONFIG.trove_max_volumes_per_tenant, verify_quota['volumes']) self.test_info.volume = {'size': CONFIG.get('trove_volume_size', 1)} self.test_info.name = "too_many_instances" assert_raises(exceptions.OverLimit, dbaas.instances.create, self.test_info.name, self.test_info.dbaas_flavor_href, self.test_info.volume) assert_equal(413, dbaas.last_http_code) def test_create_instances_total_volume_exceeded(self): if not VOLUME_SUPPORT: raise SkipTest("Volume support not enabled") volume_quota = 3 quota_dict = {'volumes': volume_quota} self.test_info.volume = {'size': volume_quota + 1} new_quotas = dbaas_admin.quota.update(self.test_info.user.tenant_id, quota_dict) assert_equal(volume_quota, new_quotas['volumes']) self.test_info.name = 
"too_large_volume" assert_raises(exceptions.OverLimit, dbaas.instances.create, self.test_info.name, self.test_info.dbaas_flavor_href, self.test_info.volume) assert_equal(413, dbaas.last_http_code) @test(depends_on_classes=[InstanceSetup], groups=[GROUP, GROUP_CREATE_INSTANCE_FAILURE], runs_after_groups=[tests.PRE_INSTANCES, GROUP_QUOTAS]) class CreateInstanceFail(object): def instance_in_error(self, instance_id): def check_if_error(): instance = dbaas.instances.get(instance_id) if instance.status == "ERROR": return True else: # The status should still be BUILD assert_equal("BUILD", instance.status) return False return check_if_error def delete_async(self, instance_id): dbaas.instances.delete(instance_id) while True: try: dbaas.instances.get(instance_id) except exceptions.NotFound: return True time.sleep(1) @test @time_out(30) def test_create_with_bad_availability_zone(self): instance_name = "instance-failure-with-bad-ephemeral" if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None databases = [] result = dbaas.instances.create(instance_name, instance_info.dbaas_flavor_href, volume, databases, availability_zone="BAD_ZONE") poll_until(self.instance_in_error(result.id)) instance = dbaas.instances.get(result.id) assert_equal("ERROR", instance.status) self.delete_async(result.id) @test def test_create_with_bad_nics(self): instance_name = "instance-failure-with-bad-nics" if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None databases = [] bad_nic = [{"port-id": "UNKNOWN", "net-id": "1234", "v4-fixed-ip": "1.2.3.4"}] result = dbaas.instances.create(instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=bad_nic) poll_until(self.instance_in_error(result.id)) instance = dbaas.instances.get(result.id) assert_equal("ERROR", instance.status) self.delete_async(result.id) def test_create_failure_with_empty_flavor(self): instance_name = "instance-failure-with-empty-flavor" databases = [] if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, '', volume, databases) assert_equal(400, dbaas.last_http_code) @test(enabled=VOLUME_SUPPORT) def test_create_failure_with_empty_volume(self): instance_name = "instance-failure-with-no-volume-size" databases = [] volume = {} assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases) assert_equal(400, dbaas.last_http_code) @test(enabled=VOLUME_SUPPORT) def test_create_failure_with_no_volume_size(self): instance_name = "instance-failure-with-no-volume-size" databases = [] volume = {'size': None} assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases) assert_equal(400, dbaas.last_http_code) @test(enabled=not VOLUME_SUPPORT) def test_create_failure_with_volume_size_and_volume_disabled(self): instance_name = "instance-failure-volume-size_and_volume_disabled" databases = [] volume = {'size': 2} assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases) assert_equal(501, dbaas.last_http_code) def test_create_failure_with_volume_size_and_disabled_for_datastore(self): instance_name = "instance-failure-volume-size_and_volume_disabled" databases = [] datastore = 'redis' assert_equal(CONFIG.get(datastore, 'redis')['volume_support'], False) volume = {'size': 2} 
assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, datastore=datastore) assert_equal(501, dbaas.last_http_code) @test(enabled=EPHEMERAL_SUPPORT) def test_create_failure_with_no_ephemeral_flavor(self): instance_name = "instance-failure-with-no-ephemeral-flavor" databases = [] flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny') flavors = dbaas.find_flavors_by_name(flavor_name) assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, flavors[0].id, None, databases) assert_equal(400, dbaas.last_http_code) @test def test_create_failure_with_no_name(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "" databases = [] assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases) assert_equal(400, dbaas.last_http_code) @test def test_create_failure_with_spaces_for_name(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = " " databases = [] assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases) assert_equal(400, dbaas.last_http_code) @test def test_mgmt_get_instance_on_create(self): if CONFIG.test_mgmt: result = dbaas_admin.management.show(instance_info.id) allowed_attrs = ['account_id', 'addresses', 'created', 'databases', 'flavor', 'guest_status', 'host', 'hostname', 'id', 'name', 'datastore', 'server_state_description', 'status', 'updated', 'users', 'volume', 'root_enabled_at', 'root_enabled_by'] with CheckInstance(result._info) as check: check.contains_allowed_attrs( result._info, allowed_attrs, msg="Mgmt get instance") check.flavor() check.datastore() check.guest_status() @test def test_create_failure_with_datastore_default_notfound(self): if not FAKE: raise SkipTest("This test only for fake mode.") if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_default_notfound" databases = [] users = [] origin_default_datastore = (datastore_models.CONF. default_datastore) datastore_models.CONF.default_datastore = "" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users) except exceptions.BadRequest as e: assert_equal(e.message, "Please specify datastore. Default datastore " "cannot be found.") datastore_models.CONF.default_datastore = \ origin_default_datastore @test def test_create_failure_with_datastore_default_version_notfound(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_default_version_notfound" databases = [] users = [] datastore = "Test_Datastore_1" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore) except exceptions.BadRequest as e: assert_equal(e.message, "Default version for datastore '%s' not found." 
% datastore) @test def test_create_failure_with_datastore_notfound(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_notfound" databases = [] users = [] datastore = "nonexistent" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore '%s' cannot be found." % datastore) @test def test_create_failure_with_datastore_version_notfound(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_version_notfound" databases = [] users = [] datastore = CONFIG.dbaas_datastore datastore_version = "nonexistent" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore, datastore_version=datastore_version) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore version '%s' cannot be found." % datastore_version) @test def test_create_failure_with_datastore_version_inactive(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_version_inactive" databases = [] users = [] datastore = CONFIG.dbaas_datastore datastore_version = CONFIG.dbaas_inactive_datastore_version try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore, datastore_version=datastore_version) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore version '%s' is not active." % datastore_version) def assert_unprocessable(func, *args): try: func(*args) # If the exception didn't get raised, but the instance is still in # the BUILDING state, that's a bug. result = dbaas.instances.get(instance_info.id) if result.status == "BUILD": fail("When an instance is being built, this function should " "always raise UnprocessableEntity.") except exceptions.UnprocessableEntity: assert_equal(422, dbaas.last_http_code) pass # Good @test def test_deep_list_security_group_with_rules(self): securityGroupList = dbaas.security_groups.list() assert_is_not_none(securityGroupList) securityGroup = [x for x in securityGroupList if x.name in self.secGroupName] assert_is_not_none(securityGroup[0]) assert_not_equal(len(securityGroup[0].rules), 0) @test(depends_on_classes=[InstanceSetup], run_after_class=[CreateInstanceFail], groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, tests.INSTANCES], runs_after_groups=[tests.PRE_INSTANCES, GROUP_QUOTAS]) class CreateInstance(object): """Test to create a Database Instance If the call returns without raising an exception this test passes. 
""" @test def test_create(self): databases = [] databases.append({"name": "firstdb", "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": "db2"}) instance_info.databases = databases users = [] users.append({"name": "lite", "password": "litepass", "databases": [{"name": "firstdb"}]}) instance_info.users = users instance_info.dbaas_datastore = CONFIG.dbaas_datastore instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version if VOLUME_SUPPORT: instance_info.volume = {'size': CONFIG.get('trove_volume_size', 1)} else: instance_info.volume = None shared_network = CONFIG.get('shared_network', None) if shared_network: instance_info.nics = [{'net-id': shared_network}] if create_new_instance(): instance_info.initial_result = dbaas.instances.create( instance_info.name, instance_info.dbaas_flavor_href, instance_info.volume, databases, users, nics=instance_info.nics, availability_zone="nova", datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version) assert_equal(200, dbaas.last_http_code) else: id = existing_instance() instance_info.initial_result = dbaas.instances.get(id) result = instance_info.initial_result instance_info.id = result.id instance_info.dbaas_datastore_version = result.datastore['version'] report = CONFIG.get_report() report.log("Instance UUID = %s" % instance_info.id) if create_new_instance(): assert_equal("BUILD", instance_info.initial_result.status) else: report.log("Test was invoked with TESTS_USE_INSTANCE_ID=%s, so no " "instance was actually created." % id) # Check these attrs only are returned in create response allowed_attrs = ['created', 'flavor', 'addresses', 'id', 'links', 'name', 'status', 'updated', 'datastore'] if ROOT_ON_CREATE: allowed_attrs.append('password') if VOLUME_SUPPORT: allowed_attrs.append('volume') if CONFIG.trove_dns_support: allowed_attrs.append('hostname') with CheckInstance(result._info) as check: if create_new_instance(): check.contains_allowed_attrs( result._info, allowed_attrs, msg="Create response") # Don't CheckInstance if the instance already exists. check.flavor() check.datastore() check.links(result._info['links']) if VOLUME_SUPPORT: check.volume() @test(depends_on_classes=[InstanceSetup], groups=[GROUP, tests.INSTANCES], runs_after_groups=[tests.PRE_INSTANCES]) class CreateInstanceFlavors(object): def _result_is_active(self): instance = dbaas.instances.get(self.result.id) if instance.status == "ACTIVE": return True else: # If its not ACTIVE, anything but BUILD must be # an error. 
assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False def _delete_async(self, instance_id): dbaas.instances.delete(instance_id) while True: try: dbaas.instances.get(instance_id) except exceptions.NotFound: return True time.sleep(1) def _create_with_flavor(self, flavor_id): if not FAKE: raise SkipTest("This test only for fake mode.") instance_name = "instance-with-flavor-%s" % flavor_id databases = [] if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None self.result = dbaas.instances.create(instance_name, flavor_id, volume, databases) poll_until(self._result_is_active) self._delete_async(self.result.id) @test def test_create_with_int_flavor(self): self._create_with_flavor(1) @test def test_create_with_str_flavor(self): self._create_with_flavor('custom') @test(depends_on_classes=[InstanceSetup], groups=[GROUP_NEUTRON]) class CreateInstanceWithNeutron(unittest.TestCase): @time_out(TIMEOUT_INSTANCE_CREATE) def setUp(self): if not CONFIG.values.get('neutron_enabled'): raise SkipTest("neutron is not enabled, skipping") user = test_config.users.find_user( Requirements(is_admin=False, services=["nova", "trove"])) self.nova_client = create_nova_client(user) self.dbaas_client = create_dbaas_client(user) self.result = None self.instance_name = ("TEST_INSTANCE_CREATION_WITH_NICS" + str(uuid.uuid4())) databases = [] self.default_cidr = CONFIG.values.get('shared_network_subnet', None) if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None self.result = self.dbaas_client.instances.create( self.instance_name, instance_info.dbaas_flavor_href, volume, databases) self.instance_id = self.result.id def verify_instance_is_active(): result = self.dbaas_client.instances.get(self.instance_id) if result.status == "ACTIVE": return True else: assert_equal("BUILD", result.status) return False poll_until(verify_instance_is_active) def tearDown(self): if self.result.id is not None: self.dbaas_client.instances.delete(self.result.id) while True: try: self.dbaas_client.instances.get(self.result.id) except exceptions.NotFound: return True time.sleep(1) def check_ip_within_network(self, ip, network): octet_list = str(ip).split(".") octets, mask = str(network).split("/") octet_list_ = octets.split(".") for i in range(int(mask) / 8): if octet_list[i] != octet_list_[i]: return False return True def test_ip_within_cidr(self): nova_instance = None for server in self.nova_client.servers.list(): if str(server.name) == self.instance_name: nova_instance = server break if nova_instance is None: fail("instance created with neutron enabled is not found in nova") for address in nova_instance.addresses['private']: ip = address['addr'] assert_true(self.check_ip_within_network(ip, self.default_cidr)) # black list filtered ip not visible via troveclient trove_instance = self.dbaas_client.instances.get(self.result.id) for ip in trove_instance.ip: if str(ip).startswith('10.'): assert_true(self.check_ip_within_network(ip, "10.0.0.0/24")) assert_false(self.check_ip_within_network(ip, "10.0.1.0/24")) @test(depends_on_classes=[CreateInstance], groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, 'dbaas.mgmt.hosts_post_install'], enabled=create_new_instance()) class AfterInstanceCreation(unittest.TestCase): # instance calls def test_instance_delete_right_after_create(self): assert_unprocessable(dbaas.instances.delete, instance_info.id) # root calls def 
test_root_create_root_user_after_create(self): assert_unprocessable(dbaas.root.create, instance_info.id) def test_root_is_root_enabled_after_create(self): assert_unprocessable(dbaas.root.is_root_enabled, instance_info.id) # database calls def test_database_index_after_create(self): assert_unprocessable(dbaas.databases.list, instance_info.id) def test_database_delete_after_create(self): assert_unprocessable(dbaas.databases.delete, instance_info.id, "testdb") def test_database_create_after_create(self): assert_unprocessable(dbaas.databases.create, instance_info.id, instance_info.databases) # user calls def test_users_index_after_create(self): assert_unprocessable(dbaas.users.list, instance_info.id) def test_users_delete_after_create(self): assert_unprocessable(dbaas.users.delete, instance_info.id, "testuser") def test_users_create_after_create(self): users = list() users.append({"name": "testuser", "password": "password", "databases": [{"name": "testdb"}]}) assert_unprocessable(dbaas.users.create, instance_info.id, users) def test_resize_instance_after_create(self): assert_unprocessable(dbaas.instances.resize_instance, instance_info.id, 8) def test_resize_volume_after_create(self): assert_unprocessable(dbaas.instances.resize_volume, instance_info.id, 2) @test(depends_on_classes=[CreateInstance], runs_after=[AfterInstanceCreation], groups=[GROUP, GROUP_START, GROUP_START_SIMPLE], enabled=create_new_instance()) class WaitForGuestInstallationToFinish(object): """ Wait until the Guest is finished installing. It takes quite a while... """ @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_instance_created(self): # This version just checks the REST API status. def result_is_active(): instance = dbaas.instances.get(instance_info.id) if instance.status == "ACTIVE": return True else: # If its not ACTIVE, anything but BUILD must be # an error. assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False poll_until(result_is_active) dbaas.instances.get(instance_info.id) report = CONFIG.get_report() report.log("Created an instance, ID = %s." % instance_info.id) report.log("TIP:") report.log("Rerun the tests with TESTS_USE_INSTANCE_ID=%s " "to skip ahead to this point." 
% instance_info.id) report.log("Add TESTS_DO_NOT_DELETE_INSTANCE=True to avoid deleting " "the instance at the end of the tests.") @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP, GROUP_SECURITY_GROUPS]) class SecurityGroupsTest(object): @before_class def setUp(self): self.testSecurityGroup = dbaas.security_groups.get( instance_info.id) self.secGroupName = "SecGroup_%s" % instance_info.id self.secGroupDescription = "Security Group for %s" % instance_info.id @test def test_created_security_group(self): assert_is_not_none(self.testSecurityGroup) with TypeCheck('SecurityGroup', self.testSecurityGroup) as secGrp: secGrp.has_field('id', basestring) secGrp.has_field('name', basestring) secGrp.has_field('description', basestring) secGrp.has_field('created', basestring) secGrp.has_field('updated', basestring) assert_equal(self.testSecurityGroup.name, self.secGroupName) assert_equal(self.testSecurityGroup.description, self.secGroupDescription) assert_equal(self.testSecurityGroup.created, self.testSecurityGroup.updated) @test def test_list_security_group(self): securityGroupList = dbaas.security_groups.list() assert_is_not_none(securityGroupList) securityGroup = [x for x in securityGroupList if x.name in self.secGroupName] assert_is_not_none(securityGroup) @test def test_get_security_group(self): securityGroup = dbaas.security_groups.get(self.testSecurityGroup.id) assert_is_not_none(securityGroup) assert_equal(securityGroup.name, self.secGroupName) assert_equal(securityGroup.description, self.secGroupDescription) assert_equal(securityGroup.instance_id, instance_info.id) @test(depends_on_classes=[SecurityGroupsTest], groups=[GROUP, GROUP_SECURITY_GROUPS]) class SecurityGroupsRulesTest(object): # Security group already have default rule # that is why 'delete'-test is not needed anymore @before_class def setUp(self): self.testSecurityGroup = dbaas.security_groups.get( instance_info.id) self.secGroupName = "SecGroup_%s" % instance_info.id self.secGroupDescription = "Security Group for %s" % instance_info.id self.orig_allowable_empty_sleeps = (event_simulator. allowable_empty_sleeps) event_simulator.allowable_empty_sleeps = 2 self.test_rule_id = None @after_class def tearDown(self): (event_simulator. 
allowable_empty_sleeps) = self.orig_allowable_empty_sleeps @test def test_create_security_group_rule(self): # Need to sleep to verify created/updated timestamps time.sleep(1) cidr = "1.2.3.4/16" self.testSecurityGroupRules = ( dbaas.security_group_rules.create( group_id=self.testSecurityGroup.id, cidr=cidr)) assert_not_equal(len(self.testSecurityGroupRules), 0) assert_is_not_none(self.testSecurityGroupRules) for rule in self.testSecurityGroupRules: assert_is_not_none(rule) assert_equal(rule['security_group_id'], self.testSecurityGroup.id) assert_is_not_none(rule['id']) assert_equal(rule['cidr'], cidr) assert_equal(rule['from_port'], 3306) assert_equal(rule['to_port'], 3306) assert_is_not_none(rule['created']) self.test_rule_id = rule['id'] if not CONFIG.fake_mode: group = dbaas.security_groups.get( self.testSecurityGroup.id) assert_not_equal(self.testSecurityGroup.created, group.updated) assert_not_equal(self.testSecurityGroup.updated, group.updated) @test(depends_on=[test_create_security_group_rule]) def test_delete_security_group_rule(self): # Need to sleep to verify created/updated timestamps time.sleep(1) group_before = dbaas.security_groups.get( self.testSecurityGroup.id) dbaas.security_group_rules.delete(self.test_rule_id) assert_equal(204, dbaas.last_http_code) if not CONFIG.fake_mode: group = dbaas.security_groups.get( self.testSecurityGroup.id) assert_not_equal(group_before.created, group.updated) assert_not_equal(group_before.updated, group.updated) assert_not_equal(self.testSecurityGroup, group.updated) @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP, GROUP_START], enabled=create_new_instance()) class TestGuestProcess(object): """ Test that the guest process is started with all the right parameters """ @test def check_hwinfo_before_tests(self): if CONFIG.test_mgmt: hwinfo = dbaas_admin.hwinfo.get(instance_info.id) print("hwinfo : %r" % hwinfo._info) allowed_attrs = ['hwinfo'] CheckInstance(None).contains_allowed_attrs( hwinfo._info, allowed_attrs, msg="Hardware information") # TODO(pdmars): instead of just checking that these are int's, get # the instance flavor and verify that the values are correct for # the flavor assert_true(isinstance(hwinfo.hwinfo['mem_total'], int)) assert_true(isinstance(hwinfo.hwinfo['num_cpus'], int)) @test def grab_diagnostics_before_tests(self): if CONFIG.test_mgmt: diagnostics = dbaas_admin.diagnostics.get(instance_info.id) diagnostic_tests_helper(diagnostics) @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP, GROUP_TEST, "dbaas.dns"]) class DnsTests(object): @test def test_dns_entries_are_found(self): """Talk to DNS system to ensure entries were created.""" print("Instance name=%s" % instance_info.name) client = instance_info.dbaas_admin mgmt_instance = client.mgmt.instances.show(instance_info.id) dns_checker(mgmt_instance) @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP, GROUP_TEST, GROUP_GUEST]) class TestAfterInstanceCreatedGuestData(object): """ Test the optional parameters (databases and users) passed in to create instance call were created. 
""" @test def test_databases(self): databases = dbaas.databases.list(instance_info.id) dbs = [database.name for database in databases] for db in instance_info.databases: assert_true(db["name"] in dbs) @test def test_users(self): users = dbaas.users.list(instance_info.id) usernames = [user.name for user in users] for user in instance_info.users: assert_true(user["name"] in usernames) @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, "dbaas.listing"]) class TestInstanceListing(object): """Test the listing of the instance information.""" @before_class def setUp(self): reqs = Requirements(is_admin=False) self.other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user]) self.other_client = create_dbaas_client(self.other_user) @test def test_index_list(self): allowed_attrs = ['id', 'links', 'name', 'status', 'flavor', 'datastore', 'ip', 'hostname', 'replica_of'] if VOLUME_SUPPORT: allowed_attrs.append('volume') instances = dbaas.instances.list() assert_equal(200, dbaas.last_http_code) for instance in instances: instance_dict = instance._info with CheckInstance(instance_dict) as check: print("testing instance_dict=%s" % instance_dict) check.contains_allowed_attrs( instance_dict, allowed_attrs, msg="Instance Index") check.links(instance_dict['links']) check.flavor() check.datastore() check.volume() @test def test_get_instance(self): allowed_attrs = ['created', 'databases', 'flavor', 'hostname', 'id', 'links', 'name', 'status', 'updated', 'ip', 'datastore'] if VOLUME_SUPPORT: allowed_attrs.append('volume') else: allowed_attrs.append('local_storage') instance = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) instance_dict = instance._info print("instance_dict=%s" % instance_dict) with CheckInstance(instance_dict) as check: check.contains_allowed_attrs( instance_dict, allowed_attrs, msg="Get Instance") check.flavor() check.datastore() check.links(instance_dict['links']) check.used_volume() @test def test_get_instance_status(self): result = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) assert_equal("ACTIVE", result.status) @test def test_get_legacy_status(self): result = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) assert_true(result is not None) @test def test_get_legacy_status_notfound(self): assert_raises(exceptions.NotFound, dbaas.instances.get, -2) @test(enabled=VOLUME_SUPPORT) def test_volume_found(self): instance = dbaas.instances.get(instance_info.id) if create_new_instance(): assert_equal(instance_info.volume['size'], instance.volume['size']) else: # FIXME(peterstac): Sometimes this returns as an int - is that ok? assert_true(type(instance_info.volume['size']) in [int, float]) if create_new_instance(): # FIXME(pmalik): Keeps failing because 'used' > 'size'. # It seems like the reported 'used' space is from the root volume # instead of the attached Trove volume. 
            # assert_true(0.0 < instance.volume['used'] <
            #             instance.volume['size'])
            pass

    @test(enabled=EPHEMERAL_SUPPORT)
    def test_ephemeral_mount(self):
        instance = dbaas.instances.get(instance_info.id)
        assert_true(isinstance(instance.local_storage['used'], float))

    @test(enabled=ROOT_PARTITION)
    def test_root_partition(self):
        instance = dbaas.instances.get(instance_info.id)
        assert_true(isinstance(instance.local_storage['used'], float))

    @test(enabled=do_not_delete_instance())
    def test_instance_not_shown_to_other_user(self):
        daffy_ids = [instance.id for instance in
                     self.other_client.instances.list()]
        assert_equal(200, self.other_client.last_http_code)
        admin_ids = [instance.id for instance in dbaas.instances.list()]
        assert_equal(200, dbaas.last_http_code)
        assert_equal(len(daffy_ids), 0)
        assert_not_equal(sorted(admin_ids), sorted(daffy_ids))
        assert_raises(exceptions.NotFound,
                      self.other_client.instances.get, instance_info.id)
        for id in admin_ids:
            assert_equal(daffy_ids.count(id), 0)

    @test(enabled=do_not_delete_instance())
    def test_instance_not_deleted_by_other_user(self):
        assert_raises(exceptions.NotFound,
                      self.other_client.instances.get, instance_info.id)
        assert_raises(exceptions.NotFound,
                      self.other_client.instances.delete, instance_info.id)

    @test(enabled=CONFIG.test_mgmt)
    def test_mgmt_get_instance_after_started(self):
        result = dbaas_admin.management.show(instance_info.id)
        allowed_attrs = ['account_id', 'addresses', 'created', 'databases',
                         'flavor', 'guest_status', 'host', 'hostname', 'id',
                         'name', 'root_enabled_at', 'root_enabled_by',
                         'server_state_description', 'status', 'datastore',
                         'updated', 'users', 'volume']
        with CheckInstance(result._info) as check:
            check.contains_allowed_attrs(
                result._info, allowed_attrs,
                msg="Mgmt get instance")
            check.flavor()
            check.datastore()
            check.guest_status()
            check.addresses()
            check.volume_mgmt()


@test(depends_on_classes=[WaitForGuestInstallationToFinish],
      groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, "dbaas.update"])
class TestInstanceUpdate(object):
    """Test updating the instance information."""

    @before_class
    def setUp(self):
        reqs = Requirements(is_admin=False)
        self.other_user = CONFIG.users.find_user(
            reqs, black_list=[instance_info.user.auth_user])
        self.other_client = create_dbaas_client(self.other_user)

    @test
    def test_update_name(self):
        new_name = 'new-name'
        result = dbaas.instances.edit(instance_info.id, name=new_name)
        assert_equal(202, dbaas.last_http_code)
        result = dbaas.instances.get(instance_info.id)
        assert_equal(200, dbaas.last_http_code)
        assert_equal(new_name, result.name)

        # Restore instance name because other tests depend on it
        dbaas.instances.edit(instance_info.id, name=instance_info.name)
        assert_equal(202, dbaas.last_http_code)

    @test
    def test_update_name_to_invalid_instance(self):
        # test assigning a name to an instance that does not exist
        invalid_id = "invalid-inst-id"
        assert_raises(exceptions.NotFound,
                      instance_info.dbaas.instances.edit,
                      invalid_id, name='name')
        assert_equal(404, instance_info.dbaas.last_http_code)


@test(depends_on_classes=[WaitForGuestInstallationToFinish],
      groups=[GROUP, 'dbaas.usage'])
class TestCreateNotification(object):
    """
    Test that the create notification has been sent correctly.
""" @test def test_create_notification(self): expected = { 'instance_size': instance_info.dbaas_flavor.ram, 'tenant_id': instance_info.user.tenant_id, 'instance_id': instance_info.id, 'instance_name': instance_info.name, 'created_at': iso_time(instance_info.initial_result.created), 'launched_at': iso_time(instance_info.initial_result.created), 'region': 'LOCAL_DEV', 'availability_zone': 'nova', } instance_info.consumer.check_message(instance_info.id, 'trove.instance.create', **expected) @test(depends_on_groups=['dbaas.api.instances.actions'], groups=[GROUP, tests.INSTANCES, "dbaas.diagnostics"]) class CheckDiagnosticsAfterTests(object): """Check the diagnostics after running api commands on an instance.""" @test def test_check_diagnostics_on_instance_after_tests(self): diagnostics = dbaas_admin.diagnostics.get(instance_info.id) assert_equal(200, dbaas.last_http_code) diagnostic_tests_helper(diagnostics) msg = "Fat Pete has emerged. size (%s > 30MB)" % diagnostics.vmPeak assert_true(diagnostics.vmPeak < (30 * 1024), msg) @test(depends_on=[WaitForGuestInstallationToFinish], depends_on_groups=[GROUP_USERS, GROUP_DATABASES, GROUP_ROOT], groups=[GROUP, GROUP_STOP], runs_after_groups=[GROUP_START, GROUP_START_SIMPLE, GROUP_TEST, tests.INSTANCES]) class DeleteInstance(object): """Delete the created instance.""" @time_out(3 * 60) @test def test_delete(self): if do_not_delete_instance(): CONFIG.get_report().log("TESTS_DO_NOT_DELETE_INSTANCE=True was " "specified, skipping delete...") raise SkipTest("TESTS_DO_NOT_DELETE_INSTANCE was specified.") global dbaas if not hasattr(instance_info, "initial_result"): raise SkipTest("Instance was never created, skipping test...") # Update the report so the logs inside the instance will be saved. CONFIG.get_report().update() dbaas.instances.delete(instance_info.id) attempts = 0 try: time.sleep(1) result = True while result is not None: attempts += 1 result = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) assert_equal("SHUTDOWN", result.status) time.sleep(1) except exceptions.NotFound: pass except Exception as ex: fail("A failure occurred when trying to GET instance %s for the %d" " time: %s" % (str(instance_info.id), attempts, str(ex))) @time_out(30) @test(enabled=VOLUME_SUPPORT, depends_on=[test_delete]) def test_volume_is_deleted(self): try: while True: instance = dbaas.instances.get(instance_info.id) assert_equal(instance.volume['status'], "available") time.sleep(1) except exceptions.NotFound: pass except Exception as ex: fail("Failure: %s" % str(ex)) # TODO(tim-simpson): make sure that the actual instance, volume, # guest status, and DNS entries are deleted. 
@test(depends_on=[WaitForGuestInstallationToFinish], runs_after=[DeleteInstance], groups=[GROUP, GROUP_STOP, 'dbaas.usage']) class AfterDeleteChecks(object): @test def test_instance_delete_event_sent(self): deleted_at = None mgmt_details = dbaas_admin.management.index(deleted=True) for instance in mgmt_details: if instance.id == instance_info.id: deleted_at = instance.deleted_at expected = { 'instance_size': instance_info.dbaas_flavor.ram, 'tenant_id': instance_info.user.tenant_id, 'instance_id': instance_info.id, 'instance_name': instance_info.name, 'created_at': iso_time(instance_info.initial_result.created), 'launched_at': iso_time(instance_info.initial_result.created), 'deleted_at': iso_time(deleted_at), } instance_info.consumer.check_message(instance_info.id, 'trove.instance.delete', **expected) @test def test_instance_status_deleted_in_db(self): mgmt_details = dbaas_admin.management.index(deleted=True) for instance in mgmt_details: if instance.id == instance_info.id: assert_equal(instance.service_status, 'DELETED') break else: fail("Could not find instance %s" % instance_info.id) @test(depends_on_classes=[CreateInstance, WaitForGuestInstallationToFinish], groups=[GROUP, GROUP_START, GROUP_START_SIMPLE], enabled=CONFIG.test_mgmt) class VerifyInstanceMgmtInfo(object): @before_class def set_up(self): self.mgmt_details = dbaas_admin.management.show(instance_info.id) def _assert_key(self, k, expected): v = getattr(self.mgmt_details, k) err = "Key %r does not match expected value of %r (was %r)." \ % (k, expected, v) assert_equal(str(v), str(expected), err) @test def test_id_matches(self): self._assert_key('id', instance_info.id) @test def test_bogus_instance_mgmt_data(self): # Make sure that a management call to a bogus API 500s. # The client reshapes the exception into just an OpenStackException. assert_raises(exceptions.NotFound, dbaas_admin.management.show, -1) @test def test_mgmt_ips_associated(self): # Test that the management index properly associates an instances with # ONLY its IPs. mgmt_index = dbaas_admin.management.index() # Every instances has exactly one address. for instance in mgmt_index: assert_equal(1, len(instance.ips)) @test def test_mgmt_data(self): # Test that the management API returns all the values we expect it to. info = instance_info ir = info.initial_result cid = ir.id expected = { 'id': cid, 'name': ir.name, 'account_id': info.user.auth_user, # TODO(hub-cap): fix this since its a flavor object now # 'flavorRef': info.dbaas_flavor_href, 'databases': [ { 'name': 'db2', 'character_set': 'utf8', 'collate': 'utf8_general_ci', }, { 'name': 'firstdb', 'character_set': 'latin2', 'collate': 'latin2_general_ci', } ], } expected_entry = info.expected_dns_entry() if expected_entry: expected['hostname'] = expected_entry.name assert_true(self.mgmt_details is not None) for (k, v) in expected.items(): msg = "Attr %r is missing." % k assert_true(hasattr(self.mgmt_details, k), msg) msg = ("Attr %r expected to be %r but was %r." 
% (k, v, getattr(self.mgmt_details, k))) assert_equal(getattr(self.mgmt_details, k), v, msg) print(self.mgmt_details.users) for user in self.mgmt_details.users: assert_true('name' in user, "'name' not in users element.") class CheckInstance(AttrCheck): """Class to check various attributes of Instance details.""" def __init__(self, instance): super(CheckInstance, self).__init__() self.instance = instance def flavor(self): if 'flavor' not in self.instance: self.fail("'flavor' not found in instance.") else: allowed_attrs = ['id', 'links'] self.contains_allowed_attrs( self.instance['flavor'], allowed_attrs, msg="Flavor") self.links(self.instance['flavor']['links']) def datastore(self): if 'datastore' not in self.instance: self.fail("'datastore' not found in instance.") else: allowed_attrs = ['type', 'version'] self.contains_allowed_attrs( self.instance['datastore'], allowed_attrs, msg="datastore") def volume_key_exists(self): if 'volume' not in self.instance: self.fail("'volume' not found in instance.") return False return True def volume(self): if not VOLUME_SUPPORT: return if self.volume_key_exists(): allowed_attrs = ['size'] if not create_new_instance(): allowed_attrs.append('used') self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def used_volume(self): if not VOLUME_SUPPORT: return if self.volume_key_exists(): allowed_attrs = ['size', 'used'] print(self.instance) self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def volume_mgmt(self): if not VOLUME_SUPPORT: return if self.volume_key_exists(): allowed_attrs = ['description', 'id', 'name', 'size'] self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def addresses(self): allowed_attrs = ['addr', 'version'] print(self.instance) networks = ['usernet'] for network in networks: for address in self.instance['addresses'][network]: self.contains_allowed_attrs( address, allowed_attrs, msg="Address") def guest_status(self): allowed_attrs = ['created_at', 'deleted', 'deleted_at', 'instance_id', 'state', 'state_description', 'updated_at'] self.contains_allowed_attrs( self.instance['guest_status'], allowed_attrs, msg="Guest status") def mgmt_volume(self): if not VOLUME_SUPPORT: return allowed_attrs = ['description', 'id', 'name', 'size'] self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volume") def slave_of(self): if 'replica_of' not in self.instance: self.fail("'replica_of' not found in instance.") else: allowed_attrs = ['id', 'links'] self.contains_allowed_attrs( self.instance['replica_of'], allowed_attrs, msg="Replica-of links not found") self.links(self.instance['replica_of']['links']) def slaves(self): if 'replicas' not in self.instance: self.fail("'replicas' not found in instance.") else: allowed_attrs = ['id', 'links'] for slave in self.instance['replicas']: self.contains_allowed_attrs( slave, allowed_attrs, msg="Replica links not found") self.links(slave['links']) @test(groups=[GROUP]) class BadInstanceStatusBug(): @before_class() def setUp(self): self.instances = [] reqs = Requirements(is_admin=True) self.user = CONFIG.users.find_user( reqs, black_list=[]) self.client = create_dbaas_client(self.user) self.mgmt = self.client.management @test def test_instance_status_after_double_migrate(self): """ This test is to verify that instance status returned is more informative than 'Status is {}'. There are several ways to replicate this error. 
A double migration is just one of them but since this is a known way to recreate that error we will use it here to be sure that the error is fixed. The actual code lives in trove/instance/models.py in _validate_can_perform_action() """ # TODO(imsplitbit): test other instances where this issue could be # replicated. Resizing a resized instance awaiting confirmation # can be used as another case. This all boils back to the same # piece of code so I'm not sure if it's relevant or not but could # be done. size = None if VOLUME_SUPPORT: size = {'size': 5} result = self.client.instances.create('testbox', instance_info.dbaas_flavor_href, size) id = result.id self.instances.append(id) def verify_instance_is_active(): result = self.client.instances.get(id) print(result.status) return result.status == 'ACTIVE' def attempt_migrate(): print('attempting migration') try: self.mgmt.migrate(id) except exceptions.UnprocessableEntity: return False return True # Timing necessary to make the error occur poll_until(verify_instance_is_active, time_out=120, sleep_time=1) try: poll_until(attempt_migrate, time_out=10, sleep_time=1) except rd_exceptions.PollTimeOut: fail('Initial migration timed out') try: self.mgmt.migrate(id) except exceptions.UnprocessableEntity as err: assert('status was {}' not in err.message) else: # If we are trying to test what status is returned when an # instance is in a confirm_resize state and another # migration is attempted then we also need to # assert that an exception is raised when running migrate. # If one is not then we aren't able to test what the # returned status is in the exception message. fail('UnprocessableEntity was not thrown') @after_class(always_run=True) def tearDown(self): while len(self.instances) > 0: for id in self.instances: try: self.client.instances.delete(id) self.instances.remove(id) except exceptions.UnprocessableEntity: sleep(1.0) def diagnostic_tests_helper(diagnostics): print("diagnostics : %r" % diagnostics._info) allowed_attrs = ['version', 'fdSize', 'vmSize', 'vmHwm', 'vmRss', 'vmPeak', 'threads'] CheckInstance(None).contains_allowed_attrs( diagnostics._info, allowed_attrs, msg="Diagnostics") assert_true(isinstance(diagnostics.fdSize, int)) assert_true(isinstance(diagnostics.threads, int)) assert_true(isinstance(diagnostics.vmHwm, int)) assert_true(isinstance(diagnostics.vmPeak, int)) assert_true(isinstance(diagnostics.vmRss, int)) assert_true(isinstance(diagnostics.vmSize, int)) actual_version = diagnostics.version update_test_conf = CONFIG.values.get("guest-update-test", None) if update_test_conf is not None: if actual_version == update_test_conf['next-version']: return # This is acceptable but may not match the regex. version_pattern = re.compile(r'[a-f0-9]+') msg = "Version %s does not match pattern %s." % (actual_version, version_pattern) assert_true(version_pattern.match(actual_version), msg) trove-5.0.0/trove/tests/api/root.py0000664000567000056710000001722612701410316020426 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from nose.plugins.skip import SkipTest from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.databases import TestMysqlAccess from trove.tests.api.instances import instance_info from trove.tests.api.users import TestUsers from trove.tests import util from trove.tests.util import test_config GROUP = "dbaas.api.root" @test(depends_on_classes=[TestMysqlAccess], runs_after=[TestUsers], groups=[tests.DBAAS_API, GROUP, tests.INSTANCES]) class TestRoot(object): """ Test the root operations """ root_enabled_timestamp = 'Never' system_users = ['root', 'debian_sys_maint'] @before_class def setUp(self): self.dbaas = util.create_dbaas_client(instance_info.user) self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user) def _verify_root_timestamp(self, id): reh = self.dbaas_admin.management.root_enabled_history(id) timestamp = reh.enabled assert_equal(self.root_enabled_timestamp, timestamp) assert_equal(id, reh.id) def _root(self): global root_password self.dbaas.root.create(instance_info.id) assert_equal(200, self.dbaas.last_http_code) reh = self.dbaas_admin.management.root_enabled_history self.root_enabled_timestamp = reh(instance_info.id).enabled @test def test_root_initially_disabled(self): """Test that root is disabled.""" enabled = self.dbaas.root.is_root_enabled(instance_info.id) assert_equal(200, self.dbaas.last_http_code) is_enabled = enabled if hasattr(enabled, 'rootEnabled'): is_enabled = enabled.rootEnabled assert_false(is_enabled, "Root SHOULD NOT be enabled.") @test def test_create_user_os_admin_failure(self): users = [{"name": "os_admin", "password": "12345"}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) @test def test_delete_user_os_admin_failure(self): assert_raises(exceptions.BadRequest, self.dbaas.users.delete, instance_info.id, "os_admin") @test(depends_on=[test_root_initially_disabled], enabled=not test_config.values['root_removed_from_instance_api']) def test_root_initially_disabled_details(self): """Use instance details to test that root is disabled.""" instance = self.dbaas.instances.get(instance_info.id) assert_true(hasattr(instance, 'rootEnabled'), "Instance has no rootEnabled property.") assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.") assert_equal(self.root_enabled_timestamp, 'Never') @test(depends_on=[test_root_initially_disabled_details]) def test_root_disabled_in_mgmt_api(self): """Verifies in the management api that the timestamp exists.""" self._verify_root_timestamp(instance_info.id) @test(depends_on=[test_root_initially_disabled_details]) def test_root_disable_when_root_not_enabled(self): reh = self.dbaas_admin.management.root_enabled_history self.root_enabled_timestamp = reh(instance_info.id).enabled assert_raises(exceptions.NotFound, self.dbaas.root.delete, instance_info.id) self._verify_root_timestamp(instance_info.id) @test(depends_on=[test_root_disable_when_root_not_enabled]) def test_enable_root(self): self._root() @test(depends_on=[test_enable_root]) def test_enabled_timestamp(self): assert_not_equal(self.root_enabled_timestamp, 'Never') @test(depends_on=[test_enable_root]) def 
test_root_not_in_users_list(self): """ Tests that despite having enabled root, user root doesn't appear in the users list for the instance. """ users = self.dbaas.users.list(instance_info.id) usernames = [user.name for user in users] assert_true('root' not in usernames) @test(depends_on=[test_enable_root]) def test_root_now_enabled(self): """Test that root is now enabled.""" enabled = self.dbaas.root.is_root_enabled(instance_info.id) assert_equal(200, self.dbaas.last_http_code) assert_true(enabled, "Root SHOULD be enabled.") @test(depends_on=[test_root_now_enabled], enabled=not test_config.values['root_removed_from_instance_api']) def test_root_now_enabled_details(self): """Use instance details to test that root is now enabled.""" instance = self.dbaas.instances.get(instance_info.id) assert_true(hasattr(instance, 'rootEnabled'), "Instance has no rootEnabled property.") assert_true(instance.rootEnabled, "Root SHOULD be enabled.") assert_not_equal(self.root_enabled_timestamp, 'Never') self._verify_root_timestamp(instance_info.id) @test(depends_on=[test_root_now_enabled_details]) def test_reset_root(self): if test_config.values['root_timestamp_disabled']: raise SkipTest("Enabled timestamp not enabled yet") old_ts = self.root_enabled_timestamp self._root() assert_not_equal(self.root_enabled_timestamp, 'Never') assert_equal(self.root_enabled_timestamp, old_ts) @test(depends_on=[test_reset_root]) def test_root_still_enabled(self): """Test that after root was reset it's still enabled.""" enabled = self.dbaas.root.is_root_enabled(instance_info.id) assert_equal(200, self.dbaas.last_http_code) assert_true(enabled, "Root SHOULD still be enabled.") @test(depends_on=[test_root_still_enabled], enabled=not test_config.values['root_removed_from_instance_api']) def test_root_still_enabled_details(self): """Use instance details to test that after root was reset, it's still enabled. """ instance = self.dbaas.instances.get(instance_info.id) assert_true(hasattr(instance, 'rootEnabled'), "Instance has no rootEnabled property.") assert_true(instance.rootEnabled, "Root SHOULD still be enabled.") assert_not_equal(self.root_enabled_timestamp, 'Never') self._verify_root_timestamp(instance_info.id) @test(depends_on=[test_enable_root]) def test_root_cannot_be_deleted(self): """Even if root was enabled, the user root cannot be deleted.""" assert_raises(exceptions.BadRequest, self.dbaas.users.delete, instance_info.id, "root") @test(depends_on=[test_root_still_enabled_details]) def test_root_disable(self): reh = self.dbaas_admin.management.root_enabled_history self.root_enabled_timestamp = reh(instance_info.id).enabled self.dbaas.root.delete(instance_info.id) assert_equal(200, self.dbaas.last_http_code) self._verify_root_timestamp(instance_info.id) trove-5.0.0/trove/tests/api/__init__.py0000664000567000056710000000000012701410316021160 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/api/configurations.py0000664000567000056710000011661312701410316022475 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime import json from time import sleep import uuid from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common.utils import poll_until from trove.tests.api.backups import RestoreUsingBackup from trove.tests.api.instances import assert_unprocessable from trove.tests.api.instances import instance_info from trove.tests.api.instances import InstanceTestInfo from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.api.instances import WaitForGuestInstallationToFinish from trove.tests.config import CONFIG from trove.tests.util.check import AttrCheck from trove.tests.util.check import CollectionCheck from trove.tests.util.check import TypeCheck from trove.tests.util import create_dbaas_client from trove.tests.util.mysql import create_mysql_connection from trove.tests.util.users import Requirements GROUP = "dbaas.api.configurations" GROUP_CONFIG_DEFINE = "dbaas.api.configurations.define" CONFIG_NAME = "test_configuration" CONFIG_DESC = "configuration description" configuration_default = None configuration_info = None configuration_href = None configuration_instance = InstanceTestInfo() configuration_instance_id = None sql_variables = [ 'key_buffer_size', 'connect_timeout', 'join_buffer_size', ] def _is_valid_timestamp(time_string): try: datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S") except ValueError: return False return True # helper methods to validate configuration is applied to instance def _execute_query(host, user_name, password, query): print(host, user_name, password, query) with create_mysql_connection(host, user_name, password) as db: result = db.execute(query) return result assert_true(False, "something went wrong in the sql connection") def _get_address(instance_id): result = instance_info.dbaas_admin.mgmt.instances.show(instance_id) return result.ip[0] def _test_configuration_is_applied_to_instance(instance, configuration_id): if CONFIG.fake_mode: raise SkipTest("configuration from sql does not work in fake mode") instance_test = instance_info.dbaas.instances.get(instance.id) assert_equal(configuration_id, instance_test.configuration['id']) if configuration_id: testconfig_info = instance_info.dbaas.configurations.get( configuration_id) else: testconfig_info = instance_info.dbaas.instance.configuration( instance.id) testconfig_info['configuration'] conf_instances = instance_info.dbaas.configurations.instances( configuration_id) config_instance_ids = [inst.id for inst in conf_instances] assert_true(instance_test.id in config_instance_ids) cfg_names = testconfig_info.values.keys() host = _get_address(instance.id) for user in instance.users: username = user['name'] password = user['password'] concat_variables = "','".join(cfg_names) query = ("show variables where Variable_name " "in ('%s');" % concat_variables) actual_values = _execute_query(host, username, password, query) print("actual_values %s" % actual_values) print("testconfig_info.values %s" % testconfig_info.values) assert_true(len(actual_values) == len(cfg_names)) # check the 
configs exist attrcheck = AttrCheck() allowed_attrs = [actual_key for actual_key, actual_value in actual_values] attrcheck.contains_allowed_attrs( testconfig_info.values, allowed_attrs, msg="Configurations parameters") def _get_parameter_type(name): instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, name) resp, body = instance_info.dbaas.client.last_response print(resp) print(body) return json.loads(body)['type'] # check the config values are correct for key, value in actual_values: key_type = _get_parameter_type(key) # mysql returns 'ON' and 'OFF' for True and False respectively if value == 'ON': converted_key_value = (str(key), 1) elif value == 'OFF': converted_key_value = (str(key), 0) else: if key_type == 'integer': value = int(value) converted_key_value = (str(key), value) print("converted_key_value: %s" % str(converted_key_value)) assert_true(converted_key_value in testconfig_info.values.items()) class ConfigurationsTestBase(object): @staticmethod def expected_instance_datastore_configs(instance_id): """Given an instance retrieve the expected test configurations for instance's datastore. """ instance = instance_info.dbaas.instances.get(instance_id) datastore_type = instance.datastore['type'] datastore_test_configs = CONFIG.get(datastore_type, {}) return datastore_test_configs.get("configurations", {}) @staticmethod def expected_default_datastore_configs(): """Returns the expected test configurations for the default datastore defined in the Test Config as dbaas_datastore. """ default_datatstore = CONFIG.get('dbaas_datastore', None) datastore_test_configs = CONFIG.get(default_datatstore, {}) return datastore_test_configs.get("configurations", {}) @test(depends_on_classes=[WaitForGuestInstallationToFinish], runs_after=[RestoreUsingBackup], groups=[GROUP, GROUP_CONFIG_DEFINE]) class CreateConfigurations(ConfigurationsTestBase): @test def test_expected_configurations_parameters(self): """Test get expected configurations parameters.""" allowed_attrs = ["configuration-parameters"] instance_info.dbaas.configuration_parameters.parameters( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) resp, body = instance_info.dbaas.client.last_response attrcheck = AttrCheck() config_parameters_dict = json.loads(body) attrcheck.contains_allowed_attrs( config_parameters_dict, allowed_attrs, msg="Configurations parameters") # sanity check that a few options are in the list config_params_list = config_parameters_dict['configuration-parameters'] config_param_keys = [] for param in config_params_list: config_param_keys.append(param['name']) expected_configs = self.expected_default_datastore_configs() expected_config_params = expected_configs.get('parameters_list') # check for duplicate configuration parameters msg = "check for duplicate configuration parameters" assert_equal(len(config_param_keys), len(set(config_param_keys)), msg) for expected_config_item in expected_config_params: assert_true(expected_config_item in config_param_keys) @test def test_expected_get_configuration_parameter(self): # tests get on a single parameter to verify it has expected attributes param_name = 'key_buffer_size' allowed_config_params = ['name', 'restart_required', 'max', 'min', 'type', 'deleted', 'deleted_at', 'datastore_version_id'] param = instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, param_name) resp, body = 
instance_info.dbaas.client.last_response print("params: %s" % param) print("resp: %s" % resp) print("body: %s" % body) attrcheck = AttrCheck() config_parameter_dict = json.loads(body) print("config_parameter_dict: %s" % config_parameter_dict) attrcheck.contains_allowed_attrs( config_parameter_dict, allowed_config_params, msg="Get Configuration parameter") assert_equal(param_name, config_parameter_dict['name']) with TypeCheck('ConfigurationParameter', param) as parameter: parameter.has_field('name', basestring) parameter.has_field('restart_required', bool) parameter.has_field('max', (int, long)) parameter.has_field('min', (int, long)) parameter.has_field('type', basestring) parameter.has_field('datastore_version_id', unicode) @test def test_configurations_create_invalid_values(self): """Test create configurations with invalid values.""" values = '{"this_is_invalid": 123}' try: instance_info.dbaas.configurations.create( CONFIG_NAME, values, CONFIG_DESC) except exceptions.UnprocessableEntity: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 422) @test def test_configurations_create_invalid_value_type(self): """Test create configuration with invalild value type.""" values = '{"key_buffer_size": "this is a string not int"}' assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) @test def test_configurations_create_value_out_of_bounds(self): """Test create configuration with value out of bounds.""" expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('out_of_bounds_over')) assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) values = json.dumps(expected_configs.get('out_of_bounds_under')) assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) @test def test_valid_configurations_create(self): # create a configuration with valid parameters expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('valid_values')) expected_values = json.loads(values) result = instance_info.dbaas.configurations.create( CONFIG_NAME, values, CONFIG_DESC, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) with TypeCheck('Configuration', result) as configuration: configuration.has_field('name', basestring) configuration.has_field('description', basestring) configuration.has_field('values', dict) configuration.has_field('datastore_name', basestring) configuration.has_field('datastore_version_id', unicode) configuration.has_field('datastore_version_name', basestring) global configuration_info configuration_info = result assert_equal(configuration_info.name, CONFIG_NAME) assert_equal(configuration_info.description, CONFIG_DESC) assert_equal(configuration_info.values, expected_values) @test(runs_after=[test_valid_configurations_create]) def test_appending_to_existing_configuration(self): # test being able to update and insert new parameter name and values # to an existing configuration expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('appending_values')) # ensure updated timestamp is different than created if not CONFIG.fake_mode: sleep(1) instance_info.dbaas.configurations.edit(configuration_info.id, values) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) 
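# The configuration calls above always pass ``values`` as a JSON-encoded
# string, never a dict. A minimal sketch of that request shape, assuming the
# same troveclient compat client; the helper, its parameter values and the
# datastore names are illustrative only (valid parameter names come from the
# configuration-parameters API), and nothing in the suite calls it.
def _example_configuration_create_and_edit(client):
    values = json.dumps({'connect_timeout': 120,
                         'join_buffer_size': 1048576})
    config = client.configurations.create(
        'example-config', values, 'example description',
        datastore='mysql', datastore_version='5.6')
    # Appending or overwriting parameters later uses the same JSON-string
    # convention through edit().
    client.configurations.edit(config.id,
                               json.dumps({'connect_timeout': 60}))
    return config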
@test(runs_after=[CreateConfigurations],
      groups=[GROUP, GROUP_CONFIG_DEFINE])
class AfterConfigurationsCreation(ConfigurationsTestBase):

    @test
    def test_assign_configuration_to_invalid_instance(self):
        # test assigning a configuration to an instance that does not exist
        invalid_id = "invalid-inst-id"
        try:
            instance_info.dbaas.instances.modify(invalid_id,
                                                 configuration_info.id)
        except exceptions.NotFound:
            resp, body = instance_info.dbaas.client.last_response
            assert_equal(resp.status, 404)

    @test
    def test_assign_configuration_to_valid_instance(self):
        # test assigning a configuration to an instance
        print("instance_info.id: %s" % instance_info.id)
        print("configuration_info: %s" % configuration_info)
        print("configuration_info.id: %s" % configuration_info.id)
        config_id = configuration_info.id
        instance_info.dbaas.instances.modify(instance_info.id,
                                             configuration=config_id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)

    @test
    def test_assign_name_to_instance_using_patch(self):
        # test assigning a name to an instance
        new_name = 'new_name_1'
        report = CONFIG.get_report()
        report.log("instance_info.id: %s" % instance_info.id)
        report.log("instance name: %s" % instance_info.name)
        report.log("instance new name: %s" % new_name)
        instance_info.dbaas.instances.edit(instance_info.id, name=new_name)
        assert_equal(202, instance_info.dbaas.last_http_code)
        check = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal(check.name, new_name)

        # Restore instance name
        instance_info.dbaas.instances.edit(instance_info.id,
                                           name=instance_info.name)
        assert_equal(202, instance_info.dbaas.last_http_code)

    @test
    def test_assign_configuration_to_invalid_instance_using_patch(self):
        # test assigning a configuration group to an invalid instance
        invalid_id = "invalid-inst-id"
        assert_raises(exceptions.NotFound,
                      instance_info.dbaas.instances.edit,
                      invalid_id, configuration=configuration_info.id)

    @test(depends_on=[test_assign_configuration_to_valid_instance])
    def test_assign_configuration_to_instance_with_config(self):
        # test assigning a configuration to an instance that
        # already has an assigned configuration
        config_id = configuration_info.id
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.instances.modify,
                      instance_info.id, configuration=config_id)

    @test(depends_on=[test_assign_configuration_to_valid_instance])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        # validate that the configuration was applied correctly to the
        # instance
        print("instance_info.id: %s" % instance_info.id)
        inst = instance_info.dbaas.instances.get(instance_info.id)
        configuration_id = inst.configuration['id']
        print("configuration_info: %s" % configuration_id)
        assert_not_equal(None, inst.configuration['id'])
        _test_configuration_is_applied_to_instance(instance_info,
                                                   configuration_id)

    @test
    def test_configurations_get(self):
        # test that the instance shows up on the assigned configuration
        result = instance_info.dbaas.configurations.get(configuration_info.id)
        assert_equal(configuration_info.id, result.id)
        assert_equal(configuration_info.name, result.name)
        assert_equal(configuration_info.description, result.description)

        # check the result field types
        with TypeCheck("configuration", result) as check:
            check.has_field("id", basestring)
            check.has_field("name", basestring)
            check.has_field("description", basestring)
            check.has_field("values", dict)
            check.has_field("created", basestring)
            check.has_field("updated", basestring)
            check.has_field("instance_count", int)
print(result.values) # check for valid timestamps assert_true(_is_valid_timestamp(result.created)) assert_true(_is_valid_timestamp(result.updated)) # check that created and updated timestamps differ, since # test_appending_to_existing_configuration should have changed the # updated timestamp if not CONFIG.fake_mode: assert_not_equal(result.created, result.updated) assert_equal(result.instance_count, 1) with CollectionCheck("configuration_values", result.values) as check: # check each item has the correct type according to the rules for (item_key, item_val) in result.values.iteritems(): print("item_key: %s" % item_key) print("item_val: %s" % item_val) dbaas = instance_info.dbaas param = dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, item_key) if param.type == 'integer': check.has_element(item_key, int) if param.type == 'string': check.has_element(item_key, basestring) if param.type == 'boolean': check.has_element(item_key, bool) # Test to make sure that another user is not able to GET this config reqs = Requirements(is_admin=False) test_auth_user = instance_info.user.auth_user other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user]) other_user_tenant_id = other_user.tenant_id client_tenant_id = instance_info.user.tenant_id if other_user_tenant_id == client_tenant_id: other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user, other_user]) print(other_user) print(other_user.__dict__) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.configurations.get, configuration_info.id) @test(runs_after=[AfterConfigurationsCreation], groups=[GROUP, GROUP_CONFIG_DEFINE]) class ListConfigurations(ConfigurationsTestBase): @test def test_configurations_list(self): # test listing configurations show up result = instance_info.dbaas.configurations.list() for conf in result: with TypeCheck("Configuration", conf) as check: check.has_field('id', basestring) check.has_field('name', basestring) check.has_field('description', basestring) check.has_field('datastore_version_id', basestring) check.has_field('datastore_version_name', basestring) check.has_field('datastore_name', basestring) exists = [config for config in result if config.id == configuration_info.id] assert_equal(1, len(exists)) configuration = exists[0] assert_equal(configuration.id, configuration_info.id) assert_equal(configuration.name, configuration_info.name) assert_equal(configuration.description, configuration_info.description) @test def test_configurations_list_for_instance(self): # test getting an instance shows the configuration assigned shows up instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal(instance.configuration['id'], configuration_info.id) assert_equal(instance.configuration['name'], configuration_info.name) # expecting two things in links, href and bookmark assert_equal(2, len(instance.configuration['links'])) link = instance.configuration['links'][0] global configuration_href configuration_href = link['href'] @test def test_get_default_configuration_on_instance(self): # test the api call to get the default template of an instance exists result = instance_info.dbaas.instances.configuration(instance_info.id) global configuration_default configuration_default = result assert_not_equal(None, result.configuration) @test def test_changing_configuration_with_nondynamic_parameter(self): # test that changing a non-dynamic parameter is applied to instance # and show 
that the instance requires a restart
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('nondynamic_parameter'))
        instance_info.dbaas.configurations.update(configuration_info.id,
                                                  values)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)

        instance_info.dbaas.configurations.get(configuration_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)

    @test(depends_on=[test_changing_configuration_with_nondynamic_parameter])
    @time_out(20)
    def test_waiting_for_instance_in_restart_required(self):
        def result_is_not_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status == "ACTIVE":
                return False
            else:
                return True

        poll_until(result_is_not_active)

        instance = instance_info.dbaas.instances.get(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        print(instance.status)
        assert_equal('RESTART_REQUIRED', instance.status)

    @test(depends_on=[test_waiting_for_instance_in_restart_required])
    def test_restart_service_should_return_active(self):
        # test that after restarting the instance it becomes active
        instance_info.dbaas.instances.restart(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)

        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status == "ACTIVE":
                return True
            else:
                assert_equal("REBOOT", instance.status)
                return False

        poll_until(result_is_active)

    @test(depends_on=[test_restart_service_should_return_active])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        # validate that the configuration was applied correctly to the
        # instance
        inst = instance_info.dbaas.instances.get(instance_info.id)
        configuration_id = inst.configuration['id']
        assert_not_equal(None, inst.configuration['id'])
        _test_configuration_is_applied_to_instance(instance_info,
                                                   configuration_id)

    @test(depends_on=[test_configurations_list])
    def test_compare_list_and_details_timestamps(self):
        # compare config timestamps between list and details calls
        result = instance_info.dbaas.configurations.list()
        list_config = [config for config in result
                       if config.id == configuration_info.id]
        assert_equal(1, len(list_config))
        details_config = instance_info.dbaas.configurations.get(
            configuration_info.id)
        assert_equal(list_config[0].created, details_config.created)
        assert_equal(list_config[0].updated, details_config.updated)


@test(runs_after=[ListConfigurations],
      groups=[GROUP, GROUP_CONFIG_DEFINE])
class StartInstanceWithConfiguration(ConfigurationsTestBase):

    @test
    def test_start_instance_with_configuration(self):
        # test that a new instance will apply the configuration on create
        global configuration_instance
        databases = []
        databases.append({"name": "firstdbconfig", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "db2"})
        configuration_instance.databases = databases
        users = []
        users.append({"name": "liteconf", "password": "liteconfpass",
                      "databases": [{"name": "firstdbconfig"}]})
        configuration_instance.users = users
        configuration_instance.name = "TEST_" + str(uuid.uuid4()) + "_config"
        flavor_href = instance_info.dbaas_flavor_href
        configuration_instance.dbaas_flavor_href = flavor_href
        configuration_instance.volume = instance_info.volume
        configuration_instance.dbaas_datastore = instance_info.dbaas_datastore
        configuration_instance.dbaas_datastore_version = \
            instance_info.dbaas_datastore_version
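        # configuration_href was captured from the configuration 'links' in
        # ListConfigurations, so this create passes the href form of the
        # configuration reference rather than the bare id.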
configuration_instance.nics = instance_info.nics result = instance_info.dbaas.instances.create( configuration_instance.name, configuration_instance.dbaas_flavor_href, configuration_instance.volume, configuration_instance.databases, configuration_instance.users, nics=configuration_instance.nics, availability_zone="nova", datastore=configuration_instance.dbaas_datastore, datastore_version=configuration_instance.dbaas_datastore_version, configuration=configuration_href) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) configuration_instance.id = result.id @test(depends_on_classes=[StartInstanceWithConfiguration], runs_after_groups=['dbaas.api.backups'], groups=[GROUP]) class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase): @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_instance_with_configuration_active(self): # wait for the instance to become active def result_is_active(): instance = instance_info.dbaas.instances.get( configuration_instance.id) if instance.status == "ACTIVE": return True else: assert_equal("BUILD", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_instance_with_configuration_active]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): # validate that the configuration was applied correctly to the instance inst = instance_info.dbaas.instances.get(configuration_instance.id) configuration_id = inst.configuration['id'] assert_not_equal(None, inst.configuration['id']) _test_configuration_is_applied_to_instance(configuration_instance, configuration_id) @test(runs_after=[WaitForConfigurationInstanceToFinish], groups=[GROUP]) class DeleteConfigurations(ConfigurationsTestBase): @before_class def setUp(self): # need to store the parameter details that will be deleted config_param_name = sql_variables[1] instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, config_param_name) resp, body = instance_info.dbaas.client.last_response print(resp) print(body) self.config_parameter_dict = json.loads(body) @after_class(always_run=True) def tearDown(self): # need to "undelete" the parameter that was deleted from the mgmt call ds = instance_info.dbaas_datastore ds_v = instance_info.dbaas_datastore_version version = instance_info.dbaas.datastore_versions.get( ds, ds_v) client = instance_info.dbaas_admin.mgmt_configs print(self.config_parameter_dict) client.create(version.id, self.config_parameter_dict['name'], self.config_parameter_dict['restart_required'], self.config_parameter_dict['type'], self.config_parameter_dict['max'], self.config_parameter_dict['min']) @test def test_delete_invalid_configuration_not_found(self): # test deleting a configuration that does not exist throws exception invalid_configuration_id = "invalid-config-id" assert_raises(exceptions.NotFound, instance_info.dbaas.configurations.delete, invalid_configuration_id) @test(depends_on=[test_delete_invalid_configuration_not_found]) def test_delete_configuration_parameter_with_mgmt_api(self): # testing a param that is assigned to an instance can be deleted # and doesn't affect an unassign later. 
So we delete a parameter # that is used by a test (connect_timeout) ds = instance_info.dbaas_datastore ds_v = instance_info.dbaas_datastore_version version = instance_info.dbaas.datastore_versions.get( ds, ds_v) client = instance_info.dbaas_admin.mgmt_configs config_param_name = self.config_parameter_dict['name'] client.delete(version.id, config_param_name) assert_raises( exceptions.NotFound, instance_info.dbaas.configuration_parameters.get_parameter, ds, ds_v, config_param_name) @test(depends_on=[test_delete_configuration_parameter_with_mgmt_api]) def test_unable_delete_instance_configurations(self): # test deleting a configuration that is assigned to # an instance is not allowed. assert_raises(exceptions.BadRequest, instance_info.dbaas.configurations.delete, configuration_info.id) @test(depends_on=[test_unable_delete_instance_configurations]) @time_out(30) def test_unassign_configuration_from_instances(self): # test to unassign configuration from instance instance_info.dbaas.instances.modify(configuration_instance.id, configuration="") resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.instances.get(configuration_instance.id) # test that config group is not removed instance_info.dbaas.instances.modify(instance_info.id, configuration=None) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.instances.get(instance_info.id) def result_has_no_configuration(): instance = instance_info.dbaas.instances.get(inst_info.id) if hasattr(instance, 'configuration'): return False else: return True inst_info = instance_info poll_until(result_has_no_configuration) inst_info = configuration_instance poll_until(result_has_no_configuration) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_unassign_configuration_from_instances]) def test_assign_in_wrong_state(self): # test assigning a config to an instance in RESTART state assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.modify, configuration_instance.id, configuration=configuration_info.id) @test(depends_on=[test_assign_in_wrong_state]) def test_no_instances_on_configuration(self): # test there is no configuration on the instance after unassigning result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(configuration_info.id, result.id) assert_equal(configuration_info.name, result.name) assert_equal(configuration_info.description, result.description) assert_equal(result.instance_count, 0) print(configuration_instance.id) print(instance_info.id) @test(depends_on=[test_unassign_configuration_from_instances]) @time_out(120) def test_restart_service_after_unassign_return_active(self): def result_is_not_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return False else: return True poll_until(result_is_not_active) config = instance_info.dbaas.configurations.list() print(config) instance = instance_info.dbaas.instances.get(instance_info.id) print(instance.__dict__) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) print(instance.status) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_restart_service_after_unassign_return_active]) @time_out(120) def test_restart_service_should_return_active(self): # test that after restarting the instance it becomes active instance_info.dbaas.instances.restart(instance_info.id) 
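        # restart() is asynchronous: the 202 below only acknowledges the
        # request, and the poll that follows waits for the instance to move
        # through REBOOT back to ACTIVE.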
resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_restart_service_should_return_active]) def test_assign_config_and_name_to_instance_using_patch(self): # test assigning a configuration and name to an instance new_name = 'new_name' report = CONFIG.get_report() report.log("instance_info.id: %s" % instance_info.id) report.log("configuration_info: %s" % configuration_info) report.log("configuration_info.id: %s" % configuration_info.id) report.log("instance name:%s" % instance_info.name) report.log("instance new name:%s" % new_name) saved_name = instance_info.name config_id = configuration_info.id instance_info.dbaas.instances.edit(instance_info.id, configuration=config_id, name=new_name) assert_equal(202, instance_info.dbaas.last_http_code) check = instance_info.dbaas.instances.get(instance_info.id) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal(check.name, new_name) # restore instance name instance_info.dbaas.instances.edit(instance_info.id, name=saved_name) assert_equal(202, instance_info.dbaas.last_http_code) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) # restart to be sure configuration is applied instance_info.dbaas.instances.restart(instance_info.id) assert_equal(202, instance_info.dbaas.last_http_code) sleep(2) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) # test assigning a configuration to an instance that # already has an assigned configuration with patch config_id = configuration_info.id assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.edit, instance_info.id, configuration=config_id) @test(runs_after=[test_assign_config_and_name_to_instance_using_patch]) def test_unassign_configuration_after_patch(self): # remove the configuration from the instance instance_info.dbaas.instances.edit(instance_info.id, remove_configuration=True) assert_equal(202, instance_info.dbaas.last_http_code) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) # restart to be sure configuration has been unassigned instance_info.dbaas.instances.restart(instance_info.id) assert_equal(202, instance_info.dbaas.last_http_code) sleep(2) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(result.instance_count, 0) @test def test_unassign_configuration_from_invalid_instance_using_patch(self): # test unassign config group from an invalid instance invalid_id = "invalid-inst-id" try: instance_info.dbaas.instances.edit(invalid_id, remove_configuration=True) except exceptions.NotFound: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 404) @test(runs_after=[test_unassign_configuration_after_patch]) def test_delete_unassigned_configuration(self): # test that we can delete the configuration after no instances are # assigned to it any longer 
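        # (the unassign tests above removed it from both instances, and
        # test_no_instances_on_configuration verified instance_count is 0)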
        instance_info.dbaas.configurations.delete(configuration_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)

    @test(depends_on=[test_delete_unassigned_configuration])
    @time_out(TIMEOUT_INSTANCE_DELETE)
    def test_delete_configuration_instance(self):
        # test that we can delete the instance even though there is a
        # configuration applied to the instance
        instance_info.dbaas.instances.delete(configuration_instance.id)
        assert_equal(202, instance_info.dbaas.last_http_code)

        def instance_is_gone():
            try:
                instance_info.dbaas.instances.get(configuration_instance.id)
                return False
            except exceptions.NotFound:
                return True

        poll_until(instance_is_gone)
        assert_raises(exceptions.NotFound,
                      instance_info.dbaas.instances.get,
                      configuration_instance.id)
trove-5.0.0/trove/tests/api/instances_delete.py0000664000567000056710000001427312701410316022753 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import time

from proboscis import after_class
from proboscis import asserts
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import test
from troveclient.compat import exceptions

from trove.common import cfg
from trove.common.utils import poll_until
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import VOLUME_SUPPORT
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements

CONF = cfg.CONF


class TestBase(object):

    def set_up(self):
        reqs = Requirements(is_admin=True)
        self.user = test_config.users.find_user(reqs)
        self.dbaas = create_dbaas_client(self.user)

    def create_instance(self, name, size=1):
        volume = None
        if VOLUME_SUPPORT:
            volume = {'size': size}
        result = self.dbaas.instances.create(name,
                                             instance_info.dbaas_flavor_href,
                                             volume, [], [])
        return result.id

    def wait_for_instance_status(self, instance_id, status="ACTIVE",
                                 acceptable_states=None):
        if acceptable_states:
            acceptable_states.append(status)

        def assert_state(instance):
            if acceptable_states:
                asserts.assert_true(instance.status in acceptable_states,
                                    "Invalid status: %s" % instance.status)
            return instance

        poll_until(lambda: self.dbaas.instances.get(instance_id),
                   lambda instance: assert_state(instance).status == status,
                   time_out=30, sleep_time=1)

    def wait_for_instance_task_status(self, instance_id, description):
        poll_until(lambda: self.dbaas.management.show(instance_id),
                   lambda instance: instance.task_description == description,
                   time_out=30, sleep_time=1)

    def is_instance_deleted(self, instance_id):
        while True:
            try:
                self.dbaas.instances.get(instance_id)
            except exceptions.NotFound:
                return True
            time.sleep(.5)

    def get_task_info(self, instance_id):
        instance = self.dbaas.management.show(instance_id)
        return instance.status, instance.task_description

    def delete_instance(self, instance_id, assert_deleted=True):
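        # Deletes through the API and, when assert_deleted is True, blocks
        # until the instance is no longer returned (NotFound).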
        instance = self.dbaas.instances.get(instance_id)
        instance.delete()
        if assert_deleted:
            asserts.assert_true(self.is_instance_deleted(instance_id))

    def delete_errored_instance(self, instance_id):
        self.wait_for_instance_status(instance_id, 'ERROR')
        status, desc = self.get_task_info(instance_id)
        asserts.assert_equal(status, "ERROR")
        self.delete_instance(instance_id)


@test(runs_after_groups=["services.initialize", "dbaas.guest.shutdown"],
      groups=['dbaas.api.instances.delete'])
class ErroredInstanceDelete(TestBase):
    """
    Test that an instance in an ERROR state is actually deleted when delete
    is called.
    """

    @before_class
    def set_up_err(self):
        """Create some flawed instances."""
        from trove.taskmanager.models import CONF
        self.old_dns_support = CONF.trove_dns_support
        CONF.trove_dns_support = False

        super(ErroredInstanceDelete, self).set_up()
        # Create an instance that fails during server prov.
        self.server_error = self.create_instance('test_SERVER_ERROR')
        if VOLUME_SUPPORT:
            # Create an instance that fails during volume prov.
            self.volume_error = self.create_instance('test_VOLUME_ERROR',
                                                     size=9)
        else:
            self.volume_error = None
        # Create an instance that fails during DNS prov.
        # self.dns_error = self.create_instance('test_DNS_ERROR')
        # Create an instance that fails while it's being deleted the first
        # time.
        self.delete_error = self.create_instance('test_ERROR_ON_DELETE')

    @after_class(always_run=True)
    def clean_up(self):
        from trove.taskmanager.models import CONF
        CONF.trove_dns_support = self.old_dns_support

    @test
    @time_out(30)
    def delete_server_error(self):
        self.delete_errored_instance(self.server_error)

    @test(enabled=VOLUME_SUPPORT)
    @time_out(30)
    def delete_volume_error(self):
        self.delete_errored_instance(self.volume_error)

    @test(enabled=False)
    @time_out(30)
    def delete_dns_error(self):
        self.delete_errored_instance(self.dns_error)

    @test
    @time_out(30)
    def delete_error_on_delete_instance(self):
        id = self.delete_error
        self.wait_for_instance_status(id, 'ACTIVE')
        self.wait_for_instance_task_status(id, 'No tasks for the instance.')
        instance = self.dbaas.management.show(id)
        asserts.assert_equal(instance.status, "ACTIVE")
        asserts.assert_equal(instance.task_description,
                             'No tasks for the instance.')
        # Try to delete the instance. This fails the first time due to how
        # the test fake is set up.
        self.delete_instance(id, assert_deleted=False)
        instance = self.dbaas.management.show(id)
        asserts.assert_equal(instance.status, "SHUTDOWN")
        asserts.assert_equal(instance.task_description,
                             "Deleting the instance.")
        # Try a second time. This will succeed.
        self.delete_instance(id)
trove-5.0.0/trove/tests/api/backups.py0000664000567000056710000005161112701410316021067 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
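
# The classes below exercise a full backup lifecycle, in dependency order:
# create a backup (including the error paths), wait for it to complete,
# list and fetch it, take an incremental backup, restore from the
# incremental, verify the restored databases, then delete the restored
# instance and the backups themselves.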
from proboscis.asserts import assert_equal from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import fail from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common import cfg from trove.common import exception from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import assert_unprocessable from trove.tests.api.instances import instance_info from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.api.instances import WaitForGuestInstallationToFinish from trove.tests.config import CONFIG from trove.tests.util import create_dbaas_client from trove.tests.util.users import Requirements GROUP = "dbaas.api.backups" BACKUP_NAME = 'backup_test' BACKUP_DESC = 'test description' TIMEOUT_BACKUP_CREATE = 60 * 30 TIMEOUT_BACKUP_DELETE = 120 backup_info = None incremental_info = None incremental_db = generate_uuid() incremental_restore_instance_id = None total_num_dbs = 0 backup_count_prior_to_create = 0 backup_count_for_instance_prior_to_create = 0 @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP, tests.INSTANCES]) class CreateBackups(object): @test def test_backup_create_instance_invalid(self): """Test create backup with unknown instance.""" invalid_inst_id = 'invalid-inst-id' try: instance_info.dbaas.backups.create(BACKUP_NAME, invalid_inst_id, BACKUP_DESC) except exceptions.BadRequest as e: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 400) assert_equal(e.message, "Validation error: " "backup['instance'] u'%s' does not match " "'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-" "([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-" "([0-9a-fA-F]){12}$'" % invalid_inst_id) @test def test_backup_create_instance_not_found(self): """Test create backup with unknown instance.""" assert_raises(exceptions.NotFound, instance_info.dbaas.backups.create, BACKUP_NAME, generate_uuid(), BACKUP_DESC) @test def test_backup_create_instance(self): """Test create backup for a given instance.""" # Necessary to test that the count increases. 
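        # Both counts are captured here and compared against the post-create
        # totals in ListBackups.test_backup_list and
        # test_backup_list_for_instance.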
global backup_count_prior_to_create backup_count_prior_to_create = len(instance_info.dbaas.backups.list()) global backup_count_for_instance_prior_to_create backup_count_for_instance_prior_to_create = len( instance_info.dbaas.instances.backups(instance_info.id)) result = instance_info.dbaas.backups.create(BACKUP_NAME, instance_info.id, BACKUP_DESC) global backup_info backup_info = result assert_equal(BACKUP_NAME, result.name) assert_equal(BACKUP_DESC, result.description) assert_equal(instance_info.id, result.instance_id) assert_equal('NEW', result.status) instance = instance_info.dbaas.instances.get(instance_info.id) datastore_version = instance_info.dbaas.datastore_versions.get( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) assert_equal('BACKUP', instance.status) assert_equal(instance_info.dbaas_datastore, result.datastore['type']) assert_equal(instance_info.dbaas_datastore_version, result.datastore['version']) assert_equal(datastore_version.id, result.datastore['version_id']) @test(runs_after=[CreateBackups], groups=[GROUP, tests.INSTANCES]) class AfterBackupCreation(object): @test def test_restore_instance_from_not_completed_backup(self): assert_raises(exceptions.Conflict, RestoreUsingBackup._restore, backup_info.id) assert_equal(409, instance_info.dbaas.last_http_code) @test def test_instance_action_right_after_backup_create(self): """Test any instance action while backup is running.""" assert_unprocessable(instance_info.dbaas.instances.resize_instance, instance_info.id, 1) @test def test_backup_create_another_backup_running(self): """Test create backup when another backup is running.""" assert_unprocessable(instance_info.dbaas.backups.create, 'backup_test2', instance_info.id, 'test description2') @test def test_backup_delete_still_running(self): """Test delete backup when it is running.""" result = instance_info.dbaas.backups.list() backup = result[0] assert_unprocessable(instance_info.dbaas.backups.delete, backup.id) class BackupRestoreMixin(): def verify_backup(self, backup_id): def result_is_active(): backup = instance_info.dbaas.backups.get(backup_id) if backup.status == "COMPLETED": return True else: assert_not_equal("FAILED", backup.status) return False poll_until(result_is_active) def instance_is_totally_gone(self, instance_id): def instance_is_gone(): try: instance_info.dbaas.instances.get( instance_id) return False except exceptions.NotFound: return True poll_until( instance_is_gone, time_out=TIMEOUT_INSTANCE_DELETE) def backup_is_totally_gone(self, backup_id): def backup_is_gone(): try: instance_info.dbaas.backups.get(backup_id) return False except exceptions.NotFound: return True poll_until(backup_is_gone, time_out=TIMEOUT_BACKUP_DELETE) def verify_instance_is_active(self, instance_id): # This version just checks the REST API status. def result_is_active(): instance = instance_info.dbaas.instances.get(instance_id) if instance.status == "ACTIVE": return True else: # If its not ACTIVE, anything but BUILD must be # an error. assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False poll_until(result_is_active, sleep_time=5, time_out=TIMEOUT_INSTANCE_CREATE) @test(runs_after=[AfterBackupCreation], groups=[GROUP, tests.INSTANCES]) class WaitForBackupCreateToFinish(BackupRestoreMixin): """ Wait until the backup create is finished. """ @test @time_out(TIMEOUT_BACKUP_CREATE) def test_backup_created(self): # This version just checks the REST API status. 
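        # verify_backup (from BackupRestoreMixin) polls until the backup
        # reports COMPLETED, failing fast if it ever reports FAILED.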
self.verify_backup(backup_info.id) @test(depends_on=[WaitForBackupCreateToFinish], groups=[GROUP, tests.INSTANCES]) class ListBackups(object): @test def test_backup_list(self): """Test list backups.""" result = instance_info.dbaas.backups.list() assert_equal(backup_count_prior_to_create + 1, len(result)) backup = result[0] assert_equal(BACKUP_NAME, backup.name) assert_equal(BACKUP_DESC, backup.description) assert_not_equal(0.0, backup.size) assert_equal(instance_info.id, backup.instance_id) assert_equal('COMPLETED', backup.status) @test def test_backup_list_filter_datastore(self): """Test list backups and filter by datastore.""" result = instance_info.dbaas.backups.list( datastore=instance_info.dbaas_datastore) assert_equal(backup_count_prior_to_create + 1, len(result)) backup = result[0] assert_equal(BACKUP_NAME, backup.name) assert_equal(BACKUP_DESC, backup.description) assert_not_equal(0.0, backup.size) assert_equal(instance_info.id, backup.instance_id) assert_equal('COMPLETED', backup.status) @test def test_backup_list_filter_different_datastore(self): """Test list backups and filter by datastore.""" result = instance_info.dbaas.backups.list( datastore='Test_Datastore_1') # There should not be any backups for this datastore assert_equal(0, len(result)) @test def test_backup_list_filter_datastore_not_found(self): """Test list backups and filter by datastore.""" assert_raises(exceptions.NotFound, instance_info.dbaas.backups.list, datastore='NOT_FOUND') @test def test_backup_list_for_instance(self): """Test backup list for instance.""" result = instance_info.dbaas.instances.backups(instance_info.id) assert_equal(backup_count_for_instance_prior_to_create + 1, len(result)) backup = result[0] assert_equal(BACKUP_NAME, backup.name) assert_equal(BACKUP_DESC, backup.description) assert_not_equal(0.0, backup.size) assert_equal(instance_info.id, backup.instance_id) assert_equal('COMPLETED', backup.status) @test def test_backup_get(self): """Test get backup.""" backup = instance_info.dbaas.backups.get(backup_info.id) assert_equal(backup_info.id, backup.id) assert_equal(backup_info.name, backup.name) assert_equal(backup_info.description, backup.description) assert_equal(instance_info.id, backup.instance_id) assert_not_equal(0.0, backup.size) assert_equal('COMPLETED', backup.status) assert_equal(instance_info.dbaas_datastore, backup.datastore['type']) assert_equal(instance_info.dbaas_datastore_version, backup.datastore['version']) datastore_version = instance_info.dbaas.datastore_versions.get( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) assert_equal(datastore_version.id, backup.datastore['version_id']) # Test to make sure that user in other tenant is not able # to GET this backup reqs = Requirements(is_admin=False) other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user]) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.backups.get, backup_info.id) @test(runs_after=[ListBackups], depends_on=[WaitForBackupCreateToFinish], groups=[GROUP, tests.INSTANCES]) class IncrementalBackups(BackupRestoreMixin): @test def test_create_db(self): global total_num_dbs total_num_dbs = len(instance_info.dbaas.databases.list( instance_info.id)) databases = [{'name': incremental_db}] instance_info.dbaas.databases.create(instance_info.id, databases) assert_equal(202, instance_info.dbaas.last_http_code) total_num_dbs += 1 @test(runs_after=['test_create_db']) def test_create_incremental_backup(self): result = 
instance_info.dbaas.backups.create("incremental-backup", backup_info.instance_id, parent_id=backup_info.id) global incremental_info incremental_info = result assert_equal(202, instance_info.dbaas.last_http_code) # Wait for the backup to finish self.verify_backup(incremental_info.id) assert_equal(backup_info.id, incremental_info.parent_id) @test(groups=[GROUP, tests.INSTANCES]) class RestoreUsingBackup(object): @classmethod def _restore(cls, backup_ref): restorePoint = {"backupRef": backup_ref} result = instance_info.dbaas.instances.create( instance_info.name + "_restore", instance_info.dbaas_flavor_href, instance_info.volume, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, restorePoint=restorePoint) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) return result.id @test(depends_on=[IncrementalBackups]) def test_restore_incremental(self): global incremental_restore_instance_id incremental_restore_instance_id = self._restore(incremental_info.id) @test(depends_on_classes=[WaitForGuestInstallationToFinish], runs_after_groups=['dbaas.api.configurations.define'], groups=[GROUP, tests.INSTANCES]) class WaitForRestoreToFinish(object): @classmethod def _poll(cls, instance_id_to_poll): """Shared "instance restored" test logic.""" # This version just checks the REST API status. def result_is_active(): instance = instance_info.dbaas.instances.get(instance_id_to_poll) if instance.status == "ACTIVE": return True else: # If its not ACTIVE, anything but BUILD must be # an error. assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_CREATE, sleep_time=10) """ Wait until the instance is finished restoring from incremental backup. """ @test(depends_on=[RestoreUsingBackup.test_restore_incremental]) def test_instance_restored_incremental(self): try: self._poll(incremental_restore_instance_id) except exception.PollTimeOut: fail('Timed out') @test(enabled=(not CONFIG.fake_mode), groups=[GROUP, tests.INSTANCES]) class VerifyRestore(object): @classmethod def _poll(cls, instance_id, db): def db_is_found(): databases = instance_info.dbaas.databases.list(instance_id) if db in [d.name for d in databases]: return True else: return False poll_until(db_is_found, time_out=60 * 10, sleep_time=10) @test(depends_on=[WaitForRestoreToFinish. 
test_instance_restored_incremental]) def test_database_restored_incremental(self): try: self._poll(incremental_restore_instance_id, incremental_db) assert_equal(total_num_dbs, len(instance_info.dbaas.databases.list( incremental_restore_instance_id))) except exception.PollTimeOut: fail('Timed out') @test(groups=[GROUP, tests.INSTANCES]) class DeleteRestoreInstance(object): @classmethod def _delete(cls, instance_id): """Test delete restored instance.""" instance_info.dbaas.instances.delete(instance_id) assert_equal(202, instance_info.dbaas.last_http_code) def instance_is_gone(): try: instance_info.dbaas.instances.get(instance_id) return False except exceptions.NotFound: return True poll_until(instance_is_gone, time_out=TIMEOUT_INSTANCE_DELETE) assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get, instance_id) @test(runs_after=[VerifyRestore.test_database_restored_incremental]) def test_delete_restored_instance_incremental(self): try: self._delete(incremental_restore_instance_id) except exception.PollTimeOut: fail('Timed out') @test(runs_after=[DeleteRestoreInstance], groups=[GROUP, tests.INSTANCES]) class DeleteBackups(object): @test def test_backup_delete_not_found(self): """Test delete unknown backup.""" assert_raises(exceptions.NotFound, instance_info.dbaas.backups.delete, 'nonexistent_backup') @test def test_backup_delete_other(self): """Test another user cannot delete backup.""" # Test to make sure that user in other tenant is not able # to DELETE this backup reqs = Requirements(is_admin=False) other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user]) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.backups.delete, backup_info.id) @test(runs_after=[test_backup_delete_other]) def test_backup_delete(self): """Test backup deletion.""" instance_info.dbaas.backups.delete(backup_info.id) assert_equal(202, instance_info.dbaas.last_http_code) def backup_is_gone(): try: instance_info.dbaas.backups.get(backup_info.id) return False except exceptions.NotFound: return True poll_until(backup_is_gone, time_out=TIMEOUT_BACKUP_DELETE) @test(runs_after=[test_backup_delete]) def test_incremental_deleted(self): """Test backup children are deleted.""" if incremental_info is None: raise SkipTest("Incremental Backup not created") assert_raises(exceptions.NotFound, instance_info.dbaas.backups.get, incremental_info.id) @test(depends_on=[WaitForGuestInstallationToFinish], runs_after=[DeleteBackups]) class FakeTestHugeBackupOnSmallInstance(BackupRestoreMixin): report = CONFIG.get_report() def tweak_fake_guest(self, size): from trove.tests.fakes import guestagent guestagent.BACKUP_SIZE = size @test def test_load_mysql_with_data(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.tweak_fake_guest(1.9) @test(depends_on=[test_load_mysql_with_data]) def test_create_huge_backup(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.new_backup = instance_info.dbaas.backups.create( BACKUP_NAME, instance_info.id, BACKUP_DESC) assert_equal(202, instance_info.dbaas.last_http_code) @test(depends_on=[test_create_huge_backup]) def test_verify_huge_backup_completed(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.verify_backup(self.new_backup.id) @test(depends_on=[test_verify_huge_backup_completed]) def test_try_to_restore_on_small_instance_with_volume(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") assert_raises(exceptions.Forbidden, 
instance_info.dbaas.instances.create, instance_info.name + "_restore", instance_info.dbaas_flavor_href, {'size': 1}, datastore=instance_info.dbaas_datastore, datastore_version=(instance_info. dbaas_datastore_version), restorePoint={"backupRef": self.new_backup.id}) assert_equal(403, instance_info.dbaas.last_http_code) @test(depends_on=[test_verify_huge_backup_completed]) def test_try_to_restore_on_small_instance_with_flavor_only(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.orig_conf_value = cfg.CONF.get( instance_info.dbaas_datastore).volume_support cfg.CONF.get(instance_info.dbaas_datastore).volume_support = False assert_raises(exceptions.Forbidden, instance_info.dbaas.instances.create, instance_info.name + "_restore", 11, datastore=instance_info.dbaas_datastore, datastore_version=(instance_info. dbaas_datastore_version), restorePoint={"backupRef": self.new_backup.id}) assert_equal(403, instance_info.dbaas.last_http_code) cfg.CONF.get( instance_info.dbaas_datastore ).volume_support = self.orig_conf_value trove-5.0.0/trove/tests/api/header.py0000664000567000056710000000365312701410316020672 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from functools import wraps from proboscis import SkipTest from proboscis import test from troveclient.compat.client import TroveHTTPClient from trove.tests.api.versions import Versions @test(groups=['dbaas.api.headers']) def must_work_with_blank_accept_headers(): """Test to make sure that trove works without the headers.""" versions = Versions() versions.setUp() client = versions.client if type(client.client).morph_request != TroveHTTPClient.morph_request: raise SkipTest("Not using the JSON client so can't execute this test.") original_morph_request = client.client.morph_request def morph_content_type_to(content_type): @wraps(original_morph_request) def _morph_request(kwargs): original_morph_request(kwargs) kwargs['headers']['Accept'] = content_type kwargs['headers']['Content-Type'] = content_type client.client.morph_request = _morph_request try: morph_content_type_to('') # run versions to make sure the API still returns JSON even though the # header type is blank versions.test_list_versions_index() finally: client.client.morph_request = original_morph_request trove-5.0.0/trove/tests/api/instances_actions.py0000664000567000056710000006767312701410316023165 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from proboscis import after_class from proboscis import asserts from proboscis import before_class from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from sqlalchemy import exc as sqlalchemy_exc from sqlalchemy.sql.expression import text from troveclient.compat.exceptions import BadRequest from troveclient.compat.exceptions import HTTPNotImplemented from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import assert_unprocessable from trove.tests.api.instances import EPHEMERAL_SUPPORT from trove.tests.api.instances import GROUP as INSTANCE_GROUP from trove.tests.api.instances import GROUP_START from trove.tests.api.instances import instance_info from trove.tests.api.instances import VOLUME_SUPPORT from trove.tests.config import CONFIG import trove.tests.util as testsutil from trove.tests.util.check import Checker from trove.tests.util.check import TypeCheck from trove.tests.util import LocalSqlClient from trove.tests.util.server_connection import create_server_connection GROUP = "dbaas.api.instances.actions" GROUP_REBOOT = "dbaas.api.instances.actions.reboot" GROUP_RESTART = "dbaas.api.instances.actions.restart" GROUP_RESIZE = "dbaas.api.instances.actions.resize.instance" GROUP_STOP_MYSQL = "dbaas.api.instances.actions.stop" MYSQL_USERNAME = "test_user" MYSQL_PASSWORD = "abcde" # stored in test conf SERVICE_ID = '123' FAKE_MODE = CONFIG.fake_mode # If true, then we will actually log into the database. USE_IP = not FAKE_MODE # If true, then we will actually search for the process USE_LOCAL_OVZ = CONFIG.use_local_ovz class MySqlConnection(object): def __init__(self, host): self.host = host def connect(self): """Connect to MySQL database.""" print("Connecting to MySQL, mysql --host %s -u %s -p%s" % (self.host, MYSQL_USERNAME, MYSQL_PASSWORD)) sql_engine = LocalSqlClient.init_engine(MYSQL_USERNAME, MYSQL_PASSWORD, self.host) self.client = LocalSqlClient(sql_engine, use_flush=False) def is_connected(self): try: with self.client: self.client.execute(text("""SELECT "Hello.";""")) return True except (sqlalchemy_exc.OperationalError, sqlalchemy_exc.DisconnectionError, sqlalchemy_exc.TimeoutError): return False except Exception as ex: print("EX WAS:") print(type(ex)) print(ex) raise ex TIME_OUT_TIME = 15 * 60 USER_WAS_DELETED = False class ActionTestBase(object): """Has some helpful functions for testing actions. The test user must be created for some of these functions to work. 
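
# A minimal sketch (not part of the original suite) of how MySqlConnection
# is typically driven together with poll_until: block until the datastore
# accepts connections or the timeout elapses. The helper name, 'host'
# argument, and 60-second default are illustrative assumptions only.
def _example_wait_for_mysql(host, timeout=60):
    connection = MySqlConnection(host)
    connection.connect()
    # poll_until re-evaluates the callable until it returns a truthy value,
    # raising PollTimeOut once time_out seconds have elapsed.
    poll_until(connection.is_connected, time_out=timeout)
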
""" def set_up(self): """If you're using this as a base class, call this method first.""" self.dbaas = instance_info.dbaas if USE_IP: address = instance_info.get_address() self.connection = MySqlConnection(address) @property def instance(self): return self.dbaas.instances.get(self.instance_id) @property def instance_address(self): return instance_info.get_address() @property def instance_id(self): return instance_info.id def create_user(self): """Create a MySQL user we can use for this test.""" users = [{"name": MYSQL_USERNAME, "password": MYSQL_PASSWORD, "databases": [{"name": MYSQL_USERNAME}]}] self.dbaas.users.create(instance_info.id, users) def has_user(): users = self.dbaas.users.list(instance_info.id) return any([user.name == MYSQL_USERNAME for user in users]) poll_until(has_user, time_out=30) if not FAKE_MODE: time.sleep(5) def ensure_mysql_is_running(self): """Make sure MySQL is accessible before restarting.""" with Checker() as check: if USE_IP: self.connection.connect() check.true(self.connection.is_connected(), "Able to connect to MySQL.") self.proc_id = self.find_mysql_proc_on_instance() check.true(self.proc_id is not None, "MySQL process can not be found.") instance = self.instance check.false(instance is None) check.equal(instance.status, "ACTIVE") def find_mysql_proc_on_instance(self): server = create_server_connection(self.instance_id) cmd = "ps acux | grep mysqld " \ "| grep -v mysqld_safe | awk '{print $2}'" stdout, stderr = server.execute(cmd) try: return int(stdout) except ValueError: return None def log_current_users(self): users = self.dbaas.users.list(self.instance_id) CONFIG.get_report().log("Current user count = %d" % len(users)) for user in users: CONFIG.get_report().log("\t" + str(user)) def _build_expected_msg(self): expected = { 'instance_size': instance_info.dbaas_flavor.ram, 'tenant_id': instance_info.user.tenant_id, 'instance_id': instance_info.id, 'instance_name': instance_info.name, 'created_at': testsutil.iso_time( instance_info.initial_result.created), 'launched_at': testsutil.iso_time(self.instance.updated), 'modify_at': testsutil.iso_time(self.instance.updated) } return expected @test(depends_on_groups=[GROUP_START]) def create_user(): """Create a test user so that subsequent tests can log in.""" helper = ActionTestBase() helper.set_up() if USE_IP: try: helper.create_user() except BadRequest: pass # Ignore this if the user already exists. 
helper.connection.connect() asserts.assert_true(helper.connection.is_connected(), "Test user must be able to connect to MySQL.") class RebootTestBase(ActionTestBase): """Tests restarting MySQL.""" def call_reboot(self): raise NotImplementedError() def wait_for_broken_connection(self): """Wait until our connection breaks.""" if not USE_IP: return if not hasattr(self, "connection"): return poll_until(self.connection.is_connected, lambda connected: not connected, time_out=TIME_OUT_TIME) def wait_for_successful_restart(self): """Wait until status becomes running.""" def is_finished_rebooting(): instance = self.instance if instance.status == "REBOOT": return False asserts.assert_equal("ACTIVE", instance.status) return True poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME) def assert_mysql_proc_is_different(self): if not USE_IP: return new_proc_id = self.find_mysql_proc_on_instance() asserts.assert_not_equal(new_proc_id, self.proc_id, "MySQL process ID should be different!") def successful_restart(self): """Restart MySQL via the REST API successfully.""" self.fix_mysql() self.call_reboot() self.wait_for_broken_connection() self.wait_for_successful_restart() self.assert_mysql_proc_is_different() def mess_up_mysql(self): """Ruin MySQL's ability to restart.""" server = create_server_connection(self.instance_id) cmd = "sudo cp /dev/null /var/lib/mysql/data/ib_logfile%d" instance_info.dbaas_admin.management.stop(self.instance_id) for index in range(2): server.execute(cmd % index) def fix_mysql(self): """Fix MySQL's ability to restart.""" if not FAKE_MODE: server = create_server_connection(self.instance_id) cmd = "sudo rm /var/lib/mysql/data/ib_logfile%d" # We want to stop mysql so that upstart does not keep trying to # respawn it and block the guest agent from accessing the logs. 
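            # mess_up_mysql zeroed ib_logfile0/1 above; removing them here
            # lets InnoDB recreate clean redo logs on the next start.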
            instance_info.dbaas_admin.management.stop(self.instance_id)
            for index in range(2):
                server.execute(cmd % index)

    def wait_for_failure_status(self):
        """Wait until the status indicates failure (SHUTDOWN or BLOCKED)."""
        def is_finished_rebooting():
            instance = self.instance
            if instance.status == "REBOOT" or instance.status == "ACTIVE":
                return False
            # The reason we check for BLOCKED as well as SHUTDOWN is because
            # Upstart might try to bring mysql back up after the borked
            # connection and the guest status can be either
            asserts.assert_true(instance.status in ("SHUTDOWN", "BLOCKED"))
            return True

        poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME)

    def unsuccessful_restart(self):
        """Restart MySQL via the REST when it should fail, assert it does."""
        assert not FAKE_MODE
        self.mess_up_mysql()
        self.call_reboot()
        self.wait_for_broken_connection()
        self.wait_for_failure_status()

    def restart_normally(self):
        """Fix the ib_logfiles and reboot normally."""
        self.fix_mysql()
        self.test_successful_restart()


@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_RESTART],
      depends_on_groups=[GROUP_START], depends_on=[create_user])
class RestartTests(RebootTestBase):
    """Tests restarting MySQL."""

    def call_reboot(self):
        self.instance.restart()
        asserts.assert_equal(202, self.dbaas.last_http_code)

    @before_class
    def test_set_up(self):
        self.set_up()

    @test
    def test_ensure_mysql_is_running(self):
        """Make sure MySQL is accessible before restarting."""
        self.ensure_mysql_is_running()

    @test(depends_on=[test_ensure_mysql_is_running], enabled=not FAKE_MODE)
    def test_unsuccessful_restart(self):
        """Restart MySQL via the REST when it should fail, assert it does."""
        if FAKE_MODE:
            raise SkipTest("Cannot run this in fake mode.")
        self.unsuccessful_restart()

    @test(depends_on=[test_set_up],
          runs_after=[test_ensure_mysql_is_running,
                      test_unsuccessful_restart])
    def test_successful_restart(self):
        """Restart MySQL via the REST API successfully."""
        self.successful_restart()


@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_STOP_MYSQL],
      depends_on_groups=[GROUP_START], depends_on=[create_user])
class StopTests(RebootTestBase):
    """Tests which involve stopping MySQL."""

    def call_reboot(self):
        self.instance.restart()

    @before_class
    def test_set_up(self):
        self.set_up()

    @test
    def test_ensure_mysql_is_running(self):
        """Make sure MySQL is accessible before restarting."""
        self.ensure_mysql_is_running()

    @test(depends_on=[test_ensure_mysql_is_running])
    def test_stop_mysql(self):
        """Stops MySQL."""
        instance_info.dbaas_admin.management.stop(self.instance_id)
        self.wait_for_broken_connection()
        self.wait_for_failure_status()

    @test(depends_on=[test_stop_mysql])
    def test_instance_get_shows_volume_info_while_mysql_is_down(self):
        """
        Confirms the get call behaves appropriately while an instance is
        down.
""" if not VOLUME_SUPPORT: raise SkipTest("Not testing volumes.") instance = self.dbaas.instances.get(self.instance_id) with TypeCheck("instance", instance) as check: check.has_field("volume", dict) check.true('size' in instance.volume) check.true('used' in instance.volume) check.true(isinstance(instance.volume.get('size', None), int)) check.true(isinstance(instance.volume.get('used', None), float)) @test(depends_on=[test_set_up], runs_after=[test_instance_get_shows_volume_info_while_mysql_is_down]) def test_successful_restart_when_in_shutdown_state(self): """Restart MySQL via the REST API successfully when MySQL is down.""" self.successful_restart() @test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_REBOOT], depends_on_groups=[GROUP_START], depends_on=[RestartTests, create_user]) class RebootTests(RebootTestBase): """Tests restarting instance.""" def call_reboot(self): instance_info.dbaas_admin.management.reboot(self.instance_id) @before_class def test_set_up(self): self.set_up() asserts.assert_true(hasattr(self, 'dbaas')) asserts.assert_true(self.dbaas is not None) @test def test_ensure_mysql_is_running(self): """Make sure MySQL is accessible before restarting.""" self.ensure_mysql_is_running() @test(depends_on=[test_ensure_mysql_is_running]) def test_unsuccessful_restart(self): """Restart MySQL via the REST when it should fail, assert it does.""" if FAKE_MODE: raise SkipTest("Cannot run this in fake mode.") self.unsuccessful_restart() @after_class(depends_on=[test_set_up]) def test_successful_restart(self): """Restart MySQL via the REST API successfully.""" if FAKE_MODE: raise SkipTest("Cannot run this in fake mode.") self.successful_restart() @test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_RESIZE], depends_on_groups=[GROUP_START], depends_on=[create_user], runs_after=[RebootTests]) class ResizeInstanceTest(ActionTestBase): """ Integration Test cases for resize instance """ @property def flavor_id(self): return instance_info.dbaas_flavor_href def get_flavor_href(self, flavor_id=2): res = instance_info.dbaas.find_flavor_and_self_href(flavor_id) dbaas_flavor, dbaas_flavor_href = res return dbaas_flavor_href def wait_for_resize(self): def is_finished_resizing(): instance = self.instance if instance.status == "RESIZE": return False asserts.assert_equal("ACTIVE", instance.status) return True poll_until(is_finished_resizing, time_out=TIME_OUT_TIME) @before_class def setup(self): self.set_up() if USE_IP: self.connection.connect() asserts.assert_true(self.connection.is_connected(), "Should be able to connect before resize.") self.user_was_deleted = False @test def test_instance_resize_same_size_should_fail(self): asserts.assert_raises(BadRequest, self.dbaas.instances.resize_instance, self.instance_id, self.flavor_id) @test(enabled=VOLUME_SUPPORT) def test_instance_resize_to_ephemeral_in_volume_support_should_fail(self): flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name', 'eph.rd-smaller') flavors = self.dbaas.find_flavors_by_name(flavor_name) def is_active(): return self.instance.status == 'ACTIVE' poll_until(is_active, time_out=TIME_OUT_TIME) asserts.assert_equal(self.instance.status, 'ACTIVE') self.get_flavor_href( flavor_id=self.expected_old_flavor_id) asserts.assert_raises(HTTPNotImplemented, self.dbaas.instances.resize_instance, self.instance_id, flavors[0].id) @test(enabled=EPHEMERAL_SUPPORT) def test_instance_resize_to_non_ephemeral_flavor_should_fail(self): flavor_name = CONFIG.values.get('instance_bigger_flavor_name', 'm1-small') flavors = 
self.dbaas.find_flavors_by_name(flavor_name) asserts.assert_raises(BadRequest, self.dbaas.instances.resize_instance, self.instance_id, flavors[0].id) def obtain_flavor_ids(self): old_id = self.instance.flavor['id'] self.expected_old_flavor_id = old_id res = instance_info.dbaas.find_flavor_and_self_href(old_id) self.expected_dbaas_flavor, _dontcare_ = res if EPHEMERAL_SUPPORT: flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name', 'eph.rd-smaller') else: flavor_name = CONFIG.values.get('instance_bigger_flavor_name', 'm1.small') flavors = self.dbaas.find_flavors_by_name(flavor_name) asserts.assert_equal(len(flavors), 1, "Number of flavors with name '%s' " "found was '%d'." % (flavor_name, len(flavors))) flavor = flavors[0] self.old_dbaas_flavor = instance_info.dbaas_flavor instance_info.dbaas_flavor = flavor asserts.assert_true(flavor is not None, "Flavor '%s' not found!" % flavor_name) flavor_href = self.dbaas.find_flavor_self_href(flavor) asserts.assert_true(flavor_href is not None, "Flavor href '%s' not found!" % flavor_name) self.expected_new_flavor_id = flavor.id @test(depends_on=[test_instance_resize_same_size_should_fail]) def test_status_changed_to_resize(self): self.log_current_users() self.obtain_flavor_ids() self.dbaas.instances.resize_instance( self.instance_id, self.get_flavor_href(flavor_id=self.expected_new_flavor_id)) asserts.assert_equal(202, self.dbaas.last_http_code) # (WARNING) IF THE RESIZE IS WAY TOO FAST THIS WILL FAIL assert_unprocessable( self.dbaas.instances.resize_instance, self.instance_id, self.get_flavor_href(flavor_id=self.expected_new_flavor_id)) @test(depends_on=[test_status_changed_to_resize]) @time_out(TIME_OUT_TIME) def test_instance_returns_to_active_after_resize(self): self.wait_for_resize() @test(depends_on=[test_instance_returns_to_active_after_resize, test_status_changed_to_resize], groups=["dbaas.usage"]) def test_resize_instance_usage_event_sent(self): expected = self._build_expected_msg() expected['old_instance_size'] = self.old_dbaas_flavor.ram instance_info.consumer.check_message(instance_info.id, 'trove.instance.modify_flavor', **expected) @test(depends_on=[test_instance_returns_to_active_after_resize], runs_after=[test_resize_instance_usage_event_sent]) def resize_should_not_delete_users(self): """Resize should not delete users.""" # Resize has an incredibly weird bug where users are deleted after # a resize. The code below is an attempt to catch this while proceeding # with the rest of the test (note the use of runs_after). if USE_IP: self.connection.connect() if not self.connection.is_connected(): # Ok, this is def. a failure, but before we toss up an error # lets recreate to see how far we can get. CONFIG.get_report().log( "Having to recreate the test_user! 
Resizing killed it!") self.log_current_users() self.create_user() asserts.fail( "Somehow, the resize made the test user disappear.") @test(depends_on=[test_instance_returns_to_active_after_resize], runs_after=[resize_should_not_delete_users]) def test_make_sure_mysql_is_running_after_resize(self): self.ensure_mysql_is_running() @test(depends_on=[test_instance_returns_to_active_after_resize], runs_after=[test_make_sure_mysql_is_running_after_resize]) def test_instance_has_new_flavor_after_resize(self): actual = self.get_flavor_href(self.instance.flavor['id']) expected = self.get_flavor_href(flavor_id=self.expected_new_flavor_id) asserts.assert_equal(actual, expected) @test(depends_on=[test_instance_has_new_flavor_after_resize]) @time_out(TIME_OUT_TIME) def test_resize_down(self): expected_dbaas_flavor = self.expected_dbaas_flavor def is_active(): return self.instance.status == 'ACTIVE' poll_until(is_active, time_out=TIME_OUT_TIME) asserts.assert_equal(self.instance.status, 'ACTIVE') old_flavor_href = self.get_flavor_href( flavor_id=self.expected_old_flavor_id) self.dbaas.instances.resize_instance(self.instance_id, old_flavor_href) asserts.assert_equal(202, self.dbaas.last_http_code) self.old_dbaas_flavor = instance_info.dbaas_flavor instance_info.dbaas_flavor = expected_dbaas_flavor self.wait_for_resize() asserts.assert_equal(str(self.instance.flavor['id']), str(self.expected_old_flavor_id)) @test(depends_on=[test_resize_down], groups=["dbaas.usage"]) def test_resize_instance_down_usage_event_sent(self): expected = self._build_expected_msg() expected['old_instance_size'] = self.old_dbaas_flavor.ram instance_info.consumer.check_message(instance_info.id, 'trove.instance.modify_flavor', **expected) @test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP + ".resize.instance"], depends_on_groups=[GROUP_START], depends_on=[create_user], runs_after=[RebootTests, ResizeInstanceTest]) def resize_should_not_delete_users(): if USER_WAS_DELETED: asserts.fail("Somehow, the resize made the test user disappear.") @test(runs_after=[ResizeInstanceTest], depends_on=[create_user], groups=[GROUP, tests.INSTANCES, INSTANCE_GROUP, GROUP_RESIZE], enabled=VOLUME_SUPPORT) class ResizeInstanceVolume(ActionTestBase): """Resize the volume of the instance.""" @before_class def setUp(self): self.set_up() self.old_volume_size = int(instance_info.volume['size']) self.new_volume_size = self.old_volume_size + 1 self.old_volume_fs_size = instance_info.get_volume_filesystem_size() # Create some databases to check they still exist after the resize self.expected_dbs = ['salmon', 'halibut'] databases = [] for name in self.expected_dbs: databases.append({"name": name}) instance_info.dbaas.databases.create(instance_info.id, databases) @test @time_out(60) def test_volume_resize(self): instance_info.dbaas.instances.resize_volume(instance_info.id, self.new_volume_size) @test(depends_on=[test_volume_resize]) @time_out(300) def test_volume_resize_success(self): def check_resize_status(): instance = instance_info.dbaas.instances.get(instance_info.id) if instance.status == "ACTIVE": return True elif instance.status == "RESIZE": return False else: asserts.fail("Status should not be %s" % instance.status) poll_until(check_resize_status, sleep_time=2, time_out=300) instance = instance_info.dbaas.instances.get(instance_info.id) asserts.assert_equal(instance.volume['size'], self.new_volume_size) @test(depends_on=[test_volume_resize_success]) def test_volume_filesystem_resize_success(self): # The get_volume_filesystem_size is a mgmt call 
through the guestagent # and the volume resize occurs through the fake nova-volume. # Currently the guestagent fakes don't have access to the nova fakes so # it doesn't know that a volume resize happened and to what size so # we can't fake the filesystem size. if FAKE_MODE: raise SkipTest("Cannot run this in fake mode.") new_volume_fs_size = instance_info.get_volume_filesystem_size() asserts.assert_true(self.old_volume_fs_size < new_volume_fs_size) # The total filesystem size is not going to be exactly the same size of # cinder volume but it should round to it. (e.g. round(1.9) == 2) asserts.assert_equal(round(new_volume_fs_size), self.new_volume_size) @test(depends_on=[test_volume_resize_success], groups=["dbaas.usage"]) def test_resize_volume_usage_event_sent(self): expected = self._build_expected_msg() expected['volume_size'] = self.new_volume_size expected['old_volume_size'] = self.old_volume_size instance_info.consumer.check_message(instance_info.id, 'trove.instance.modify_volume', **expected) @test @time_out(300) def test_volume_resize_success_databases(self): databases = instance_info.dbaas.databases.list(instance_info.id) db_list = [] for database in databases: db_list.append(database.name) for name in self.expected_dbs: if name not in db_list: asserts.fail( "Database %s was not found after the volume resize. " "Returned list: %s" % (name, databases)) # This tests the ability of the guest to upgrade itself. # It is necessarily tricky because we need to be able to upload a new copy of # the guest into an apt-repo in the middle of the test. # "guest-update-test" is where the knowledge of how to do this is set in the # test conf. If it is not specified this test never runs. UPDATE_GUEST_CONF = CONFIG.values.get("guest-update-test", None) @test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP + ".update_guest"], depends_on=[create_user], depends_on_groups=[GROUP_START]) class UpdateGuest(object): def get_version(self): info = instance_info.dbaas_admin.diagnostics.get(instance_info.id) return info.version @before_class(enabled=UPDATE_GUEST_CONF is not None) def check_version_is_old(self): """Make sure we have the old version before proceeding.""" self.old_version = self.get_version() self.next_version = UPDATE_GUEST_CONF["next-version"] asserts.assert_not_equal(self.old_version, self.next_version) @test(enabled=UPDATE_GUEST_CONF is not None) def upload_update_to_repo(self): cmds = UPDATE_GUEST_CONF["install-repo-cmd"] testsutil.execute(*cmds, run_as_root=True, root_helper="sudo") @test(enabled=UPDATE_GUEST_CONF is not None, depends_on=[upload_update_to_repo]) def update_and_wait_to_finish(self): instance_info.dbaas_admin.management.update(instance_info.id) def finished(): current_version = self.get_version() if current_version == self.next_version: return True # The only valid thing for it to be aside from next_version is # old version. asserts.assert_equal(current_version, self.old_version) poll_until(finished, sleep_time=1, time_out=3 * 60) @test(enabled=UPDATE_GUEST_CONF is not None, depends_on=[upload_update_to_repo]) @time_out(30) def update_again(self): """Test the wait time of a pointless update.""" instance_info.dbaas_admin.management.update(instance_info.id) # Make sure this isn't taking too long. 
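# NOTE(editor): the @time_out(30) decorator above enforces that bound;
# a no-op update (the guest already runs next_version) should return
# almost immediately, and the diagnostics call below merely confirms
# the agent is still responsive afterwards.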
instance_info.dbaas_admin.diagnostics.get(instance_info.id) trove-5.0.0/trove/tests/api/users.py0000664000567000056710000004562612701410316020611 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from six.moves.urllib import parse as urllib_parse from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import fail from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.databases import TestDatabases from trove.tests.api.databases import TestMysqlAccess from trove.tests.api.instances import instance_info from trove.tests import util from trove.tests.util import test_config GROUP = "dbaas.api.users" FAKE = test_config.values['fake_mode'] @test(depends_on_classes=[TestMysqlAccess], groups=[tests.DBAAS_API, GROUP, tests.INSTANCES], runs_after=[TestDatabases]) class TestUsers(object): """ Test the creation and deletion of users """ username = "tes!@#tuser" password = "testpa$^%ssword" username1 = "anous*&^er" password1 = "anopas*?.sword" db1 = "usersfirstdb" db2 = "usersseconddb" created_users = [username, username1] system_users = ['root', 'debian_sys_maint'] def __init__(self): self.dbaas = util.create_dbaas_client(instance_info.user) self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user) @before_class def setUp(self): databases = [{"name": self.db1, "character_set": "latin2", "collate": "latin2_general_ci"}, {"name": self.db2}] try: self.dbaas.databases.create(instance_info.id, databases) except exceptions.BadRequest as e: if "Validation error" in e.message: raise e if not FAKE: time.sleep(5) @after_class def tearDown(self): self.dbaas.databases.delete(instance_info.id, self.db1) self.dbaas.databases.delete(instance_info.id, self.db2) @test() def test_delete_nonexistent_user(self): assert_raises(exceptions.NotFound, self.dbaas.users.delete, instance_info.id, "thisuserDNE") assert_equal(404, self.dbaas.last_http_code) @test() def test_create_users(self): users = [] users.append({"name": self.username, "password": self.password, "databases": [{"name": self.db1}]}) users.append({"name": self.username1, "password": self.password1, "databases": [{"name": self.db1}, {"name": self.db2}]}) self.dbaas.users.create(instance_info.id, users) assert_equal(202, self.dbaas.last_http_code) # Do we need this? 
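# NOTE(editor): the fixed sleep below gives a real MySQL guest time to
# apply the new grants before check_database_for_user() connects
# directly; in fake mode there is nothing to wait for. A hypothetical
# alternative (not used here) would be to retry the checks with
# trove.common.utils.poll_until() instead of sleeping a fixed interval.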
if not FAKE: time.sleep(5) self.check_database_for_user(self.username, self.password, [self.db1]) self.check_database_for_user(self.username1, self.password1, [self.db1, self.db2]) @test(depends_on=[test_create_users]) def test_create_users_list(self): # tests for users that should be listed users = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) found = False for user in self.created_users: for result in users: if user == result.name: found = True assert_true(found, "User '%s' not found in result" % user) found = False @test(depends_on=[test_create_users]) def test_fails_when_creating_user_twice(self): users = [] users.append({"name": self.username, "password": self.password, "databases": [{"name": self.db1}]}) users.append({"name": self.username1, "password": self.password1, "databases": [{"name": self.db1}, {"name": self.db2}]}) assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test(depends_on=[test_create_users_list]) def test_cannot_create_root_user(self): # Tests that the user root (in Config:ignore_users) cannot be created. users = [{"name": "root", "password": "12345", "databases": [{"name": self.db1}]}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) @test(depends_on=[test_create_users_list]) def test_get_one_user(self): user = self.dbaas.users.get(instance_info.id, username=self.username, hostname='%') assert_equal(200, self.dbaas.last_http_code) assert_equal(user.name, self.username) assert_equal(1, len(user.databases)) for db in user.databases: assert_equal(db["name"], self.db1) self.check_database_for_user(self.username, self.password, [self.db1]) @test(depends_on=[test_create_users_list]) def test_create_users_list_system(self): # tests for users that should not be listed users = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) for user in self.system_users: found = any(result.name == user for result in users) msg = "User '%s' SHOULD NOT BE found in result" % user assert_false(found, msg) @test(depends_on=[test_create_users_list], runs_after=[test_fails_when_creating_user_twice]) def test_delete_users(self): self.dbaas.users.delete(instance_info.id, self.username, hostname='%') assert_equal(202, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, self.username1, hostname='%') assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) self._check_connection(self.username, self.password) self._check_connection(self.username1, self.password1) @test(depends_on=[test_create_users_list, test_delete_users]) def test_hostnames_default_if_not_present(self): # These tests rely on test_delete_users as they create users only # they use. username = "testuser_nohost" user = {"name": username, "password": "password", "databases": []} self.dbaas.users.create(instance_info.id, [user]) user["host"] = "%" # Can't create the user a second time if it already exists. assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, [user]) self.dbaas.users.delete(instance_info.id, username) @test(depends_on=[test_create_users_list, test_delete_users]) def test_hostnames_make_users_unique(self): # These tests rely on test_delete_users as they create users only # they use. 
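# NOTE(editor): MySQL identifies an account by the (name, host) pair,
# so 'testuser_unique'@'192.168.0.1' and 'testuser_unique'@'192.168.0.2'
# created below are two distinct users and must be deleted per host.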
username = "testuser_unique" hostnames = ["192.168.0.1", "192.168.0.2"] users = [{"name": username, "password": "password", "databases": [], "host": hostname} for hostname in hostnames] # Nothing wrong with creating two users with the same name, so long # as their hosts are different. self.dbaas.users.create(instance_info.id, users) for hostname in hostnames: self.dbaas.users.delete(instance_info.id, username, hostname=hostname) @test() def test_updateduser_newname_host_unique(self): # The updated_username@hostname should not exist already users = [] old_name = "testuser1" hostname = "192.168.0.1" users.append({"name": old_name, "password": "password", "host": hostname, "databases": []}) users.append({"name": "testuser2", "password": "password", "host": hostname, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"name": "testuser2"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, old_name, user_new, hostname) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, old_name, hostname=hostname) self.dbaas.users.delete(instance_info.id, "testuser2", hostname=hostname) @test() def test_updateduser_name_newhost_unique(self): # The username@updated_hostname should not exist already users = [] username = "testuser" hostname1 = "192.168.0.1" hostname2 = "192.168.0.2" users.append({"name": username, "password": "password", "host": hostname1, "databases": []}) users.append({"name": username, "password": "password", "host": hostname2, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"host": "192.168.0.2"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, user_new, hostname1) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, username, hostname=hostname1) self.dbaas.users.delete(instance_info.id, username, hostname=hostname2) @test() def test_updateduser_newname_newhost_unique(self): # The updated_username@updated_hostname should not exist already users = [] username = "testuser1" hostname1 = "192.168.0.1" hostname2 = "192.168.0.2" users.append({"name": username, "password": "password", "host": hostname1, "databases": []}) users.append({"name": "testuser2", "password": "password", "host": hostname2, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"name": "testuser2", "host": "192.168.0.2"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, user_new, hostname1) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, username, hostname=hostname1) self.dbaas.users.delete(instance_info.id, "testuser2", hostname=hostname2) @test() def test_updateduser_newhost_invalid(self): # Ensure invalid hostnames/usernames aren't allowed to enter the system users = [] username = "testuser1" hostname1 = "192.168.0.1" users.append({"name": username, "password": "password", "host": hostname1, "databases": []}) self.dbaas.users.create(instance_info.id, users) hostname1 = hostname1.replace('.', '%2e') assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, {"host": "badjuju"}, hostname1) assert_equal(400, self.dbaas.last_http_code) assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, {"name": " bad username "}, hostname1) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, 
username, hostname=hostname1) @test() def test_cannot_change_rootpassword(self): # Cannot change password for a root user user_new = {"password": "12345"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, "root", user_new) @test() def test_updateuser_emptyhost(self): # Cannot update the user hostname with an empty string users = [] username = "testuser1" hostname = "192.168.0.1" users.append({"name": username, "password": "password", "host": hostname, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"host": ""} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, user_new, hostname) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, username, hostname=hostname) @test(depends_on=[test_create_users]) def test_hostname_ipv4_restriction(self): # By default, user hostnames are required to be % or IPv4 addresses. user = {"name": "ipv4_nodice", "password": "password", "databases": [], "host": "disallowed_host"} assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, [user]) def show_databases(self, user, password): print("Going to connect to %s, %s, %s" % (instance_info.get_address(), user, password)) with util.mysql_connection().create(instance_info.get_address(), user, password) as db: print(db) dbs = db.execute("show databases") return [row['Database'] for row in dbs] def check_database_for_user(self, user, password, dbs): if not FAKE: # Make the real call to the database to check things. actual_list = self.show_databases(user, password) for db in dbs: assert_true( db in actual_list, "No match for db %s in dblist. %s :(" % (db, actual_list)) # Confirm via API list. result = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) for item in result: if item.name == user: break else: fail("User %s not added to collection." % user) # Confirm via API get. result = self.dbaas.users.get(instance_info.id, user, '%') assert_equal(200, self.dbaas.last_http_code) if result.name != user: fail("User %s not found via get." % user) @test def test_username_too_long(self): users = [{"name": "1233asdwer345tyg56", "password": self.password, "database": self.db1}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test def test_invalid_username(self): users = [] users.append({"name": "user,", "password": self.password, "database": self.db1}) assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test(enabled=False) # TODO(hub_cap): Make this test work once python-routes is updated, # if ever. 
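# NOTE(editor): the user name becomes a path segment of the request URL
# (e.g. .../instances/{id}/users/user.name; the path shown is
# illustrative, not taken from the suite), and python-routes has
# historically mis-parsed a trailing '.name' as a format extension,
# which is why this test stays disabled pending a routes fix.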
def test_delete_user_with_period_in_name(self): """Attempt to create/destroy a user with a period in its name.""" users = [] username_with_period = "user.name" users.append({"name": username_with_period, "password": self.password, "databases": [{"name": self.db1}]}) self.dbaas.users.create(instance_info.id, users) assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) self.check_database_for_user(username_with_period, self.password, [self.db1]) self.dbaas.users.delete(instance_info.id, username_with_period) assert_equal(202, self.dbaas.last_http_code) @test def test_invalid_password(self): users = [{"name": "anouser", "password": "sdf,;", "database": self.db1}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test def test_pagination(self): users = [] users.append({"name": "Jetson", "password": "george", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Jetson", "password": "george", "host": "127.0.0.1", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Spacely", "password": "cosmo", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Spacely", "password": "cosmo", "host": "127.0.0.1", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Uniblab", "password": "fired", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Uniblab", "password": "fired", "host": "192.168.0.10", "databases": [{"name": "Sprockets"}]}) self.dbaas.users.create(instance_info.id, users) assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) limit = 2 users = self.dbaas.users.list(instance_info.id, limit=limit) assert_equal(200, self.dbaas.last_http_code) marker = users.next # Better get only as many as we asked for assert_true(len(users) <= limit) assert_true(users.next is not None) expected_marker = "%s@%s" % (users[-1].name, users[-1].host) expected_marker = urllib_parse.quote(expected_marker) assert_equal(marker, expected_marker) marker = users.next # I better get new users if I use the marker I was handed. users = self.dbaas.users.list(instance_info.id, limit=limit, marker=marker) assert_equal(200, self.dbaas.last_http_code) assert_true(marker not in [user.name for user in users]) # Now fetch again with a larger limit. users = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) assert_true(users.next is None) def _check_connection(self, username, password): if not FAKE: util.mysql_connection().assert_fails(instance_info.get_address(), username, password) # Also determine the db is gone via API. result = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) for item in result: if item.name == username: fail("User %s was not deleted." % username) trove-5.0.0/trove/tests/api/root_on_create.py0000664000567000056710000001167112701410320022436 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
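# NOTE(editor): a minimal, hypothetical sketch (not part of the original
# suite) of the config toggle that TestRootOnCreate below performs by
# hand in setUp()/tearDown(): flip the per-datastore 'root_on_create'
# flag and guarantee it is restored. The helper name is illustrative;
# the attribute access mirrors the assignments used in setUp() below.
from contextlib import contextmanager

from trove.common import cfg as _sketch_cfg


@contextmanager
def _root_on_create_set_to(datastore, value=True):
    """Temporarily override CONF.<datastore>.root_on_create."""
    group = _sketch_cfg.CONF.get(datastore)
    original = group.root_on_create
    group.root_on_create = value
    try:
        yield
    finally:
        # Restore the operator-configured value even if the body raises.
        group.root_on_create = original

# Usage (illustrative):
#     with _root_on_create_set_to(instance_info.dbaas_datastore):
#         ...create instance, assert root is enabled...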
from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis import before_class
from proboscis import test

from trove.common import cfg
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.databases import TestMysqlAccess
from trove.tests.api.instances import instance_info
from trove.tests.api.users import TestUsers
from trove.tests import util

CONF = cfg.CONF

GROUP = "dbaas.api.root.oncreate"


@test(depends_on_classes=[TestMysqlAccess],
      runs_after=[TestUsers],
      groups=[tests.DBAAS_API, GROUP, tests.INSTANCES])
class TestRootOnCreate(object):
    """
    Test 'CONF.root_on_create', which if True, creates the root user upon
    database instance initialization.
    """

    root_enabled_timestamp = 'Never'
    instance_id = None

    def create_instance(self):
        result = self.dbaas.instances.create(
            instance_info.name,
            instance_info.dbaas_flavor_href,
            instance_info.volume,
            instance_info.databases,
            instance_info.users,
            nics=instance_info.nics,
            availability_zone="nova",
            datastore=instance_info.dbaas_datastore,
            datastore_version=instance_info.dbaas_datastore_version)
        assert_equal(200, self.dbaas.last_http_code)
        new_id = result.id

        def result_is_active():
            instance = self.dbaas.instances.get(new_id)
            if instance.status == "ACTIVE":
                return True
            else:
                assert_equal("BUILD", instance.status)

        poll_until(result_is_active)
        if 'password' in result._info:
            self.dbaas.root.create(new_id)
        return new_id

    @before_class
    def setUp(self):
        self.orig_conf_value = CONF.get(
            instance_info.dbaas_datastore).root_on_create
        CONF.get(instance_info.dbaas_datastore).root_on_create = True
        self.dbaas = util.create_dbaas_client(instance_info.user)
        self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user)
        self.history = self.dbaas_admin.management.root_enabled_history
        self.enabled = self.dbaas.root.is_root_enabled
        self.instance_id = self.create_instance()

    @after_class
    def tearDown(self):
        CONF.get(instance_info.
                 dbaas_datastore).root_on_create = self.orig_conf_value
        instance = self.dbaas.instances.get(self.instance_id)
        instance.delete()

    @test
    def test_root_on_create(self):
        """Test that root is enabled after instance creation."""
        enabled = self.enabled(self.instance_id).rootEnabled
        assert_equal(200, self.dbaas.last_http_code)
        assert_true(enabled)

    @test(depends_on=[test_root_on_create])
    def test_history_after_root_on_create(self):
        """Test that the timestamp in the root enabled history is set."""
        self.root_enabled_timestamp = self.history(self.instance_id).enabled
        assert_equal(200, self.dbaas.last_http_code)
        assert_not_equal(self.root_enabled_timestamp, 'Never')

    @test(depends_on=[test_history_after_root_on_create])
    def test_reset_root(self):
        """Test that root reset does not alter the timestamp."""
        orig_timestamp = self.root_enabled_timestamp
        self.dbaas.root.create(self.instance_id)
        assert_equal(200, self.dbaas.last_http_code)
        self.root_enabled_timestamp = self.history(self.instance_id).enabled
        assert_equal(200, self.dbaas.last_http_code)
        assert_equal(orig_timestamp, self.root_enabled_timestamp)

    @test(depends_on=[test_reset_root])
    def test_root_still_enabled(self):
        """Test that after root was reset, it's still enabled."""
        enabled = self.enabled(self.instance_id).rootEnabled
        assert_equal(200, self.dbaas.last_http_code)
        assert_true(enabled)

    @test(depends_on=[test_root_still_enabled])
    def test_root_disable(self):
        """
        After root disable, ensure the history enabled flag is still
        enabled.
""" self.dbaas.root.delete(self.instance_id) assert_equal(200, self.dbaas.last_http_code) enabled = self.enabled(self.instance_id).rootEnabled assert_equal(200, self.dbaas.last_http_code) assert_true(enabled) trove-5.0.0/trove/tests/api/datastores.py0000664000567000056710000001663712701410316021621 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nose.tools import assert_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.util.check import TypeCheck from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.datastores" NAME = "nonexistent" @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class Datastores(object): @before_class def setUp(self): rd_user = test_config.users.find_user( Requirements(is_admin=False, services=["trove"])) rd_admin = test_config.users.find_user( Requirements(is_admin=True, services=["trove"])) self.rd_client = create_dbaas_client(rd_user) self.rd_admin = create_dbaas_client(rd_admin) @test def test_datastore_list_attrs(self): datastores = self.rd_client.datastores.list() for datastore in datastores: with TypeCheck('Datastore', datastore) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("links", list) check.has_field("versions", list) @test def test_datastore_get(self): # Test get by name datastore_by_name = self.rd_client.datastores.get( test_config.dbaas_datastore) with TypeCheck('Datastore', datastore_by_name) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("links", list) assert_equal(datastore_by_name.name, test_config.dbaas_datastore) # test get by id datastore_by_id = self.rd_client.datastores.get( datastore_by_name.id) with TypeCheck('Datastore', datastore_by_id) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("links", list) check.has_field("versions", list) assert_equal(datastore_by_id.id, datastore_by_name.id) @test def test_datastore_not_found(self): try: assert_raises(exceptions.NotFound, self.rd_client.datastores.get, NAME) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore '%s' cannot be found." 
% NAME) @test def test_datastore_with_no_active_versions_is_hidden(self): datastores = self.rd_client.datastores.list() name_list = [datastore.name for datastore in datastores] name_no_versions = test_config.dbaas_datastore_name_no_versions assert_true(name_no_versions not in name_list) @test def test_datastore_with_no_active_versions_is_visible_for_admin(self): datastores = self.rd_admin.datastores.list() name_list = [datastore.name for datastore in datastores] name_no_versions = test_config.dbaas_datastore_name_no_versions assert_true(name_no_versions in name_list) @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class DatastoreVersions(object): @before_class def setUp(self): rd_user = test_config.users.find_user( Requirements(is_admin=False, services=["trove"])) self.rd_client = create_dbaas_client(rd_user) self.datastore_active = self.rd_client.datastores.get( test_config.dbaas_datastore) self.datastore_version_active = self.rd_client.datastore_versions.list( self.datastore_active.id)[0] @test def test_datastore_version_list_attrs(self): versions = self.rd_client.datastore_versions.list( self.datastore_active.name) for version in versions: with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("links", list) @test def test_datastore_version_get_attrs(self): version = self.rd_client.datastore_versions.get( self.datastore_active.name, self.datastore_version_active.name) with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("datastore", basestring) check.has_field("links", list) assert_equal(version.name, self.datastore_version_active.name) @test def test_datastore_version_get_by_uuid_attrs(self): version = self.rd_client.datastore_versions.get_by_uuid( self.datastore_version_active.id) with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("datastore", basestring) check.has_field("links", list) assert_equal(version.name, self.datastore_version_active.name) @test def test_datastore_version_not_found(self): try: assert_raises(exceptions.NotFound, self.rd_client.datastore_versions.get, self.datastore_active.name, NAME) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore version '%s' cannot be found." % NAME) @test def test_datastore_version_list_by_uuid(self): versions = self.rd_client.datastore_versions.list( self.datastore_active.id) for version in versions: with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("links", list) @test def test_datastore_version_get_by_uuid(self): version = self.rd_client.datastore_versions.get( self.datastore_active.id, self.datastore_version_active.id) with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("datastore", basestring) check.has_field("links", list) assert_equal(version.name, self.datastore_version_active.name) @test def test_datastore_version_invalid_uuid(self): try: self.rd_client.datastore_versions.get_by_uuid( self.datastore_version_active.id) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore version '%s' cannot be found." 
% test_config.dbaas_datastore_version) trove-5.0.0/trove/tests/api/instances_resize.py0000664000567000056710000002446412701410316023015 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock from mox3 import mox from novaclient.exceptions import BadRequest from novaclient.v2.servers import Server from oslo_messaging._drivers.common import RPCException from proboscis import test from testtools import TestCase from trove.common.exception import PollTimeOut from trove.common import instance as rd_instance from trove.common import template from trove.common import utils from trove.datastore.models import DatastoreVersion from trove.guestagent import api as guest from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.tasks import InstanceTasks from trove.taskmanager import models as models from trove.tests.fakes import nova from trove.tests.unittests import trove_testtools from trove.tests.util import test_config GROUP = 'dbaas.api.instances.resize' OLD_FLAVOR_ID = 1 NEW_FLAVOR_ID = 2 OLD_FLAVOR = nova.FLAVORS.get(OLD_FLAVOR_ID) NEW_FLAVOR = nova.FLAVORS.get(NEW_FLAVOR_ID) class ResizeTestBase(TestCase): def _init(self): self.mock = mox.Mox() self.instance_id = 500 context = trove_testtools.TroveTestContext(self) self.db_info = DBInstance.create( name="instance", flavor_id=OLD_FLAVOR_ID, tenant_id=999, volume_size=None, datastore_version_id=test_config.dbaas_datastore_version_id, task_status=InstanceTasks.RESIZING) self.server = self.mock.CreateMock(Server) self.instance = models.BuiltInstanceTasks( context, self.db_info, self.server, datastore_status=InstanceServiceStatus.create( instance_id=self.db_info.id, status=rd_instance.ServiceStatuses.RUNNING)) self.instance.server.flavor = {'id': OLD_FLAVOR_ID} self.guest = self.mock.CreateMock(guest.API) self.instance._guest = self.guest self.instance.refresh_compute_server_info = lambda: None self.instance._refresh_datastore_status = lambda: None self.mock.StubOutWithMock(self.instance, 'update_db') self.mock.StubOutWithMock(self.instance, 'set_datastore_status_to_paused') self.poll_until_mocked = False self.action = None def tearDown(self): super(ResizeTestBase, self).tearDown() self.mock.UnsetStubs() self.db_info.delete() def _execute_action(self): self.instance.update_db(task_status=InstanceTasks.NONE) self.mock.ReplayAll() excs = (Exception) self.assertRaises(excs, self.action.execute) self.mock.VerifyAll() def _stop_db(self, reboot=True): self.guest.stop_db(do_not_start_on_reboot=reboot) self.instance.datastore_status.status = ( rd_instance.ServiceStatuses.SHUTDOWN) def _server_changes_to(self, new_status, new_flavor_id): def change(): self.server.status = new_status self.instance.server.flavor['id'] = new_flavor_id if not self.poll_until_mocked: self.mock.StubOutWithMock(utils, "poll_until") self.poll_until_mocked = True utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\ 
.WithSideEffects(lambda ignore, sleep_time, time_out: change()) def _nova_resizes_successfully(self): self.server.resize(NEW_FLAVOR_ID) self._server_changes_to("VERIFY_RESIZE", NEW_FLAVOR_ID) @test(groups=[GROUP, GROUP + '.resize']) class ResizeTests(ResizeTestBase): def setUp(self): super(ResizeTests, self).setUp() self._init() # By the time flavor objects pass over amqp to the # resize action they have been turned into dicts self.action = models.ResizeAction(self.instance, OLD_FLAVOR.__dict__, NEW_FLAVOR.__dict__) def _start_mysql(self): datastore = Mock(spec=DatastoreVersion) datastore.datastore_name = 'mysql' datastore.name = 'mysql-5.6' datastore.manager = 'mysql' config = template.SingleInstanceConfigTemplate( datastore, NEW_FLAVOR.__dict__, self.instance.id) self.instance.guest.start_db_with_conf_changes(config.render()) def test_guest_wont_stop_mysql(self): self.guest.stop_db(do_not_start_on_reboot=True)\ .AndRaise(RPCException("Could not stop MySQL!")) def test_nova_wont_resize(self): self._stop_db() self.server.resize(NEW_FLAVOR_ID).AndRaise(BadRequest) self.server.status = "ACTIVE" self.guest.restart() self._execute_action() def test_nova_resize_timeout(self): self._stop_db() self.server.resize(NEW_FLAVOR_ID) self.mock.StubOutWithMock(utils, 'poll_until') utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\ .AndRaise(PollTimeOut) self._execute_action() def test_nova_doesnt_change_flavor(self): self._stop_db() self.server.resize(NEW_FLAVOR_ID) self._server_changes_to("VERIFY_RESIZE", OLD_FLAVOR_ID) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.instance.guest.reset_configuration(mox.IgnoreArg()) self.instance.server.revert_resize() self._server_changes_to("ACTIVE", OLD_FLAVOR_ID) self.guest.restart() self._execute_action() def test_nova_resize_fails(self): self._stop_db() self.server.resize(NEW_FLAVOR_ID) self._server_changes_to("ERROR", OLD_FLAVOR_ID) self._execute_action() def test_nova_resizes_in_weird_state(self): self._stop_db() self.server.resize(NEW_FLAVOR_ID) self._server_changes_to("ACTIVE", NEW_FLAVOR_ID) self.guest.restart() self._execute_action() def test_guest_is_not_okay(self): self._stop_db() self._nova_resizes_successfully() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.instance.set_datastore_status_to_paused() self.instance.datastore_status.status = ( rd_instance.ServiceStatuses.PAUSED) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\ .AndRaise(PollTimeOut) self.instance.guest.reset_configuration(mox.IgnoreArg()) self.instance.server.revert_resize() self._server_changes_to("ACTIVE", OLD_FLAVOR_ID) self.guest.restart() self._execute_action() def test_mysql_is_not_okay(self): self._stop_db() self._nova_resizes_successfully() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.instance.set_datastore_status_to_paused() self.instance.datastore_status.status = ( rd_instance.ServiceStatuses.SHUTDOWN) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self._start_mysql() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120).AndRaise(PollTimeOut) self.instance.guest.reset_configuration(mox.IgnoreArg()) self.instance.server.revert_resize() self._server_changes_to("ACTIVE", OLD_FLAVOR_ID) self.guest.restart() self._execute_action() def test_confirm_resize_fails(self): self._stop_db() self._nova_resizes_successfully() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) 
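# NOTE(editor): nothing in this test body executes directly; every call
# above and below is recorded as a mox expectation and is only replayed
# and verified once _execute_action() invokes mock.ReplayAll() and
# mock.VerifyAll().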
self.instance.set_datastore_status_to_paused() self.instance.datastore_status.status = ( rd_instance.ServiceStatuses.RUNNING) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self._start_mysql() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.server.status = "SHUTDOWN" self.instance.server.confirm_resize() self._execute_action() def test_revert_nova_fails(self): self._stop_db() self._nova_resizes_successfully() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.instance.set_datastore_status_to_paused() self.instance.datastore_status.status = ( rd_instance.ServiceStatuses.PAUSED) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120).AndRaise(PollTimeOut) self.instance.guest.reset_configuration(mox.IgnoreArg()) self.instance.server.revert_resize() self._server_changes_to("ERROR", OLD_FLAVOR_ID) self._execute_action() @test(groups=[GROUP, GROUP + '.migrate']) class MigrateTests(ResizeTestBase): def setUp(self): super(MigrateTests, self).setUp() self._init() self.action = models.MigrateAction(self.instance) def _execute_action(self): self.instance.update_db(task_status=InstanceTasks.NONE) self.mock.ReplayAll() self.assertIsNone(self.action.execute()) self.mock.VerifyAll() def _start_mysql(self): self.guest.restart() def test_successful_migrate(self): self.mock.StubOutWithMock(self.instance.server, 'migrate') self._stop_db() self.server.migrate(force_host=None) self._server_changes_to("VERIFY_RESIZE", NEW_FLAVOR_ID) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.instance.set_datastore_status_to_paused() self.instance.datastore_status.status = ( rd_instance.ServiceStatuses.RUNNING) utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self._start_mysql() utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120) self.instance.server.confirm_resize() self._execute_action() trove-5.0.0/trove/tests/int_tests.py0000664000567000056710000002056012701410316020701 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
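# NOTE(editor): build_group() below flattens arbitrarily nested lists of
# group names while preserving first-seen order and dropping duplicates.
# An illustrative call (not part of the module):
#
#     build_group("a", ["b", ["a", "c"]])  # -> ["a", "b", "c"]
#
# register() then feeds the flattened lists to proboscis as
# depends_on_groups, which is how the per-datastore aliases such as
# "mysql_supported" are wired up at the bottom of this file.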
import proboscis from trove.tests.api import backups from trove.tests.api import configurations from trove.tests.api import databases from trove.tests.api import datastores from trove.tests.api import flavors from trove.tests.api import instances from trove.tests.api import instances_actions from trove.tests.api.mgmt import accounts from trove.tests.api.mgmt import admin_required from trove.tests.api.mgmt import datastore_versions from trove.tests.api.mgmt import hosts from trove.tests.api.mgmt import instances as mgmt_instances from trove.tests.api.mgmt import storage from trove.tests.api import replication from trove.tests.api import root from trove.tests.api import user_access from trove.tests.api import users from trove.tests.api import versions from trove.tests.scenario.groups import backup_group from trove.tests.scenario.groups import cluster_actions_group from trove.tests.scenario.groups import configuration_group from trove.tests.scenario.groups import database_actions_group from trove.tests.scenario.groups import guest_log_group from trove.tests.scenario.groups import instance_actions_group from trove.tests.scenario.groups import instance_create_group from trove.tests.scenario.groups import instance_delete_group from trove.tests.scenario.groups import module_group from trove.tests.scenario.groups import negative_cluster_actions_group from trove.tests.scenario.groups import replication_group from trove.tests.scenario.groups import root_actions_group from trove.tests.scenario.groups import user_actions_group GROUP_SERVICES_INITIALIZE = "services.initialize" GROUP_SETUP = 'dbaas.setup' def build_group(*groups): def merge(collection, *items): for item in items: if isinstance(item, list): merge(collection, *item) else: if item not in collection: collection.append(item) out = [] merge(out, *groups) return out def register(datastores, *test_groups): proboscis.register(groups=build_group(datastores), depends_on_groups=build_group(*test_groups)) black_box_groups = [ flavors.GROUP, users.GROUP, user_access.GROUP, databases.GROUP, root.GROUP, GROUP_SERVICES_INITIALIZE, instances.GROUP_START, instances.GROUP_QUOTAS, instances.GROUP_SECURITY_GROUPS, backups.GROUP, replication.GROUP, configurations.GROUP, datastores.GROUP, instances_actions.GROUP_RESIZE, # TODO(SlickNik): The restart tests fail intermittently so pulling # them out of the blackbox group temporarily. 
Refer to Trove bug: # https://bugs.launchpad.net/trove/+bug/1204233 # instances_actions.GROUP_RESTART, instances_actions.GROUP_STOP_MYSQL, instances.GROUP_STOP, versions.GROUP, instances.GROUP_GUEST, datastore_versions.GROUP, ] proboscis.register(groups=["blackbox", "mysql"], depends_on_groups=black_box_groups) simple_black_box_groups = [ GROUP_SERVICES_INITIALIZE, flavors.GROUP, versions.GROUP, instances.GROUP_START_SIMPLE, admin_required.GROUP, datastore_versions.GROUP, ] proboscis.register(groups=["simple_blackbox"], depends_on_groups=simple_black_box_groups) black_box_mgmt_groups = [ accounts.GROUP, hosts.GROUP, storage.GROUP, instances_actions.GROUP_REBOOT, admin_required.GROUP, mgmt_instances.GROUP, datastore_versions.GROUP, ] proboscis.register(groups=["blackbox_mgmt"], depends_on_groups=black_box_mgmt_groups) # # Group designations for datastore agnostic int-tests # # Base groups for all other groups base_groups = [ GROUP_SERVICES_INITIALIZE, flavors.GROUP, versions.GROUP, GROUP_SETUP ] # Cluster-based groups cluster_actions_groups = list(base_groups) cluster_actions_groups.extend([cluster_actions_group.GROUP, negative_cluster_actions_group.GROUP]) # Single-instance based groups instance_create_groups = list(base_groups) instance_create_groups.extend([instance_create_group.GROUP, instance_delete_group.GROUP]) backup_groups = list(instance_create_groups) backup_groups.extend([backup_group.GROUP]) configuration_groups = list(instance_create_groups) configuration_groups.extend([configuration_group.GROUP]) database_actions_groups = list(instance_create_groups) database_actions_groups.extend([database_actions_group.GROUP]) guest_log_groups = list(instance_create_groups) guest_log_groups.extend([guest_log_group.GROUP]) instance_actions_groups = list(instance_create_groups) instance_actions_groups.extend([instance_actions_group.GROUP]) instance_module_groups = list(instance_create_groups) instance_module_groups.extend([module_group.GROUP_INSTANCE_MODULE]) module_groups = list(instance_create_groups) module_groups.extend([module_group.GROUP]) module_create_groups = list(base_groups) module_create_groups.extend([module_group.GROUP_MODULE_CREATE, module_group.GROUP_MODULE_DELETE]) replication_groups = list(instance_create_groups) replication_groups.extend([replication_group.GROUP]) root_actions_groups = list(instance_create_groups) root_actions_groups.extend([root_actions_group.GROUP]) user_actions_groups = list(instance_create_groups) user_actions_groups.extend([user_actions_group.GROUP]) # groups common to all datastores common_groups = list(instance_actions_groups) common_groups.extend([guest_log_groups, module_groups]) # Register: Component based groups register(["backup"], backup_groups) register(["cluster"], cluster_actions_groups) register(["configuration"], configuration_groups) register(["database"], database_actions_groups) register(["guest_log"], guest_log_groups) register(["instance", "instance_actions"], instance_actions_groups) register(["instance_create"], instance_create_groups) register(["instance_module"], instance_module_groups) register(["module"], module_groups) register(["module_create"], module_create_groups) register(["replication"], replication_groups) register(["root"], root_actions_groups) register(["user"], user_actions_groups) # Register: Datastore based groups # These should contain all functionality currently supported by the datastore register(["db2_supported"], common_groups, database_actions_groups, user_actions_groups) register(["cassandra_supported"], 
common_groups, user_actions_groups, database_actions_groups, backup_groups, configuration_groups, cluster_actions_groups) register(["couchbase_supported"], common_groups, backup_groups, root_actions_groups) register(["couchdb_supported"], common_groups, backup_groups, user_actions_groups, database_actions_groups, root_actions_groups) register(["postgresql_supported"], common_groups, backup_groups, database_actions_groups, configuration_groups, root_actions_groups, user_actions_groups) register(["mysql_supported", "percona_supported"], common_groups, backup_groups, configuration_groups, database_actions_groups, replication_groups, root_actions_groups, user_actions_groups) register(["mariadb_supported"], common_groups, backup_groups, cluster_actions_groups, configuration_groups, database_actions_groups, replication_groups, root_actions_groups, user_actions_groups) register(["mongodb_supported"], common_groups, backup_groups, cluster_actions_groups, configuration_groups, database_actions_groups, root_actions_groups, user_actions_groups) register(["pxc_supported"], common_groups, cluster_actions_groups, root_actions_groups) register(["redis_supported"], common_groups, backup_groups, replication_groups, cluster_actions_groups) register(["vertica_supported"], common_groups, cluster_actions_groups, root_actions_groups, configuration_groups) trove-5.0.0/trove/tests/scenario/0000775000567000056710000000000012701410521020111 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/__init__.py0000664000567000056710000000000012701410316022212 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/runners/0000775000567000056710000000000012701410521021605 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/runners/user_actions_runners.py0000664000567000056710000003743012701410316026442 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves.urllib import parse as urllib_parse from proboscis import SkipTest from trove.tests.scenario.runners.test_runners import TestRunner from troveclient.compat import exceptions class UserActionsRunner(TestRunner): # TODO(pmalik): I believe the 202 (Accepted) should be replaced by # 200 (OK) as the actions are generally very fast and their results # available immediately upon execution of the request. This would # likely require replacing GA casts with calls which I believe are # more appropriate anyways. 
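# NOTE(editor): until such a change lands, 202 (Accepted) is the honest
# status for these operations, because the API merely enqueues an
# asynchronous guest-agent cast; hence the runners below default to
# expecting 202 for mutations and 200 for reads.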
def __init__(self): super(UserActionsRunner, self).__init__() self.user_defs = [] @property def first_user_def(self): if self.user_defs: return self.user_defs[0] raise SkipTest("No valid user definitions provided.") def run_users_create(self, expected_http_code=202): users = self.test_helper.get_valid_user_definitions() if users: self.user_defs = self.assert_users_create( self.instance_info.id, users, expected_http_code) else: raise SkipTest("No valid user definitions provided.") def assert_users_create(self, instance_id, serial_users_def, expected_http_code): self.auth_client.users.create(instance_id, serial_users_def) self.assert_client_code(expected_http_code) return serial_users_def def run_user_show(self, expected_http_code=200): for user_def in self.user_defs: self.assert_user_show( self.instance_info.id, user_def, expected_http_code) def assert_user_show(self, instance_id, expected_user_def, expected_http_code): user_name = expected_user_def['name'] user_host = expected_user_def.get('host') queried_user = self.auth_client.users.get( instance_id, user_name, user_host) self.assert_client_code(expected_http_code) self._assert_user_matches(queried_user, expected_user_def) def _assert_user_matches(self, user, expected_user_def): user_name = expected_user_def['name'] self.assert_equal(expected_user_def['name'], user.name, "Mismatch of names for user: %s" % user_name) self.assert_list_elements_equal( expected_user_def['databases'], user.databases, "Mismatch of databases for user: %s" % user_name) def run_users_list(self, expected_http_code=200): self.assert_users_list( self.instance_info.id, self.user_defs, expected_http_code) def assert_users_list(self, instance_id, expected_user_defs, expected_http_code, limit=2): full_list = self.auth_client.users.list(instance_id) self.assert_client_code(expected_http_code) listed_users = {user.name: user for user in full_list} self.assert_is_none(full_list.next, "Unexpected pagination in the list.") for user_def in expected_user_defs: user_name = user_def['name'] self.assert_true( user_name in listed_users, "User not included in the 'user-list' output: %s" % user_name) self._assert_user_matches(listed_users[user_name], user_def) # Check that the system (ignored) users are not included in the output. system_users = self.get_system_users() self.assert_false( any(name in listed_users for name in system_users), "System users should not be included in the 'user-list' output.") # Test list pagination. 
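# NOTE(editor): illustrative walk-through of the checks below, assuming
# limit=2 and users [u1, u2, u3]:
#   page 1 -> [u1, u2], page.next == marker derived from u2
#   page 2 (marker) -> [u3], page.next == None
# The marker format is datastore-specific: the base runner quotes the
# user name, while MysqlUserActionsRunner (end of this module) quotes
# 'name@host'.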
list_page = self.auth_client.users.list(instance_id, limit=limit) self.assert_client_code(expected_http_code) self.assert_true(len(list_page) <= limit) if len(full_list) > limit: self.assert_is_not_none(list_page.next, "List page is missing.") else: self.assert_is_none(list_page.next, "An extra page in the list.") marker = list_page.next self.assert_pagination_match(list_page, full_list, 0, limit) if marker: last_user = list_page[-1] expected_marker = self.as_pagination_marker(last_user) self.assert_equal(expected_marker, marker, "Pagination marker should be the last element " "in the page.") list_page = self.auth_client.users.list(instance_id, marker=marker) self.assert_client_code(expected_http_code) self.assert_pagination_match( list_page, full_list, limit, len(full_list)) def as_pagination_marker(self, user): return urllib_parse.quote(user.name) def run_user_create_with_no_attributes( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_users_create_failure( self.instance_info.id, {}, expected_exception, expected_http_code) def run_user_create_with_blank_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): usr_def = self.test_helper.get_non_existing_user_definition() # Test with missing user name attribute. no_name_usr_def = self.copy_dict(usr_def, ignored_keys=['name']) self.assert_users_create_failure( self.instance_info.id, no_name_usr_def, expected_exception, expected_http_code) # Test with empty user name attribute. blank_name_usr_def = self.copy_dict(usr_def) blank_name_usr_def.update({'name': ''}) self.assert_users_create_failure( self.instance_info.id, blank_name_usr_def, expected_exception, expected_http_code) def run_user_create_with_blank_password( self, expected_exception=exceptions.BadRequest, expected_http_code=400): usr_def = self.test_helper.get_non_existing_user_definition() # Test with missing password attribute. no_pass_usr_def = self.copy_dict(usr_def, ignored_keys=['password']) self.assert_users_create_failure( self.instance_info.id, no_pass_usr_def, expected_exception, expected_http_code) # Test with missing databases attribute. no_db_usr_def = self.copy_dict(usr_def, ignored_keys=['databases']) self.assert_users_create_failure( self.instance_info.id, no_db_usr_def, expected_exception, expected_http_code) def run_existing_user_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_users_create_failure( self.instance_info.id, self.first_user_def, expected_exception, expected_http_code) def run_system_user_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). 
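# NOTE(editor): the system users come from the datastore's
# 'ignore_users' configuration (see get_system_users() near the end of
# this class); for MySQL that list includes 'root', so creating any of
# them through the API must be rejected.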
system_users = self.get_system_users() if system_users: user_defs = [{'name': name, 'password': 'password1', 'databases': []} for name in system_users] self.assert_users_create_failure( self.instance_info.id, user_defs, expected_exception, expected_http_code) def assert_users_create_failure( self, instance_id, serial_users_def, expected_exception, expected_http_code): self.assert_raises( expected_exception, expected_http_code, self.auth_client.users.create, instance_id, serial_users_def) def run_user_update_with_blank_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_user_attribute_update_failure( self.instance_info.id, self.first_user_def, {'name': ''}, expected_exception, expected_http_code) def run_user_update_with_existing_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_user_attribute_update_failure( self.instance_info.id, self.first_user_def, {'name': self.first_user_def['name']}, expected_exception, expected_http_code) def assert_user_attribute_update_failure( self, instance_id, user_def, update_attribites, expected_exception, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) self.assert_raises( expected_exception, expected_http_code, self.auth_client.users.update_attributes, instance_id, user_name, update_attribites, user_host) def _get_user_name_host_pair(self, user_def): return user_def['name'], user_def.get('host') def run_system_user_attribute_update( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). system_users = self.get_system_users() if system_users: for name in system_users: user_def = {'name': name, 'password': 'password2'} self.assert_user_attribute_update_failure( self.instance_info.id, user_def, user_def, expected_exception, expected_http_code) def run_user_attribute_update(self, expected_http_code=202): updated_def = self.first_user_def # Update the name by appending a random string to it. updated_name = ''.join([updated_def['name'], 'upd']) update_attribites = {'name': updated_name, 'password': 'password2'} self.assert_user_attribute_update( self.instance_info.id, updated_def, update_attribites, expected_http_code) def assert_user_attribute_update(self, instance_id, user_def, update_attribites, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) self.auth_client.users.update_attributes( instance_id, user_name, update_attribites, user_host) self.assert_client_code(expected_http_code) # Update the stored definitions with the new value. expected_def = None for user_def in self.user_defs: if user_def['name'] == user_name: user_def.update(update_attribites) expected_def = user_def # Verify using 'user-show' and 'user-list'. 
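# NOTE(editor): the update above may have renamed the user, so the
# verification must use the refreshed definition captured in the loop;
# a lookup under the old name would raise NotFound.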
self.assert_user_show(instance_id, expected_def, 200) self.assert_users_list(instance_id, self.user_defs, 200) def run_user_delete(self, expected_http_code=202): for user_def in self.user_defs: self.assert_user_delete( self.instance_info.id, user_def, expected_http_code) def assert_user_delete(self, instance_id, user_def, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) self.auth_client.users.delete(instance_id, user_name, user_host) self.assert_client_code(expected_http_code) self.assert_raises(exceptions.NotFound, 404, self.auth_client.users.get, instance_id, user_name, user_host) for user in self.auth_client.users.list(instance_id): if user.name == user_name: self.fail("User still listed after delete: %s" % user_name) def run_nonexisting_user_show( self, expected_exception=exceptions.NotFound, expected_http_code=404): usr_def = self.test_helper.get_non_existing_user_definition() self.assert_user_show_failure( self.instance_info.id, {'name': usr_def['name']}, expected_exception, expected_http_code) def assert_user_show_failure(self, instance_id, user_def, expected_exception, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) self.assert_raises( expected_exception, expected_http_code, self.auth_client.users.get, instance_id, user_name, user_host) def run_system_user_show( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). system_users = self.get_system_users() if system_users: for name in system_users: self.assert_user_show_failure( self.instance_info.id, {'name': name}, expected_exception, expected_http_code) def run_nonexisting_user_update(self, expected_http_code=404): # Test valid update on a non-existing user. usr_def = self.test_helper.get_non_existing_user_definition() update_def = {'name': usr_def['name']} self.assert_user_attribute_update_failure( self.instance_info.id, update_def, update_def, exceptions.NotFound, expected_http_code) def run_nonexisting_user_delete( self, expected_exception=exceptions.NotFound, expected_http_code=404): usr_def = self.test_helper.get_non_existing_user_definition() self.assert_user_delete_failure( self.instance_info.id, {'name': usr_def['name']}, expected_exception, expected_http_code) def assert_user_delete_failure( self, instance_id, user_def, expected_exception, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) self.assert_raises(expected_exception, expected_http_code, self.auth_client.users.delete, instance_id, user_name, user_host) def run_system_user_delete( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). 
system_users = self.get_system_users() if system_users: for name in system_users: self.assert_user_delete_failure( self.instance_info.id, {'name': name}, expected_exception, expected_http_code) def get_system_users(self): return self.get_datastore_config_property('ignore_users') class MysqlUserActionsRunner(UserActionsRunner): def as_pagination_marker(self, user): return urllib_parse.quote('%s@%s' % (user.name, user.host)) class MariadbUserActionsRunner(MysqlUserActionsRunner): def __init__(self): super(MariadbUserActionsRunner, self).__init__() class PerconaUserActionsRunner(MysqlUserActionsRunner): def __init__(self): super(PerconaUserActionsRunner, self).__init__() trove-5.0.0/trove/tests/scenario/runners/__init__.py0000664000567000056710000000000012701410316023706 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/runners/cluster_actions_runners.py0000664000567000056710000003715012701410316027144 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from proboscis import SkipTest import time as timer from trove.common import cfg from trove.common import exception from trove.common.utils import poll_until from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner from trove.tests.util.check import TypeCheck from troveclient.compat import exceptions CONF = cfg.CONF class ClusterActionsRunner(TestRunner): USE_CLUSTER_ID_FLAG = 'TESTS_USE_CLUSTER_ID' DO_NOT_DELETE_CLUSTER_FLAG = 'TESTS_DO_NOT_DELETE_CLUSTER' EXTRA_INSTANCE_NAME = "named_instance" def __init__(self): super(ClusterActionsRunner, self).__init__() self.cluster_id = 0 self.current_root_creds = None @property def is_using_existing_cluster(self): return self.has_env_flag(self.USE_CLUSTER_ID_FLAG) @property def has_do_not_delete_cluster(self): return self.has_env_flag(self.DO_NOT_DELETE_CLUSTER_FLAG) def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING', expected_instance_states=['BUILD', 'ACTIVE'], expected_http_code=200): if not num_nodes: num_nodes = self.min_cluster_node_count instances_def = [ self.build_flavor( flavor_id=self.instance_info.dbaas_flavor_href, volume_size=self.instance_info.volume['size'])] * num_nodes self.cluster_id = self.assert_cluster_create( 'test_cluster', instances_def, expected_task_name, expected_instance_states, expected_http_code) @property def min_cluster_node_count(self): return 2 def assert_cluster_create( self, cluster_name, instances_def, expected_task_name, expected_instance_states, expected_http_code): self.report.log("Testing cluster create: %s" % cluster_name) cluster = self.get_existing_cluster() if cluster: self.report.log("Using an existing cluster: %s" % cluster.id) cluster_instances = self._get_cluster_instances(cluster.id) self.assert_all_instance_states( cluster_instances, expected_instance_states[-1:]) else: cluster = self.auth_client.clusters.create( cluster_name, self.instance_info.dbaas_datastore, 
self.instance_info.dbaas_datastore_version, instances=instances_def) self._assert_cluster_action(cluster.id, expected_task_name, expected_http_code) cluster_instances = self._get_cluster_instances(cluster.id) self.assert_all_instance_states( cluster_instances, expected_instance_states) # Create the helper user/database on the first node. # The cluster should handle the replication itself. self.create_test_helper_on_instance(cluster_instances[0]) cluster_id = cluster.id # Although all instances have already acquired the expected state, # we still need to poll for the final cluster task, because # it may take up to the periodic task interval until the task name # gets updated in the Trove database. self._assert_cluster_states(cluster_id, ['NONE']) self._assert_cluster_response(cluster_id, 'NONE') return cluster_id def get_existing_cluster(self): if self.is_using_existing_cluster: cluster_id = os.environ.get(self.USE_CLUSTER_ID_FLAG) return self.auth_client.clusters.get(cluster_id) return None def run_cluster_root_enable(self, expected_task_name=None, expected_http_code=200): root_credentials = self.test_helper.get_helper_credentials_root() self.current_root_creds = self.auth_client.root.create_cluster_root( self.cluster_id, root_credentials['password']) self.assert_equal(root_credentials['name'], self.current_root_creds[0]) self.assert_equal(root_credentials['password'], self.current_root_creds[1]) self._assert_cluster_action(self.cluster_id, expected_task_name, expected_http_code) def run_verify_cluster_root_enable(self): if not self.current_root_creds: raise SkipTest("Root not enabled.") cluster = self.auth_client.clusters.get(self.cluster_id) for instance in cluster.instances: root_enabled_test = self.auth_client.root.is_instance_root_enabled( instance['id']) self.assert_true(root_enabled_test.rootEnabled) self.test_helper.ping( cluster.ip[0], username=self.current_root_creds[0], password=self.current_root_creds[1] ) def run_add_initial_cluster_data(self, data_type=DataType.tiny): self.assert_add_cluster_data(data_type, self.cluster_id) def run_add_extra_cluster_data(self, data_type=DataType.tiny2): self.assert_add_cluster_data(data_type, self.cluster_id) def assert_add_cluster_data(self, data_type, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) self.test_helper.add_data(data_type, cluster.ip[0]) def run_verify_initial_cluster_data(self, data_type=DataType.tiny): self.assert_verify_cluster_data(data_type, self.cluster_id) def run_verify_extra_cluster_data(self, data_type=DataType.tiny2): self.assert_verify_cluster_data(data_type, self.cluster_id) def assert_verify_cluster_data(self, data_type, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) self.test_helper.verify_data(data_type, cluster.ip[0]) def run_remove_initial_cluster_data(self, data_type=DataType.tiny): self.assert_remove_cluster_data(data_type, self.cluster_id) def run_remove_extra_cluster_data(self, data_type=DataType.tiny2): self.assert_remove_cluster_data(data_type, self.cluster_id) def assert_remove_cluster_data(self, data_type, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) self.test_helper.remove_data(data_type, cluster.ip[0]) def run_cluster_grow(self, expected_task_name='GROWING_CLUSTER', expected_http_code=202): # Add two instances. One with an explicit name. 
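        # Each definition produced by _build_instance_def() below is a
        # flavor/volume mapping, roughly (a sketch; the exact keys are
        # whatever build_flavor() emits):
        #   {'flavorRef': '<flavor-href>', 'volume': {'size': 1}}
        # with an optional 'name' entry for the named instance.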
added_instance_defs = [ self._build_instance_def(self.instance_info.dbaas_flavor_href, self.instance_info.volume['size']), self._build_instance_def(self.instance_info.dbaas_flavor_href, self.instance_info.volume['size'], self.EXTRA_INSTANCE_NAME)] self.assert_cluster_grow( self.cluster_id, added_instance_defs, expected_task_name, expected_http_code) def _build_instance_def(self, flavor_id, volume_size, name=None): instance_def = self.build_flavor( flavor_id=flavor_id, volume_size=volume_size) if name: instance_def.update({'name': name}) return instance_def def assert_cluster_grow(self, cluster_id, added_instance_defs, expected_task_name, expected_http_code): cluster = self.auth_client.clusters.get(cluster_id) initial_instance_count = len(cluster.instances) cluster = self.auth_client.clusters.grow(cluster_id, added_instance_defs) self._assert_cluster_action(cluster_id, expected_task_name, expected_http_code) self.assert_equal(len(added_instance_defs), len(cluster.instances) - initial_instance_count, "Unexpected number of added nodes.") cluster_instances = self._get_cluster_instances(cluster_id) self.assert_all_instance_states(cluster_instances, ['ACTIVE']) self._assert_cluster_states(cluster_id, ['NONE']) self._assert_cluster_response(cluster_id, 'NONE') def run_cluster_shrink( self, expected_task_name=None, expected_http_code=202): self.assert_cluster_shrink(self.cluster_id, [self.EXTRA_INSTANCE_NAME], expected_task_name, expected_http_code) def assert_cluster_shrink(self, cluster_id, removed_instance_names, expected_task_name, expected_http_code): cluster = self.auth_client.clusters.get(cluster_id) initial_instance_count = len(cluster.instances) removed_instances = self._find_cluster_instances_by_name( cluster, removed_instance_names) cluster = self.auth_client.clusters.shrink( cluster_id, [{'id': instance['id']} for instance in removed_instances]) self._assert_cluster_action(cluster_id, expected_task_name, expected_http_code) self._assert_cluster_states(cluster_id, ['NONE']) cluster = self.auth_client.clusters.get(cluster_id) self.assert_equal( len(removed_instance_names), initial_instance_count - len(cluster.instances), "Unexpected number of removed nodes.") cluster_instances = self._get_cluster_instances(cluster_id) self.assert_all_instance_states(cluster_instances, ['ACTIVE']) self._assert_cluster_response(cluster_id, 'NONE') def _find_cluster_instances_by_name(self, cluster, instance_names): return [instance for instance in cluster.instances if instance['name'] in instance_names] def run_cluster_delete( self, expected_task_name='DELETING', expected_last_instance_state='SHUTDOWN', expected_http_code=202): if self.has_do_not_delete_cluster: self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was " "specified, skipping delete...") raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.") self.assert_cluster_delete( self.cluster_id, expected_task_name, expected_last_instance_state, expected_http_code) def assert_cluster_delete( self, cluster_id, expected_task_name, expected_last_instance_state, expected_http_code): self.report.log("Testing cluster delete: %s" % cluster_id) cluster_instances = self._get_cluster_instances(cluster_id) self.auth_client.clusters.delete(cluster_id) self._assert_cluster_action(cluster_id, expected_task_name, expected_http_code) self.assert_all_gone(cluster_instances, expected_last_instance_state) self._assert_cluster_gone(cluster_id) def _get_cluster_instances(self, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) return 
                [self.auth_client.instances.get(instance['id'])
                 for instance in cluster.instances]

    def _assert_cluster_action(
            self, cluster_id, expected_state, expected_http_code):
        if expected_http_code is not None:
            self.assert_client_code(expected_http_code)
        if expected_state:
            self._assert_cluster_response(cluster_id, expected_state)

    def _assert_cluster_states(self, cluster_id, expected_states,
                               fast_fail_status=None):
        for status in expected_states:
            start_time = timer.time()
            try:
                poll_until(lambda: self._has_task(
                    cluster_id, status, fast_fail_status=fast_fail_status),
                    sleep_time=self.def_sleep_time,
                    time_out=self.def_timeout)
                self.report.log("Cluster has gone '%s' in %s." %
                                (status, self._time_since(start_time)))
            except exception.PollTimeOut:
                self.report.log(
                    "Status of cluster '%s' did not change to '%s' after %s."
                    % (cluster_id, status, self._time_since(start_time)))
                return False

        return True

    def _has_task(self, cluster_id, task, fast_fail_status=None):
        cluster = self.auth_client.clusters.get(cluster_id)
        task_name = cluster.task['name']
        self.report.log("Waiting for cluster '%s' to become '%s': %s"
                        % (cluster_id, task, task_name))
        if fast_fail_status and task_name == fast_fail_status:
            raise RuntimeError("Cluster '%s' acquired a fast-fail task: %s"
                               % (cluster_id, task))
        return task_name == task

    def _assert_cluster_response(self, cluster_id, expected_state):
        cluster = self.auth_client.clusters.get(cluster_id)
        with TypeCheck('Cluster', cluster) as check:
            check.has_field("id", basestring)
            check.has_field("name", basestring)
            check.has_field("datastore", dict)
            check.has_field("instances", list)
            check.has_field("links", list)
            check.has_field("created", unicode)
            check.has_field("updated", unicode)
            for instance in cluster.instances:
                # Assert the type check instead of discarding its result.
                self.assert_true(isinstance(instance, dict))
                self.assert_is_not_none(instance['id'])
                self.assert_is_not_none(instance['links'])
                self.assert_is_not_none(instance['name'])
        self.assert_equal(expected_state, cluster.task['name'],
                          'Unexpected cluster task name')

    def _assert_cluster_gone(self, cluster_id):
        t0 = timer.time()
        try:
            # This will poll until the cluster goes away.
            self._assert_cluster_states(cluster_id, ['NONE'])
            self.fail(
                "Cluster '%s' still existed after %s seconds."
                % (cluster_id, self._time_since(t0)))
        except exceptions.NotFound:
            self.assert_client_code(404)


class CassandraClusterActionsRunner(ClusterActionsRunner):

    def run_cluster_root_enable(self):
        raise SkipTest("Operation is currently not supported.")


class MariadbClusterActionsRunner(ClusterActionsRunner):

    @property
    def min_cluster_node_count(self):
        return self.get_datastore_config_property('min_cluster_member_count')

    def run_cluster_root_enable(self):
        raise SkipTest("Operation is currently not supported.")


class PxcClusterActionsRunner(ClusterActionsRunner):

    @property
    def min_cluster_node_count(self):
        return self.get_datastore_config_property('min_cluster_member_count')


class VerticaClusterActionsRunner(ClusterActionsRunner):

    @property
    def min_cluster_node_count(self):
        return self.get_datastore_config_property('cluster_member_count')


class RedisClusterActionsRunner(ClusterActionsRunner):

    def run_cluster_root_enable(self):
        raise SkipTest("Operation is currently not supported.")


class MongodbClusterActionsRunner(ClusterActionsRunner):

    def run_cluster_root_enable(self):
        raise SkipTest("Operation is currently not supported.")
trove-5.0.0/trove/tests/scenario/runners/module_runners.py0000664000567000056710000013223012701410316025223 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import Crypto.Random
from proboscis import SkipTest
import tempfile

from troveclient.compat import exceptions

from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.module import models
from trove.tests.scenario.runners.test_runners import TestRunner


# Variables here are set up to be used across multiple groups,
# since each group will instantiate a new runner
random_data = Crypto.Random.new().read(20)
test_modules = []
module_count_prior_to_create = 0
module_ds_count_prior_to_create = 0
module_ds_all_count_prior_to_create = 0
module_all_tenant_count_prior_to_create = 0
module_auto_apply_count_prior_to_create = 0
module_admin_count_prior_to_create = 0
module_other_count_prior_to_create = 0
module_create_count = 0
module_ds_create_count = 0
module_ds_all_create_count = 0
module_all_tenant_create_count = 0
module_auto_apply_create_count = 0
module_admin_create_count = 0
module_other_create_count = 0


class ModuleRunner(TestRunner):

    def __init__(self):
        self.TIMEOUT_MODULE_APPLY = 60 * 10

        super(ModuleRunner, self).__init__(
            sleep_time=10, timeout=self.TIMEOUT_MODULE_APPLY)

        self.MODULE_CONTENTS_PATTERN = 'Message=%s\n'
        self.MODULE_MESSAGE_PATTERN = 'Hello World from: %s'
        self.MODULE_NAME = 'test_module_1'
        self.MODULE_DESC = 'test description'
        self.MODULE_NEG_CONTENTS = 'contents for negative tests'
        self.MODULE_BINARY_SUFFIX = '_bin_auto'
        self.MODULE_BINARY_SUFFIX2 = self.MODULE_BINARY_SUFFIX + '_2'
        self.MODULE_BINARY_CONTENTS = random_data
        self.MODULE_BINARY_CONTENTS2 = '\x00\xFF\xea\x9c\x11\xfeok\xb1\x8ax'

        self.mod_inst_id = None
        self.temp_module = None
        self._module_type = None

    @property
    def module_type(self):
        if not self._module_type:
            self._module_type = self.test_helper.get_valid_module_type()
        return self._module_type

    @property
    def main_test_module(self):
        if not test_modules or not test_modules[0]:
            raise SkipTest("No main module created")
        return test_modules[0]

    def build_module_args(self, extra=None):
        extra = extra or ''
        name = self.MODULE_NAME + extra
        desc = self.MODULE_DESC + extra.replace('_', ' ')
        cont = self.get_module_contents(name)
        return name, desc, cont

    def get_module_contents(self, name=None):
        message = self.get_module_message(name=name)
        return self.MODULE_CONTENTS_PATTERN % message

    def get_module_message(self, name=None):
        name = name or self.MODULE_NAME
        return self.MODULE_MESSAGE_PATTERN % name

    def _find_invisible_module(self):
        def _match(mod):
            return not mod.visible and mod.tenant_id and not mod.auto_apply
        return self._find_module(_match, "Could not find invisible module")

    def _find_module(self, match_fn, not_found_message, find_all=False):
        found = [] if find_all else None
        for test_module in test_modules:
            if match_fn(test_module):
                if find_all:
                    found.append(test_module)
                else:
                    found = test_module
                    break
        if not found:
            self.fail(not_found_message)

        return found

    def _find_auto_apply_module(self):
        def _match(mod):
            return mod.auto_apply and mod.tenant_id and mod.visible
        return
self._find_module(_match, "Could not find auto-apply module") def _find_all_tenant_module(self): def _match(mod): return mod.tenant_id is None and mod.visible return self._find_module(_match, "Could not find all tenant module") def _find_all_auto_apply_modules(self, visible=None): def _match(mod): return mod.auto_apply and ( visible is None or mod.visible == visible) return self._find_module( _match, "Could not find all auto apply modules", find_all=True) # Tests start here def run_module_delete_existing(self): modules = self.admin_client.modules.list() for module in modules: if module.name.startswith(self.MODULE_NAME): self.admin_client.modules.delete(module.id) def run_module_create_bad_type( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, 'invalid-type', self.MODULE_NEG_CONTENTS) def run_module_create_non_admin_auto( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, auto_apply=True) def run_module_create_non_admin_all_tenant( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, all_tenants=True) def run_module_create_non_admin_hidden( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, visible=False) def run_module_create_bad_datastore( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, datastore='bad-datastore') def run_module_create_bad_datastore_version( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, datastore=self.instance_info.dbaas_datastore, datastore_version='bad-datastore-version') def run_module_create_missing_datastore( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, datastore_version=self.instance_info.dbaas_datastore_version) def run_module_create(self): # Necessary to test that the count increases. 
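        # The counts are kept in module-level globals (declared near the
        # top of this file) because each test group instantiates a fresh
        # runner; instance attributes would not survive between groups.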
global module_count_prior_to_create global module_ds_count_prior_to_create global module_ds_all_count_prior_to_create global module_all_tenant_count_prior_to_create global module_auto_apply_count_prior_to_create global module_admin_count_prior_to_create global module_other_count_prior_to_create module_count_prior_to_create = len( self.auth_client.modules.list()) module_ds_count_prior_to_create = len( self.auth_client.modules.list( datastore=self.instance_info.dbaas_datastore)) module_ds_all_count_prior_to_create = len( self.auth_client.modules.list( datastore=models.Modules.MATCH_ALL_NAME)) module_all_tenant_count_prior_to_create = len( self.unauth_client.modules.list()) module_auto_apply_count_prior_to_create = len( [module for module in self.admin_client.modules.list() if module.auto_apply]) module_admin_count_prior_to_create = len( self.admin_client.modules.list()) module_other_count_prior_to_create = len( self.unauth_client.modules.list()) name, description, contents = self.build_module_args() self.assert_module_create( self.auth_client, name=name, module_type=self.module_type, contents=contents, description=description) def assert_module_create(self, client, name=None, module_type=None, contents=None, description=None, all_tenants=False, datastore=None, datastore_version=None, auto_apply=False, live_update=False, visible=True): result = client.modules.create( name, module_type, contents, description=description, all_tenants=all_tenants, datastore=datastore, datastore_version=datastore_version, auto_apply=auto_apply, live_update=live_update, visible=visible) global module_create_count global module_ds_create_count global module_ds_all_create_count global module_auto_apply_create_count global module_all_tenant_create_count global module_admin_create_count global module_other_create_count if (client == self.auth_client or (client == self.admin_client and visible)): module_create_count += 1 if datastore: module_ds_create_count += 1 else: module_ds_all_create_count += 1 elif not visible: module_admin_create_count += 1 else: module_other_create_count += 1 if all_tenants and visible: module_all_tenant_create_count += 1 if auto_apply and visible: module_auto_apply_create_count += 1 global test_modules test_modules.append(result) tenant_id = None tenant = models.Modules.MATCH_ALL_NAME if not all_tenants: tenant, tenant_id = self.get_client_tenant(client) # If we find a way to grab the tenant name in the module # stuff, the line below can be removed tenant = tenant_id datastore = datastore or models.Modules.MATCH_ALL_NAME datastore_version = datastore_version or models.Modules.MATCH_ALL_NAME self.validate_module( result, validate_all=False, expected_name=name, expected_module_type=module_type, expected_description=description, expected_tenant=tenant, expected_tenant_id=tenant_id, expected_datastore=datastore, expected_ds_version=datastore_version, expected_auto_apply=auto_apply, expected_contents=contents) def validate_module(self, module, validate_all=False, expected_name=None, expected_module_type=None, expected_description=None, expected_tenant=None, expected_tenant_id=None, expected_datastore=None, expected_datastore_id=None, expected_ds_version=None, expected_ds_version_id=None, expected_all_tenants=None, expected_auto_apply=None, expected_live_update=None, expected_visible=None, expected_contents=None): if expected_all_tenants: expected_tenant = expected_tenant or models.Modules.MATCH_ALL_NAME if expected_name: self.assert_equal(expected_name, module.name, 'Unexpected module name') if 
expected_module_type: self.assert_equal(expected_module_type.lower(), module.type, 'Unexpected module type') if expected_description: self.assert_equal(expected_description, module.description, 'Unexpected module description') if expected_tenant_id: self.assert_equal(expected_tenant_id, module.tenant_id, 'Unexpected tenant id') if expected_tenant: self.assert_equal(expected_tenant, module.tenant, 'Unexpected tenant name') if expected_datastore: self.assert_equal(expected_datastore, module.datastore, 'Unexpected datastore') if expected_ds_version: self.assert_equal(expected_ds_version, module.datastore_version, 'Unexpected datastore version') if expected_auto_apply is not None: self.assert_equal(expected_auto_apply, module.auto_apply, 'Unexpected auto_apply') if validate_all: if expected_datastore_id: self.assert_equal(expected_datastore_id, module.datastore_id, 'Unexpected datastore id') if expected_ds_version_id: self.assert_equal(expected_ds_version_id, module.datastore_version_id, 'Unexpected datastore version id') if expected_live_update is not None: self.assert_equal(expected_live_update, module.live_update, 'Unexpected live_update') if expected_visible is not None: self.assert_equal(expected_visible, module.visible, 'Unexpected visible') def run_module_create_dupe( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS) def run_module_create_bin(self): name, description, contents = self.build_module_args( self.MODULE_BINARY_SUFFIX) self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=self.MODULE_BINARY_CONTENTS, description=description, auto_apply=True, visible=False) def run_module_create_bin2(self): name, description, contents = self.build_module_args( self.MODULE_BINARY_SUFFIX2) self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=self.MODULE_BINARY_CONTENTS2, description=description, auto_apply=True, visible=False) def run_module_show(self): test_module = self.main_test_module result = self.auth_client.modules.get(test_module.id) self.validate_module( result, validate_all=True, expected_name=test_module.name, expected_module_type=test_module.type, expected_description=test_module.description, expected_tenant=test_module.tenant, expected_datastore=test_module.datastore, expected_ds_version=test_module.datastore_version, expected_auto_apply=test_module.auto_apply, expected_live_update=False, expected_visible=True) def run_module_show_unauth_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, None, self.unauth_client.modules.get, self.main_test_module.id) # we're using a different client, so we'll check the return code # on it explicitly, instead of depending on 'assert_raises' self.assert_client_code(expected_http_code=expected_http_code, client=self.unauth_client) def run_module_list(self): self.assert_module_list( self.auth_client, module_count_prior_to_create + module_create_count) def assert_module_list(self, client, expected_count, datastore=None, skip_validation=False): if datastore: module_list = client.modules.list(datastore=datastore) else: module_list = client.modules.list() self.assert_equal(expected_count, len(module_list), "Wrong number of modules for list") if not skip_validation: for module in module_list: if module.name != self.MODULE_NAME: continue 
test_module = self.main_test_module self.validate_module( module, validate_all=False, expected_name=test_module.name, expected_module_type=test_module.type, expected_description=test_module.description, expected_tenant=test_module.tenant, expected_datastore=test_module.datastore, expected_ds_version=test_module.datastore_version, expected_auto_apply=test_module.auto_apply) def run_module_list_unauth_user(self): self.assert_module_list( self.unauth_client, module_all_tenant_count_prior_to_create + module_all_tenant_create_count + module_other_create_count) def run_module_create_admin_all(self): name, description, contents = self.build_module_args( '_hidden_all_tenant_auto') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, all_tenants=True, visible=False, auto_apply=True) def run_module_create_admin_hidden(self): name, description, contents = self.build_module_args('_hidden') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, visible=False) def run_module_create_admin_auto(self): name, description, contents = self.build_module_args('_auto') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, auto_apply=True) def run_module_create_admin_live_update(self): name, description, contents = self.build_module_args('_live') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, live_update=True) def run_module_create_datastore(self): name, description, contents = self.build_module_args('_ds') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, datastore=self.instance_info.dbaas_datastore) def run_module_create_ds_version(self): name, description, contents = self.build_module_args('_ds_ver') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) def run_module_create_all_tenant(self): name, description, contents = self.build_module_args( '_all_tenant_ds_ver') self.assert_module_create( self.admin_client, name=name, module_type=self.module_type, contents=contents, description=description, all_tenants=True, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) def run_module_create_different_tenant(self): name, description, contents = self.build_module_args() self.assert_module_create( self.unauth_client, name=name, module_type=self.module_type, contents=contents, description=description) def run_module_list_again(self): self.assert_module_list( self.auth_client, module_count_prior_to_create + module_create_count, skip_validation=True) def run_module_list_ds(self): self.assert_module_list( self.auth_client, module_ds_count_prior_to_create + module_ds_create_count, datastore=self.instance_info.dbaas_datastore, skip_validation=True) def run_module_list_ds_all(self): self.assert_module_list( self.auth_client, module_ds_all_count_prior_to_create + module_ds_all_create_count, datastore=models.Modules.MATCH_ALL_NAME, skip_validation=True) def run_module_show_invisible( self, expected_exception=exceptions.NotFound, expected_http_code=404): module = self._find_invisible_module() 
self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.get, module.id) def run_module_list_admin(self): self.assert_module_list( self.admin_client, (module_admin_count_prior_to_create + module_create_count + module_admin_create_count + module_other_create_count), skip_validation=True) def run_module_update(self): self.assert_module_update( self.auth_client, self.main_test_module.id, description=self.MODULE_DESC + " modified") def run_module_update_same_contents(self): old_md5 = self.main_test_module.md5 self.assert_module_update( self.auth_client, self.main_test_module.id, contents=self.get_module_contents(self.main_test_module.name)) self.assert_equal(old_md5, self.main_test_module.md5, "MD5 changed with same contents") def run_module_update_auto_toggle(self): module = self._find_auto_apply_module() toggle_off_args = {'auto_apply': False} toggle_on_args = {'auto_apply': True} self.assert_module_toggle(module, toggle_off_args, toggle_on_args) def assert_module_toggle(self, module, toggle_off_args, toggle_on_args): # First try to update the module based on the change # (this should toggle the state and allow non-admin access) self.assert_module_update( self.admin_client, module.id, **toggle_off_args) # Now we can update using the non-admin client self.assert_module_update( self.auth_client, module.id, description='Updated by auth') # Now set it back self.assert_module_update( self.admin_client, module.id, description=module.description, **toggle_on_args) def run_module_update_all_tenant_toggle(self): module = self._find_all_tenant_module() toggle_off_args = {'all_tenants': False} toggle_on_args = {'all_tenants': True} self.assert_module_toggle(module, toggle_off_args, toggle_on_args) def run_module_update_invisible_toggle(self): module = self._find_invisible_module() toggle_off_args = {'visible': True} toggle_on_args = {'visible': False} self.assert_module_toggle(module, toggle_off_args, toggle_on_args) def assert_module_update(self, client, module_id, **kwargs): result = client.modules.update(module_id, **kwargs) global test_modules found = False index = -1 for test_module in test_modules: index += 1 if test_module.id == module_id: found = True break if not found: self.fail("Could not find updated module in module list") test_modules[index] = result expected_args = {} for key, value in kwargs.items(): new_key = 'expected_' + key expected_args[new_key] = value self.validate_module(result, **expected_args) def run_module_update_unauth( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, expected_http_code, self.unauth_client.modules.update, self.main_test_module.id, description='Upd') def run_module_update_non_admin_auto( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, self.main_test_module.id, visible=False) def run_module_update_non_admin_auto_off( self, expected_exception=exceptions.Forbidden, expected_http_code=403): module = self._find_auto_apply_module() self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, module.id, auto_apply=False) def run_module_update_non_admin_auto_any( self, expected_exception=exceptions.Forbidden, expected_http_code=403): module = self._find_auto_apply_module() self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, module.id, description='Upd') def 
run_module_update_non_admin_all_tenant( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, self.main_test_module.id, all_tenants=True) def run_module_update_non_admin_all_tenant_off( self, expected_exception=exceptions.Forbidden, expected_http_code=403): module = self._find_all_tenant_module() self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, module.id, all_tenants=False) def run_module_update_non_admin_all_tenant_any( self, expected_exception=exceptions.Forbidden, expected_http_code=403): module = self._find_all_tenant_module() self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, module.id, description='Upd') def run_module_update_non_admin_invisible( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, self.main_test_module.id, visible=False) def run_module_update_non_admin_invisible_off( self, expected_exception=exceptions.NotFound, expected_http_code=404): module = self._find_invisible_module() self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, module.id, visible=True) def run_module_update_non_admin_invisible_any( self, expected_exception=exceptions.NotFound, expected_http_code=404): module = self._find_invisible_module() self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.update, module.id, description='Upd') # ModuleInstanceGroup methods def run_module_list_instance_empty(self): self.assert_module_list_instance( self.auth_client, self.instance_info.id, module_auto_apply_count_prior_to_create) def assert_module_list_instance(self, client, instance_id, expected_count, expected_http_code=200): module_list = client.instances.modules(instance_id) self.assert_client_code(expected_http_code, client) count = len(module_list) self.assert_equal(expected_count, count, "Wrong number of modules from list instance") for module in module_list: self.validate_module(module) def run_module_instances_empty(self): self.assert_module_instances( self.auth_client, self.main_test_module.id, 0) def assert_module_instances(self, client, module_id, expected_count, expected_http_code=200): instance_list = client.modules.instances(module_id) self.assert_client_code(expected_http_code, client) count = len(instance_list) self.assert_equal(expected_count, count, "Wrong number of instances applied from module") def run_module_query_empty(self): self.assert_module_query(self.auth_client, self.instance_info.id, module_auto_apply_count_prior_to_create) def assert_module_query(self, client, instance_id, expected_count, expected_http_code=200, expected_results=None): modquery_list = client.instances.module_query(instance_id) self.assert_client_code(expected_http_code, client) count = len(modquery_list) self.assert_equal(expected_count, count, "Wrong number of modules from query") expected_results = expected_results or {} for modquery in modquery_list: if modquery.name in expected_results: expected = expected_results[modquery.name] self.validate_module_info( modquery, expected_status=expected['status'], expected_message=expected['message']) def run_module_apply(self): self.assert_module_apply(self.auth_client, self.instance_info.id, self.main_test_module) def assert_module_apply(self, client, instance_id, module, expected_status=None, 
expected_message=None, expected_contents=None, expected_http_code=200): module_apply_list = client.instances.module_apply( instance_id, [module.id]) self.assert_client_code(expected_http_code, client) admin_only = (not module.visible or module.auto_apply or not module.tenant_id) expected_status = expected_status or 'OK' expected_message = (expected_message or self.get_module_message(module.name)) for module_apply in module_apply_list: self.validate_module_info( module_apply, expected_name=module.name, expected_module_type=module.type, expected_datastore=module.datastore, expected_ds_version=module.datastore_version, expected_auto_apply=module.auto_apply, expected_visible=module.visible, expected_admin_only=admin_only, expected_contents=expected_contents, expected_status=expected_status, expected_message=expected_message) def validate_module_info(self, module_apply, expected_name=None, expected_module_type=None, expected_datastore=None, expected_ds_version=None, expected_auto_apply=None, expected_visible=None, expected_admin_only=None, expected_contents=None, expected_message=None, expected_status=None): prefix = "Module: %s -" % expected_name if expected_name: self.assert_equal(expected_name, module_apply.name, '%s Unexpected module name' % prefix) if expected_module_type: self.assert_equal(expected_module_type, module_apply.type, '%s Unexpected module type' % prefix) if expected_datastore: self.assert_equal(expected_datastore, module_apply.datastore, '%s Unexpected datastore' % prefix) if expected_ds_version: self.assert_equal(expected_ds_version, module_apply.datastore_version, '%s Unexpected datastore version' % prefix) if expected_auto_apply is not None: self.assert_equal(expected_auto_apply, module_apply.auto_apply, '%s Unexpected auto_apply' % prefix) if expected_visible is not None: self.assert_equal(expected_visible, module_apply.visible, '%s Unexpected visible' % prefix) if expected_admin_only is not None: self.assert_equal(expected_admin_only, module_apply.admin_only, '%s Unexpected admin_only' % prefix) if expected_contents is not None: self.assert_equal(expected_contents, module_apply.contents, '%s Unexpected contents' % prefix) if expected_message is not None: self.assert_equal(expected_message, module_apply.message, '%s Unexpected message' % prefix) if expected_status is not None: self.assert_equal(expected_status, module_apply.status, '%s Unexpected status' % prefix) def run_module_list_instance_after_apply(self): self.assert_module_list_instance( self.auth_client, self.instance_info.id, 1) def run_module_query_after_apply(self): expected_count = module_auto_apply_count_prior_to_create + 1 expected_results = self.create_default_query_expected_results( [self.main_test_module]) self.assert_module_query(self.auth_client, self.instance_info.id, expected_count=expected_count, expected_results=expected_results) def create_default_query_expected_results(self, modules, is_admin=False): expected_results = {} for module in modules: status = 'OK' message = self.get_module_message(module.name) contents = self.get_module_contents(module.name) if not is_admin and (not module.visible or module.auto_apply or not module.tenant_id): contents = ('Must be admin to retrieve contents for module %s' % module.name) elif self.MODULE_BINARY_SUFFIX in module.name: status = 'ERROR' message = 'Message not found in contents file' contents = self.MODULE_BINARY_CONTENTS if self.MODULE_BINARY_SUFFIX2 in module.name: contents = self.MODULE_BINARY_CONTENTS2 expected_results[module.name] = { 'status': status, 
'message': message, 'datastore': module.datastore, 'datastore_version': module.datastore_version, 'contents': contents, } return expected_results def run_create_inst_with_mods(self, expected_http_code=200): self.mod_inst_id = self.assert_inst_mod_create( self.main_test_module.id, 'module_1', expected_http_code) def assert_inst_mod_create(self, module_id, name_suffix, expected_http_code): inst = self.auth_client.instances.create( self.instance_info.name + name_suffix, self.instance_info.dbaas_flavor_href, self.instance_info.volume, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version, nics=self.instance_info.nics, modules=[module_id], ) self.assert_client_code(expected_http_code) return inst.id def run_module_delete_applied( self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.modules.delete, self.main_test_module.id) def run_module_remove(self): self.assert_module_remove(self.auth_client, self.instance_info.id, self.main_test_module.id) def assert_module_remove(self, client, instance_id, module_id, expected_http_code=200): client.instances.module_remove(instance_id, module_id) self.assert_client_code(expected_http_code, client) def run_wait_for_inst_with_mods(self, expected_states=['BUILD', 'ACTIVE']): self.assert_instance_action(self.mod_inst_id, expected_states, None) def run_module_query_after_inst_create(self): auto_modules = self._find_all_auto_apply_modules(visible=True) expected_count = 1 + len(auto_modules) expected_results = self.create_default_query_expected_results( [self.main_test_module] + auto_modules) self.assert_module_query(self.auth_client, self.mod_inst_id, expected_count=expected_count, expected_results=expected_results) def run_module_retrieve_after_inst_create(self): auto_modules = self._find_all_auto_apply_modules(visible=True) expected_count = 1 + len(auto_modules) expected_results = self.create_default_query_expected_results( [self.main_test_module] + auto_modules) self.assert_module_retrieve(self.auth_client, self.mod_inst_id, expected_count=expected_count, expected_results=expected_results) def assert_module_retrieve(self, client, instance_id, expected_count, expected_http_code=200, expected_results=None): try: temp_dir = tempfile.mkdtemp() prefix = 'contents' modretrieve_list = client.instances.module_retrieve( instance_id, directory=temp_dir, prefix=prefix) self.assert_client_code(expected_http_code, client) count = len(modretrieve_list) self.assert_equal(expected_count, count, "Wrong number of modules from retrieve") expected_results = expected_results or {} for module_name, filename in modretrieve_list.items(): if module_name in expected_results: expected = expected_results[module_name] contents_name = '%s_%s_%s_%s' % ( prefix, module_name, expected['datastore'], expected['datastore_version']) expected_filename = guestagent_utils.build_file_path( temp_dir, contents_name, 'dat') self.assert_equal(expected_filename, filename, 'Unexpected retrieve filename') if 'contents' in expected and expected['contents']: with open(filename, 'rb') as fh: contents = fh.read() # convert contents into bytearray to work with py27 # and py34 contents = bytes([ord(item) for item in contents]) expected_contents = bytes( [ord(item) for item in expected['contents']]) self.assert_equal(expected_contents, contents, "Unexpected contents for %s" % module_name) finally: operating_system.remove(temp_dir) def 
run_module_query_after_inst_create_admin(self):
        auto_modules = self._find_all_auto_apply_modules()
        expected_count = 1 + len(auto_modules)
        expected_results = self.create_default_query_expected_results(
            [self.main_test_module] + auto_modules, is_admin=True)
        self.assert_module_query(self.admin_client, self.mod_inst_id,
                                 expected_count=expected_count,
                                 expected_results=expected_results)

    def run_module_retrieve_after_inst_create_admin(self):
        auto_modules = self._find_all_auto_apply_modules()
        expected_count = 1 + len(auto_modules)
        expected_results = self.create_default_query_expected_results(
            [self.main_test_module] + auto_modules, is_admin=True)
        self.assert_module_retrieve(self.admin_client, self.mod_inst_id,
                                    expected_count=expected_count,
                                    expected_results=expected_results)

    def run_module_delete_auto_applied(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_auto_apply_module()
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.modules.delete, module.id)

    def run_delete_inst_with_mods(self, expected_last_state=['SHUTDOWN'],
                                  expected_http_code=202):
        self.assert_delete_instance(
            self.mod_inst_id, expected_last_state, expected_http_code)

    def assert_delete_instance(
            self, instance_id, expected_last_state, expected_http_code):
        self.auth_client.instances.delete(instance_id)
        self.assert_client_code(expected_http_code)
        self.assert_all_gone(instance_id, expected_last_state)

    # ModuleDeleteGroup methods

    def run_module_delete_non_existent(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.modules.delete, 'bad_id')

    def run_module_delete_unauth_user(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_raises(
            expected_exception, expected_http_code,
            self.unauth_client.modules.delete, self.main_test_module.id)

    def run_module_delete_hidden_by_non_admin(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_invisible_module()
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.modules.delete, module.id)

    def run_module_delete_all_tenant_by_non_admin(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_all_tenant_module()
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.modules.delete, module.id)

    def run_module_delete_auto_by_non_admin(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_auto_apply_module()
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.modules.delete, module.id)

    def run_module_delete(self):
        expected_count = len(self.auth_client.modules.list()) - 1
        test_module = test_modules.pop(0)
        self.assert_module_delete(self.auth_client, test_module.id,
                                  expected_count)

    def run_module_delete_admin(self):
        start_count = count = len(self.admin_client.modules.list())
        for test_module in test_modules:
            count -= 1
            self.report.log("Deleting module '%s' (tenant: %s)" % (
                test_module.name, test_module.tenant_id))
            self.assert_module_delete(self.admin_client, test_module.id,
                                      count)
        self.assert_not_equal(start_count, count, "Nothing was deleted")
        count = len(self.admin_client.modules.list())
        self.assert_equal(module_admin_count_prior_to_create, count,
                          "Wrong number of admin modules after deleting all")
        count = len(self.auth_client.modules.list())
        self.assert_equal(module_count_prior_to_create, count,
                          "Wrong number of modules after
deleting all") def assert_module_delete(self, client, module_id, expected_count): client.modules.delete(module_id) count = len(client.modules.list()) self.assert_equal(expected_count, count, "Wrong number of modules after delete") trove-5.0.0/trove/tests/scenario/runners/database_actions_runners.py0000664000567000056710000002026512701410316027226 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.scenario.runners.test_runners import TestRunner from troveclient.compat import exceptions class DatabaseActionsRunner(TestRunner): # TODO(pmalik): I believe the 202 (Accepted) should be replaced by # 200 (OK) as the actions are generally very fast and their results # available immediately upon execution of the request. This would # likely require replacing GA casts with calls which I believe are # more appropriate anyways. def __init__(self): super(DatabaseActionsRunner, self).__init__() self.db_defs = [] @property def first_db_def(self): if self.db_defs: return self.db_defs[0] raise SkipTest("No valid database definitions provided.") def run_databases_create(self, expected_http_code=202): databases = self.test_helper.get_valid_database_definitions() if databases: self.db_defs = self.assert_databases_create( self.instance_info.id, databases, expected_http_code) else: raise SkipTest("No valid database definitions provided.") def assert_databases_create(self, instance_id, serial_databases_def, expected_http_code): self.auth_client.databases.create(instance_id, serial_databases_def) self.assert_client_code(expected_http_code) return serial_databases_def def run_databases_list(self, expected_http_code=200): self.assert_databases_list( self.instance_info.id, self.db_defs, expected_http_code) def assert_databases_list(self, instance_id, expected_database_defs, expected_http_code, limit=2): full_list = self.auth_client.databases.list(instance_id) self.assert_client_code(expected_http_code) listed_databases = {database.name: database for database in full_list} self.assert_is_none(full_list.next, "Unexpected pagination in the list.") for database_def in expected_database_defs: database_name = database_def['name'] self.assert_true( database_name in listed_databases, "Database not included in the 'database-list' output: %s" % database_name) # Check that the system (ignored) databases are not included in the # output. system_databases = self.get_system_databases() self.assert_false( any(name in listed_databases for name in system_databases), "System databases should not be included in the 'database-list' " "output.") # Test list pagination. 
list_page = self.auth_client.databases.list(instance_id, limit=limit) self.assert_client_code(expected_http_code) self.assert_true(len(list_page) <= limit) if len(full_list) > limit: self.assert_is_not_none(list_page.next, "List page is missing.") else: self.assert_is_none(list_page.next, "An extra page in the list.") marker = list_page.next self.assert_pagination_match(list_page, full_list, 0, limit) if marker: last_database = list_page[-1] expected_marker = last_database.name self.assert_equal(expected_marker, marker, "Pagination marker should be the last element " "in the page.") list_page = self.auth_client.databases.list( instance_id, marker=marker) self.assert_client_code(expected_http_code) self.assert_pagination_match( list_page, full_list, limit, len(full_list)) def run_database_create_with_no_attributes( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_databases_create_failure( self.instance_info.id, {}, expected_exception, expected_http_code) def run_database_create_with_blank_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_databases_create_failure( self.instance_info.id, {'name': ''}, expected_exception, expected_http_code) def run_existing_database_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_databases_create_failure( self.instance_info.id, self.first_db_def, expected_exception, expected_http_code) def assert_databases_create_failure( self, instance_id, serial_databases_def, expected_exception, expected_http_code): self.assert_raises( expected_exception, expected_http_code, self.auth_client.databases.create, instance_id, serial_databases_def) def run_system_database_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). system_databases = self.get_system_databases() database_defs = [{'name': name} for name in system_databases] if system_databases: self.assert_databases_create_failure( self.instance_info.id, database_defs, expected_exception, expected_http_code) def run_database_delete(self, expected_http_code=202): for database_def in self.db_defs: self.assert_database_delete( self.instance_info.id, database_def['name'], expected_http_code) def assert_database_delete( self, instance_id, database_name, expected_http_code): self.auth_client.databases.delete(instance_id, database_name) self.assert_client_code(expected_http_code) for database in self.auth_client.databases.list(instance_id): if database.name == database_name: self.fail( "Database still listed after delete: %s" % database_name) def run_nonexisting_database_delete(self, expected_http_code=202): # Deleting a non-existing database is expected to succeed as if the # database was deleted. db_def = self.test_helper.get_non_existing_database_definition() self.assert_database_delete( self.instance_info.id, db_def['name'], expected_http_code) def run_system_database_delete( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). 
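        # The system databases come from the datastore's 'ignore_dbs'
        # configuration property (see get_system_databases() below); for
        # MySQL this typically covers 'mysql', 'information_schema' and
        # 'performance_schema'.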
system_databases = self.get_system_databases() if system_databases: for name in system_databases: self.assert_database_delete_failure( self.instance_info.id, name, expected_exception, expected_http_code) def assert_database_delete_failure( self, instance_id, database_name, expected_exception, expected_http_code): self.assert_raises(expected_exception, expected_http_code, self.auth_client.databases.delete, instance_id, database_name) def get_system_databases(self): return self.get_datastore_config_property('ignore_dbs') trove-5.0.0/trove/tests/scenario/runners/instance_create_runners.py0000664000567000056710000003503612701410316027073 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from proboscis import SkipTest from trove.tests.api.instances import CheckInstance, InstanceTestInfo from trove.tests.config import CONFIG from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner class InstanceCreateRunner(TestRunner): def __init__(self): super(InstanceCreateRunner, self).__init__() self.init_inst_id = None self.init_inst_dbs = None self.init_inst_users = None self.init_inst_host = None self.init_inst_data = None self.init_config_group_id = None def run_empty_instance_create( self, expected_states=['BUILD', 'ACTIVE'], expected_http_code=200): name = self.instance_info.name flavor = self._get_instance_flavor() trove_volume_size = CONFIG.get('trove_volume_size', 1) info = self.assert_instance_create( name, flavor, trove_volume_size, [], [], None, None, CONFIG.dbaas_datastore, CONFIG.dbaas_datastore_version, expected_states, expected_http_code, create_helper_user=True) # Update the shared instance info. self.instance_info.databases = info.databases self.instance_info.users = info.users self.instance_info.dbaas_datastore = info.dbaas_datastore self.instance_info.dbaas_datastore_version = (info. 
dbaas_datastore_version)
        self.instance_info.dbaas_flavor_href = info.dbaas_flavor_href
        self.instance_info.volume = info.volume
        self.instance_info.id = info.id

    def run_initial_configuration_create(self, expected_http_code=200):
        dynamic_config = self.test_helper.get_dynamic_group()
        non_dynamic_config = self.test_helper.get_non_dynamic_group()
        values = dynamic_config or non_dynamic_config
        if values:
            json_def = json.dumps(values)
            result = self.auth_client.configurations.create(
                'initial_configuration_for_instance_create',
                json_def,
                "Configuration group used by instance create tests.",
                datastore=self.instance_info.dbaas_datastore,
                datastore_version=self.instance_info.dbaas_datastore_version)
            self.assert_client_code(expected_http_code)
            self.init_config_group_id = result.id
        else:
            raise SkipTest("No groups defined.")

    def run_initialized_instance_create(
            self, with_dbs=True, with_users=True, configuration_id=None,
            expected_states=['BUILD', 'ACTIVE'], expected_http_code=200,
            create_helper_user=True):
        if self.is_using_existing_instance:
            # The user requested to run the tests using an existing instance.
            # We therefore skip any scenarios that involve creating new
            # test instances.
            raise SkipTest("Using an existing instance.")

        name = self.instance_info.name + '_init'
        flavor = self._get_instance_flavor()
        trove_volume_size = CONFIG.get('trove_volume_size', 1)
        self.init_inst_dbs = (self.test_helper.get_valid_database_definitions()
                              if with_dbs else [])
        self.init_inst_users = (self.test_helper.get_valid_user_definitions()
                                if with_users else [])
        if configuration_id:
            self.init_config_group_id = configuration_id
        if (self.init_inst_dbs or self.init_inst_users or
                self.init_config_group_id):
            info = self.assert_instance_create(
                name, flavor, trove_volume_size,
                self.init_inst_dbs, self.init_inst_users,
                self.init_config_group_id, None,
                CONFIG.dbaas_datastore, CONFIG.dbaas_datastore_version,
                expected_states, expected_http_code,
                create_helper_user=create_helper_user)
            self.init_inst_id = info.id
        else:
            # There is no need to run this test as it's effectively the same
            # as the empty instance test.
            raise SkipTest("No testable initial properties provided.")

    def _get_instance_flavor(self):
        if self.EPHEMERAL_SUPPORT:
            flavor_name = CONFIG.values.get('instance_eph_flavor_name',
                                            'eph.rd-tiny')
        else:
            flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny')
        return self.get_flavor(flavor_name)

    def _get_flavor_href(self, flavor):
        return self.auth_client.find_flavor_self_href(flavor)

    def assert_instance_create(
            self, name, flavor, trove_volume_size,
            database_definitions, user_definitions,
            configuration_id, root_password, datastore, datastore_version,
            expected_states, expected_http_code, create_helper_user=False):
        """This assert method executes a 'create' call and verifies the server
        response. It neither waits for the instance to become available nor
        does it perform any other validation itself.
        It has been designed this way to increase test granularity
        (other tests may run while the instance is building) and also to
        allow its reuse in other runners.
        """
        databases = database_definitions
        users = [{'name': item['name'], 'password': item['password']}
                 for item in user_definitions]

        # Here we add helper user/database if any.
        if create_helper_user:
            helper_db_def, helper_user_def, root_def = self.build_helper_defs()
            if helper_db_def:
                self.report.log(
                    "Appending a helper database '%s' to the instance "
                    "definition." % helper_db_def['name'])
                databases.append(helper_db_def)
            if helper_user_def:
                self.report.log(
                    "Appending a helper user '%s:%s' to the instance "
                    "definition."
                    % (helper_user_def['name'], helper_user_def['password']))
                users.append(helper_user_def)

        instance_info = InstanceTestInfo()
        instance_info.name = name
        instance_info.databases = databases
        instance_info.users = users
        instance_info.dbaas_datastore = CONFIG.dbaas_datastore
        instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version
        instance_info.dbaas_flavor_href = self._get_flavor_href(flavor)
        if self.VOLUME_SUPPORT:
            instance_info.volume = {'size': trove_volume_size}
        else:
            instance_info.volume = None

        shared_network = CONFIG.get('shared_network', None)
        if shared_network:
            instance_info.nics = [{'net-id': shared_network}]

        self.report.log("Testing create instance: %s"
                        % {'name': name,
                           'flavor': flavor.id,
                           'volume': trove_volume_size,
                           'nics': instance_info.nics,
                           'databases': databases,
                           'users': users,
                           'configuration': configuration_id,
                           'root password': root_password,
                           'datastore': datastore,
                           'datastore version': datastore_version})

        instance = self.get_existing_instance()
        if instance:
            self.report.log("Using an existing instance: %s" % instance.id)
            self.assert_equal(expected_states[-1], instance.status,
                              "Given instance is in a bad state.")
        else:
            self.report.log("Creating a new instance.")
            instance = self.auth_client.instances.create(
                instance_info.name,
                instance_info.dbaas_flavor_href,
                instance_info.volume,
                instance_info.databases,
                instance_info.users,
                nics=instance_info.nics,
                configuration=configuration_id,
                availability_zone="nova",
                datastore=instance_info.dbaas_datastore,
                datastore_version=instance_info.dbaas_datastore_version)
            self.assert_instance_action(
                instance.id, expected_states[0:1], expected_http_code)

        instance_info.id = instance.id

        with CheckInstance(instance._info) as check:
            check.flavor()
            check.datastore()
            check.links(instance._info['links'])
            if self.VOLUME_SUPPORT:
                check.volume()
                self.assert_equal(trove_volume_size,
                                  instance._info['volume']['size'],
                                  "Unexpected Trove volume size")

        self.assert_equal(instance_info.name, instance._info['name'],
                          "Unexpected instance name")
        self.assert_equal(flavor.id,
                          int(instance._info['flavor']['id']),
                          "Unexpected instance flavor")
        self.assert_equal(instance_info.dbaas_datastore,
                          instance._info['datastore']['type'],
                          "Unexpected instance datastore")
        self.assert_equal(instance_info.dbaas_datastore_version,
                          instance._info['datastore']['version'],
                          "Unexpected instance datastore version")
        self.assert_configuration_group(instance_info.id, configuration_id)

        return instance_info

    def wait_for_created_instances(self, expected_states=['BUILD', 'ACTIVE']):
        instances = [self.instance_info.id]
        if self.init_inst_id:
            instances.append(self.init_inst_id)
        self.assert_all_instance_states(instances, expected_states)

    def run_add_initialized_instance_data(self):
        self.init_inst_data = DataType.small
        self.init_inst_host = self.get_instance_host(self.instance_info.id)
        self.test_helper.add_data(self.init_inst_data, self.init_inst_host)

    def run_validate_initialized_instance(self):
        if self.init_inst_id:
            self.assert_instance_properties(
                self.init_inst_id, self.init_inst_dbs, self.init_inst_users,
                self.init_config_group_id, self.init_inst_data)

    def assert_instance_properties(
            self, instance_id, expected_dbs_definitions,
            expected_user_definitions, expected_config_group_id,
            expected_data_type):
        if expected_dbs_definitions:
            self.assert_database_list(instance_id, expected_dbs_definitions)
        else:
            self.report.log("No
databases to validate for instance: %s" % instance_id) if expected_user_definitions: self.assert_user_list(instance_id, expected_user_definitions) else: self.report.log("No users to validate for instance: %s" % instance_id) self.assert_configuration_group(instance_id, expected_config_group_id) if self.init_inst_host: self.test_helper.verify_data( expected_data_type, self.init_inst_host) else: self.report.log("No data to validate for instance: %s" % instance_id) def assert_configuration_group(self, instance_id, expected_group_id): instance = self.get_instance(instance_id) if expected_group_id: self.assert_equal(expected_group_id, instance.configuration['id'], "Wrong configuration group attached") else: self.assert_false(hasattr(instance, 'configuration'), "No configuration group expected") def assert_database_list(self, instance_id, expected_databases): expected_names = self._get_names(expected_databases) full_list = self.auth_client.databases.list(instance_id) self.assert_is_none(full_list.next, "Unexpected pagination in the database list.") listed_names = [database.name for database in full_list] self.assert_is_sublist(expected_names, listed_names, "Mismatch in instance databases.") def _get_names(self, definitions): return [item['name'] for item in definitions] def assert_user_list(self, instance_id, expected_users): expected_names = self._get_names(expected_users) full_list = self.auth_client.users.list(instance_id) self.assert_is_none(full_list.next, "Unexpected pagination in the user list.") listed_names = [user.name for user in full_list] self.assert_is_sublist(expected_names, listed_names, "Mismatch in instance users.") # Verify that user definitions include only created databases. all_databases = self._get_names( self.test_helper.get_valid_database_definitions()) for user in expected_users: self.assert_is_sublist( self._get_names(user['databases']), all_databases, "Definition of user '%s' specifies databases not included in " "the list of initial databases." % user['name']) def run_initialized_instance_delete(self, expected_states=['SHUTDOWN'], expected_http_code=202): if self.init_inst_id: self.auth_client.instances.delete(self.init_inst_id) self.assert_client_code(expected_http_code) self.assert_all_gone(self.init_inst_id, expected_states[-1]) else: raise SkipTest("Cleanup is not required.") def run_initial_configuration_delete(self, expected_http_code=202): if self.init_config_group_id: self.auth_client.configurations.delete(self.init_config_group_id) self.assert_client_code(expected_http_code) else: raise SkipTest("Cleanup is not required.") trove-5.0.0/trove/tests/scenario/runners/instance_delete_runners.py0000664000567000056710000000342112701410316027063 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
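# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the original test suite.
# The runners in this package pass 'expected_states' lists such as
# ['BUILD', 'ACTIVE'] and treat the last element as the terminal state,
# tolerating intermediate states that were skipped between polls.  A
# minimal, self-contained poller demonstrating that convention follows;
# the real logic lives in TestRunner._assert_instance_states in
# test_runners.py, and 'get_status' here is a hypothetical stand-in for
# the Trove client call.
# ---------------------------------------------------------------------------
import itertools
import time


def wait_for_states(get_status, expected_states, sleep_time=1, time_out=30):
    """Poll 'get_status' until all 'expected_states' are seen in order.

    'get_status' is any callable returning the current status string.
    Returns True if the final expected state was reached in time.
    """
    deadline = time.time() + time_out
    remaining = list(expected_states)
    while remaining and time.time() < deadline:
        current = get_status()
        if current in remaining:
            # Drop everything up to and including the observed state;
            # intermediate states may have been missed between polls.
            remaining = remaining[remaining.index(current) + 1:]
        else:
            time.sleep(sleep_time)
    return not remaining


if __name__ == '__main__':
    # Simulate an instance that reports BUILD twice and then ACTIVE.
    statuses = itertools.chain(['BUILD', 'BUILD'], itertools.repeat('ACTIVE'))
    assert wait_for_states(lambda: next(statuses), ['BUILD', 'ACTIVE'],
                           sleep_time=0)
# ---------------------------------------------------------------------------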
import proboscis

from trove.tests.scenario.runners.test_runners import TestRunner


class InstanceDeleteRunner(TestRunner):

    def __init__(self):
        super(InstanceDeleteRunner, self).__init__()

    def run_instance_delete(
            self, expected_states=['SHUTDOWN'], expected_http_code=202):
        if self.has_do_not_delete_instance:
            self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
                            "specified, skipping delete...")
            raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
                                     "was specified.")

        self.assert_instance_delete(self.instance_info.id, expected_states,
                                    expected_http_code)

    def assert_instance_delete(self, instance_id, expected_states,
                               expected_http_code):
        self.report.log("Testing delete on instance: %s" % instance_id)

        self.auth_client.instances.delete(instance_id)
        self.assert_instance_action(instance_id, expected_states,
                                    expected_http_code)
        self.assert_all_gone(instance_id, expected_states[-1])
trove-5.0.0/trove/tests/scenario/runners/negative_cluster_actions_runners.py0000664000567000056710000001056312701410316031025 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import SkipTest

from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions


class NegativeClusterActionsRunner(TestRunner):

    def __init__(self):
        super(NegativeClusterActionsRunner, self).__init__()

    def run_create_constrained_size_cluster(self, min_nodes=2,
                                            max_nodes=None,
                                            expected_http_code=400):
        self.assert_create_constrained_size_cluster('negative_cluster',
                                                    min_nodes, max_nodes,
                                                    expected_http_code)

    def assert_create_constrained_size_cluster(self, cluster_name,
                                               min_nodes, max_nodes,
                                               expected_http_code):
        # Create a cluster with less than 'min_nodes'.
        if min_nodes:
            instances_def = [self.build_flavor()] * (min_nodes - 1)
            self._assert_cluster_create_raises(cluster_name, instances_def,
                                               expected_http_code)

        # Create a cluster with more than 'max_nodes'.
        if max_nodes:
            instances_def = [self.build_flavor()] * (max_nodes + 1)
            self._assert_cluster_create_raises(cluster_name, instances_def,
                                               expected_http_code)

    def run_create_heterogeneous_cluster(self, expected_http_code=400):
        # Create a cluster with different node flavors.
        instances_def = [self.build_flavor(flavor_id=2, volume_size=1),
                         self.build_flavor(flavor_id=3, volume_size=1)]
        self._assert_cluster_create_raises('heterocluster', instances_def,
                                           expected_http_code)

        # Create a cluster with different volume sizes.
        instances_def = [self.build_flavor(flavor_id=2, volume_size=1),
                         self.build_flavor(flavor_id=2, volume_size=2)]
        self._assert_cluster_create_raises('heterocluster', instances_def,
                                           expected_http_code)

    def _assert_cluster_create_raises(self, cluster_name, instances_def,
                                      expected_http_code):
        self.assert_raises(exceptions.BadRequest, expected_http_code,
                           self.auth_client.clusters.create,
                           cluster_name,
                           self.instance_info.dbaas_datastore,
                           self.instance_info.dbaas_datastore_version,
                           instances=instances_def)


class MongodbNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        super(MongodbNegativeClusterActionsRunner,
              self).run_create_constrained_size_cluster(min_nodes=3,
                                                        max_nodes=3)


class CassandraNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        raise SkipTest("No constraints apply to the number of cluster nodes.")

    def run_create_heterogeneous_cluster(self):
        raise SkipTest("No constraints apply to the size of cluster nodes.")


class RedisNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        raise SkipTest("No constraints apply to the number of cluster nodes.")

    def run_create_heterogeneous_cluster(self):
        raise SkipTest("No constraints apply to the size of cluster nodes.")


class PxcNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        raise SkipTest("No constraints apply to the number of cluster nodes.")
trove-5.0.0/trove/tests/scenario/runners/test_runners.py0000664000567000056710000004310612701410316024720 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time as timer

from oslo_config.cfg import NoSuchOptError
from proboscis import asserts
import swiftclient
from troveclient.compat import exceptions

from trove.common import cfg
from trove.common import exception
from trove.common import utils
from trove.common.utils import poll_until, build_polling_task
from trove.tests.api.instances import instance_info
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements

CONF = cfg.CONF


class TestRunner(object):

    """
    Base class for all 'Runner' classes.

    The Runner classes are those that actually do the work.  The 'Group'
    classes are set up with decorators that control how the tests flow,
    and are used to organize the tests - however they are typically set
    up to just call a corresponding method in a Runner class.

    A Runner class can be overridden if a particular set of tests
    needs to have DataStore-specific coding.  The corresponding Group
    class will try to first load a DataStore-specific class, and then
    fall back to the generic one if need be.  For example, the
    NegativeClusterActionsGroup class specifies a runner_base_name of
    NegativeClusterActionsRunner.
If the manager of the default datastore is mongodb, then the MongodbNegativeClusterActionsRunner is used instead. The prefix is created by capitalizing the name of the manager - overriding classes *must* follow this naming convention to be automatically used. The main assumption made here is that if a manager is used for different datastore versions, then the overriding runner should also be valid for the same datastore versions. """ USE_INSTANCE_ID_FLAG = 'TESTS_USE_INSTANCE_ID' DO_NOT_DELETE_INSTANCE_FLAG = 'TESTS_DO_NOT_DELETE_INSTANCE' VOLUME_SUPPORT = CONFIG.get('trove_volume_support', True) EPHEMERAL_SUPPORT = not VOLUME_SUPPORT and CONFIG.get('device_path', None) ROOT_PARTITION = not (VOLUME_SUPPORT or CONFIG.get('device_path', None)) report = CONFIG.get_report() def __init__(self, sleep_time=10, timeout=1200): self.def_sleep_time = sleep_time self.def_timeout = timeout self.instance_info = instance_info instance_info.dbaas_datastore = CONFIG.dbaas_datastore instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version if self.VOLUME_SUPPORT: instance_info.volume = {'size': CONFIG.get('trove_volume_size', 1)} else: instance_info.volume = None self.auth_client = create_dbaas_client(self.instance_info.user) self._unauth_client = None self._admin_client = None self._swift_client = None self._test_helper = None @classmethod def fail(cls, message): asserts.fail(message) @classmethod def assert_is_sublist(cls, sub_list, full_list, message=None): return cls.assert_true(set(sub_list).issubset(full_list), message) @classmethod def assert_unique(cls, iterable, message=None): """Assert that a given iterable contains only unique elements. """ cls.assert_equal(len(iterable), len(set(iterable)), message) @classmethod def assert_true(cls, condition, message=None): asserts.assert_true(condition, message=message) @classmethod def assert_false(cls, condition, message=None): asserts.assert_false(condition, message=message) @classmethod def assert_is_none(cls, value, message=None): asserts.assert_is_none(value, message=message) @classmethod def assert_is_not_none(cls, value, message=None): asserts.assert_is_not_none(value, message=message) @classmethod def assert_list_elements_equal(cls, expected, actual, message=None): """Assert that two lists contain same elements (with same multiplicities) ignoring the element order. """ return cls.assert_equal(sorted(expected), sorted(actual), message) @classmethod def assert_equal(cls, expected, actual, message=None): if not message: message = 'Unexpected value' try: message += ": '%s' (expected '%s')." % (actual, expected) except TypeError: pass asserts.assert_equal(expected, actual, message=message) @classmethod def assert_not_equal(cls, expected, actual, message=None): if not message: message = 'Expected different value than' try: message += ": '%s'." % expected except TypeError: pass asserts.assert_not_equal(expected, actual, message=message) @property def test_helper(self): return self._test_helper @test_helper.setter def test_helper(self, test_helper): self._test_helper = test_helper @property def unauth_client(self): if not self._unauth_client: self._unauth_client = self._create_unauthorized_client() return self._unauth_client def _create_unauthorized_client(self): """Create a client from a different 'unauthorized' user to facilitate negative testing. 
""" requirements = Requirements(is_admin=False) other_user = CONFIG.users.find_user( requirements, black_list=[self.instance_info.user.auth_user]) return create_dbaas_client(other_user) @property def admin_client(self): if not self._admin_client: self._admin_client = self._create_admin_client() return self._admin_client def _create_admin_client(self): """Create a client from an admin user.""" requirements = Requirements(is_admin=True, services=["swift"]) admin_user = CONFIG.users.find_user(requirements) return create_dbaas_client(admin_user) @property def swift_client(self): if not self._swift_client: self._swift_client = self._create_swift_client() return self._swift_client def _create_swift_client(self): """Create a swift client from the admin user details.""" requirements = Requirements(is_admin=True, services=["swift"]) user = CONFIG.users.find_user(requirements) os_options = {'region_name': CONFIG.trove_client_region_name} return swiftclient.client.Connection( authurl=CONFIG.nova_client['auth_url'], user=user.auth_user, key=user.auth_key, tenant_name=user.tenant, auth_version='2.0', os_options=os_options) def get_client_tenant(self, client): tenant_name = client.real_client.client.tenant service_url = client.real_client.client.service_url su_parts = service_url.split('/') tenant_id = su_parts[-1] return tenant_name, tenant_id def assert_raises(self, expected_exception, expected_http_code, client_cmd, *cmd_args, **cmd_kwargs): asserts.assert_raises(expected_exception, client_cmd, *cmd_args, **cmd_kwargs) self.assert_client_code(expected_http_code) def get_datastore_config_property(self, name, datastore=None): """Get a Trove configuration property for a given datastore. Use the current instance's datastore if None. """ try: datastore = datastore or self.instance_info.dbaas_datastore return CONF.get(datastore).get(name) except NoSuchOptError: return CONF.get(name) @property def is_using_existing_instance(self): return self.has_env_flag(self.USE_INSTANCE_ID_FLAG) @staticmethod def has_env_flag(flag_name): """Return whether a given flag was set.""" return os.environ.get(flag_name, None) is not None def get_existing_instance(self): if self.is_using_existing_instance: instance_id = os.environ.get(self.USE_INSTANCE_ID_FLAG) return self.get_instance(instance_id) return None @property def has_do_not_delete_instance(self): return self.has_env_flag(self.DO_NOT_DELETE_INSTANCE_FLAG) def assert_instance_action( self, instance_ids, expected_states, expected_http_code): self.assert_client_code(expected_http_code) if expected_states: self.assert_all_instance_states( instance_ids if utils.is_collection(instance_ids) else [instance_ids], expected_states) def assert_client_code(self, expected_http_code, client=None): if expected_http_code is not None: client = client or self.auth_client self.assert_equal(expected_http_code, client.last_http_code, "Unexpected client status code") def assert_all_instance_states(self, instance_ids, expected_states): tasks = [build_polling_task( lambda: self._assert_instance_states(instance_id, expected_states), sleep_time=self.def_sleep_time, time_out=self.def_timeout) for instance_id in instance_ids] poll_until(lambda: all(poll_task.ready() for poll_task in tasks), sleep_time=self.def_sleep_time, time_out=self.def_timeout) for task in tasks: if task.has_result(): self.assert_true( task.poll_result(), "Some instances failed to acquire all expected states.") elif task.has_exception(): self.fail(str(task.poll_exception())) def _assert_instance_states(self, instance_id, 
expected_states, fast_fail_status=['ERROR', 'FAILED'], require_all_states=False): """Keep polling for the expected instance states until the instance acquires either the last or fast-fail state. If the instance state does not match the state expected at the time of polling (and 'require_all_states' is not set) the code assumes the instance had already acquired before and moves to the next expected state. """ found = False for status in expected_states: if require_all_states or found or self._has_status( instance_id, status, fast_fail_status=fast_fail_status): found = True start_time = timer.time() try: poll_until(lambda: self._has_status( instance_id, status, fast_fail_status=fast_fail_status), sleep_time=self.def_sleep_time, time_out=self.def_timeout) self.report.log("Instance has gone '%s' in %s." % (status, self._time_since(start_time))) except exception.PollTimeOut: self.report.log( "Status of instance '%s' did not change to '%s' " "after %s." % (instance_id, status, self._time_since(start_time))) return False else: self.report.log( "Instance state was not '%s', moving to the next expected " "state." % status) return found def _time_since(self, start_time): return '%.1fs' % (timer.time() - start_time) def assert_all_gone(self, instance_ids, expected_last_status): self._wait_all_deleted(instance_ids if utils.is_collection(instance_ids) else [instance_ids], expected_last_status) def assert_pagination_match( self, list_page, full_list, start_idx, end_idx): self.assert_equal(full_list[start_idx:end_idx], list(list_page), "List page does not match the expected full " "list section.") def _wait_all_deleted(self, instance_ids, expected_last_status): tasks = [build_polling_task( lambda: self._wait_for_delete(instance_id, expected_last_status), sleep_time=self.def_sleep_time, time_out=self.def_timeout) for instance_id in instance_ids] poll_until(lambda: all(poll_task.ready() for poll_task in tasks), sleep_time=self.def_sleep_time, time_out=self.def_timeout) for task in tasks: if task.has_result(): self.assert_true( task.poll_result(), "Some instances were not removed.") elif task.has_exception(): self.fail(str(task.poll_exception())) def _wait_for_delete(self, instance_id, expected_last_status): start_time = timer.time() try: self._poll_while(instance_id, expected_last_status, sleep_time=self.def_sleep_time, time_out=self.def_timeout) except exceptions.NotFound: self.assert_client_code(404) self.report.log("Instance was removed in %s." % self._time_since(start_time)) return True except exception.PollTimeOut: self.report.log( "Instance '%s' still existed after %s." % (instance_id, self._time_since(start_time))) return False def _poll_while(self, instance_id, expected_status, sleep_time=1, time_out=None): poll_until(lambda: not self._has_status(instance_id, expected_status), sleep_time=sleep_time, time_out=time_out) def _has_status(self, instance_id, status, fast_fail_status=None): fast_fail_status = fast_fail_status or [] instance = self.get_instance(instance_id) self.report.log("Polling instance '%s' for state '%s', was '%s'." 
% (instance_id, status, instance.status)) if instance.status in fast_fail_status: raise RuntimeError("Instance '%s' acquired a fast-fail status: %s" % (instance_id, instance.status)) return instance.status == status def get_instance(self, instance_id): return self.auth_client.instances.get(instance_id) def get_instance_host(self, instance_id=None): instance_id = instance_id or self.instance_info.id instance = self.get_instance(instance_id) host = str(instance._info['ip'][0]) self.report.log("Found host %s for instance %s." % (host, instance_id)) return host def build_flavor(self, flavor_id=2, volume_size=1): return {"flavorRef": flavor_id, "volume": {"size": volume_size}} def get_flavor(self, flavor_name): flavors = self.auth_client.find_flavors_by_name(flavor_name) self.assert_equal( 1, len(flavors), "Unexpected number of flavors with name '%s' found." % flavor_name) flavor = flavors[0] self.assert_is_not_none(flavor, "Flavor '%s' not found." % flavor_name) return flavor def copy_dict(self, d, ignored_keys=None): return {k: v for k, v in d.items() if not ignored_keys or k not in ignored_keys} def create_test_helper_on_instance(self, instance_id): """Here we add a helper user/database, if any, to a given instance via the Trove API. These are for internal use by the test framework and should not be changed by individual test-cases. """ database_def, user_def, root_def = self.build_helper_defs() if database_def: self.report.log( "Creating a helper database '%s' on instance: %s" % (database_def['name'], instance_id)) self.auth_client.databases.create(instance_id, [database_def]) if user_def: self.report.log( "Creating a helper user '%s:%s' on instance: %s" % (user_def['name'], user_def['password'], instance_id)) self.auth_client.users.create(instance_id, [user_def]) if root_def: # Not enabling root on a single instance of the cluster here # because we want to test the cluster root enable instead. pass def build_helper_defs(self): """Build helper database and user JSON definitions if credentials are defined by the helper. """ database_def = None def _get_credentials(creds): if creds: username = creds.get('name') if username: password = creds.get('password', '') return {'name': username, 'password': password, 'databases': [{'name': database}]} return None credentials = self.test_helper.get_helper_credentials() if credentials: database = credentials.get('database') if database: database_def = {'name': database} credentials_root = self.test_helper.get_helper_credentials_root() return (database_def, _get_credentials(credentials), _get_credentials(credentials_root)) trove-5.0.0/trove/tests/scenario/runners/instance_actions_runners.py0000664000567000056710000001031112701410316027255 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
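# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the original test suite.
# TestRunner.build_helper_defs() above returns database/user definitions
# shaped like the JSON bodies the Trove API consumes.  A standalone
# version of that shaping logic, with hypothetical credentials in the
# usage example:
# ---------------------------------------------------------------------------
def build_helper_defs(helper_credentials, helper_credentials_root=None):
    """Return (database_def, user_def, root_def) dictionaries.

    Mirrors the shape produced by TestRunner.build_helper_defs():
    each user is granted access to the single helper database.
    """
    database = (helper_credentials.get('database')
                if helper_credentials else None)
    database_def = {'name': database} if database else None

    def _user_def(creds):
        # Users without a name are treated as undefined credentials.
        if creds and creds.get('name'):
            return {'name': creds['name'],
                    'password': creds.get('password', ''),
                    'databases': [{'name': database}]}
        return None

    return (database_def,
            _user_def(helper_credentials),
            _user_def(helper_credentials_root))


if __name__ == '__main__':
    db_def, user_def, root_def = build_helper_defs(
        {'name': 'test_user', 'password': 's3cret', 'database': 'test_db'})
    assert db_def == {'name': 'test_db'}
    assert user_def['databases'] == [{'name': 'test_db'}]
    assert root_def is None
# ---------------------------------------------------------------------------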
from proboscis import SkipTest from trove.tests.config import CONFIG from trove.tests.scenario.runners.test_runners import TestRunner class InstanceActionsRunner(TestRunner): def __init__(self): super(InstanceActionsRunner, self).__init__() def _get_resize_flavor(self): if self.EPHEMERAL_SUPPORT: flavor_name = CONFIG.values.get( 'instance_bigger_eph_flavor_name', 'eph.rd-smaller') else: flavor_name = CONFIG.values.get( 'instance_bigger_flavor_name', 'm1.rd-smaller') return self.get_flavor(flavor_name) def run_instance_restart( self, expected_states=['REBOOT', 'ACTIVE'], expected_http_code=202): self.assert_instance_restart(self.instance_info.id, expected_states, expected_http_code) def assert_instance_restart(self, instance_id, expected_states, expected_http_code): self.report.log("Testing restart on instance: %s" % instance_id) self.auth_client.instances.restart(instance_id) self.assert_instance_action(instance_id, expected_states, expected_http_code) def run_instance_resize_volume( self, resize_amount=1, expected_states=['RESIZE', 'ACTIVE'], expected_http_code=202): if self.VOLUME_SUPPORT: self.assert_instance_resize_volume(self.instance_info.id, resize_amount, expected_states, expected_http_code) else: raise SkipTest("Volume support is disabled.") def assert_instance_resize_volume(self, instance_id, resize_amount, expected_states, expected_http_code): self.report.log("Testing volume resize by '%d' on instance: %s" % (resize_amount, instance_id)) instance = self.get_instance(instance_id) old_volume_size = int(instance.volume['size']) new_volume_size = old_volume_size + resize_amount self.auth_client.instances.resize_volume(instance_id, new_volume_size) self.assert_instance_action(instance_id, expected_states, expected_http_code) instance = self.get_instance(instance_id) self.assert_equal(instance.volume['size'], new_volume_size, 'Unexpected new volume size') def run_instance_resize_flavor( self, expected_states=['RESIZE', 'ACTIVE'], expected_http_code=202): resize_flavor = self._get_resize_flavor() self.assert_instance_resize_flavor(self.instance_info.id, resize_flavor, expected_states, expected_http_code) def assert_instance_resize_flavor(self, instance_id, resize_flavor, expected_states, expected_http_code): self.report.log("Testing resize to '%s' on instance: %s" % (resize_flavor, instance_id)) self.auth_client.instances.resize_instance(instance_id, resize_flavor.id) self.assert_instance_action(instance_id, expected_states, expected_http_code) instance = self.get_instance(instance_id) self.assert_equal(instance.flavor['id'], resize_flavor.id, 'Unexpected resize flavor_id') trove-5.0.0/trove/tests/scenario/runners/backup_runners.py0000664000567000056710000003457312701410316025216 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
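# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the original test suite.
# The backup runner below waits on asynchronous state changes via the
# poll_until helper it imports from trove.common.utils.  A minimal,
# standard-library-only equivalent of that polling pattern (the real
# implementation may differ in details; 'PollTimeOut' is a stand-in for
# trove.common.exception.PollTimeOut):
# ---------------------------------------------------------------------------
import time


class PollTimeOut(Exception):
    """Raised when the condition does not become true in time."""


def poll_until(condition, sleep_time=1, time_out=None):
    """Call 'condition' repeatedly until it returns a truthy value."""
    deadline = None if time_out is None else time.time() + time_out
    while True:
        result = condition()
        if result:
            return result
        if deadline is not None and time.time() >= deadline:
            raise PollTimeOut()
        time.sleep(sleep_time)


if __name__ == '__main__':
    counter = {'calls': 0}

    def _ready():
        counter['calls'] += 1
        return counter['calls'] >= 3

    poll_until(_ready, sleep_time=0, time_out=5)
    assert counter['calls'] == 3
# ---------------------------------------------------------------------------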
from proboscis import SkipTest from troveclient.compat import exceptions from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner class BackupRunner(TestRunner): def __init__(self): self.TIMEOUT_BACKUP_CREATE = 60 * 30 self.TIMEOUT_BACKUP_DELETE = 120 super(BackupRunner, self).__init__(sleep_time=20, timeout=self.TIMEOUT_BACKUP_CREATE) self.BACKUP_NAME = 'backup_test' self.BACKUP_DESC = 'test description' self.backup_host = None self.backup_info = None self.backup_count_prior_to_create = 0 self.backup_count_for_ds_prior_to_create = 0 self.backup_count_for_instance_prior_to_create = 0 self.incremental_backup_info = None self.restore_instance_id = 0 self.restore_host = None def run_backup_create_instance_invalid( self, expected_exception=exceptions.BadRequest, expected_http_code=400): invalid_inst_id = 'invalid-inst-id' self.assert_raises( expected_exception, expected_http_code, self.auth_client.backups.create, self.BACKUP_NAME, invalid_inst_id, self.BACKUP_DESC) def run_backup_create_instance_not_found( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, expected_http_code, self.auth_client.backups.create, self.BACKUP_NAME, generate_uuid(), self.BACKUP_DESC) def run_add_data_for_backup(self): self.backup_host = self.get_instance_host() self.assert_add_data_for_backup(self.backup_host) def assert_add_data_for_backup(self, host): """In order for this to work, the corresponding datastore 'helper' class should implement the 'add_large_data' method. """ self.test_helper.add_data(DataType.large, host) def run_verify_data_for_backup(self): self.assert_verify_backup_data(self.backup_host) def assert_verify_backup_data(self, host): """In order for this to work, the corresponding datastore 'helper' class should implement the 'verify_large_data' method. """ self.test_helper.verify_data(DataType.large, host) def run_backup_create(self): self.assert_backup_create() def assert_backup_create(self): # Necessary to test that the count increases. 
self.backup_count_prior_to_create = len( self.auth_client.backups.list()) self.backup_count_for_ds_prior_to_create = len( self.auth_client.backups.list( datastore=self.instance_info.dbaas_datastore)) self.backup_count_for_instance_prior_to_create = len( self.auth_client.instances.backups(self.instance_info.id)) result = self.auth_client.backups.create( self.BACKUP_NAME, self.instance_info.id, self.BACKUP_DESC) self.backup_info = result self.assert_equal(self.BACKUP_NAME, result.name, 'Unexpected backup name') self.assert_equal(self.BACKUP_DESC, result.description, 'Unexpected backup description') self.assert_equal(self.instance_info.id, result.instance_id, 'Unexpected instance ID for backup') self.assert_equal('NEW', result.status, 'Unexpected status for backup') instance = self.auth_client.instances.get( self.instance_info.id) datastore_version = self.auth_client.datastore_versions.get( self.instance_info.dbaas_datastore, self.instance_info.dbaas_datastore_version) self.assert_equal('BACKUP', instance.status, 'Unexpected instance status') self.assert_equal(self.instance_info.dbaas_datastore, result.datastore['type'], 'Unexpected datastore') self.assert_equal(self.instance_info.dbaas_datastore_version, result.datastore['version'], 'Unexpected datastore version') self.assert_equal(datastore_version.id, result.datastore['version_id'], 'Unexpected datastore version id') def run_restore_instance_from_not_completed_backup( self, expected_exception=exceptions.Conflict, expected_http_code=409): self.assert_raises( expected_exception, expected_http_code, self._restore_from_backup, self.backup_info.id) def run_instance_action_right_after_backup_create( self, expected_exception=exceptions.UnprocessableEntity, expected_http_code=422): self.assert_raises(expected_exception, expected_http_code, self.auth_client.instances.resize_instance, self.instance_info.id, 1) def run_backup_create_another_backup_running( self, expected_exception=exceptions.UnprocessableEntity, expected_http_code=422): self.assert_raises(expected_exception, expected_http_code, self.auth_client.backups.create, 'backup_test2', self.instance_info.id, 'test description2') def run_backup_delete_while_backup_running( self, expected_exception=exceptions.UnprocessableEntity, expected_http_code=422): result = self.auth_client.backups.list() backup = result[0] self.assert_raises(expected_exception, expected_http_code, self.auth_client.backups.delete, backup.id) def run_backup_create_completed(self): self._verify_backup(self.backup_info.id) def _verify_backup(self, backup_id): def _result_is_active(): backup = self.auth_client.backups.get(backup_id) if backup.status == 'COMPLETED': return True else: self.assert_not_equal('FAILED', backup.status, 'Backup status should not be') return False poll_until(_result_is_active, time_out=self.TIMEOUT_BACKUP_CREATE) def run_backup_list(self): backup_list = self.auth_client.backups.list() self.assert_backup_list( backup_list, self.backup_count_prior_to_create + 1) def assert_backup_list(self, backup_list, expected_count): self.assert_equal(expected_count, len(backup_list), 'Unexpected number of backups found') if expected_count: backup = backup_list[0] self.assert_equal(self.BACKUP_NAME, backup.name, 'Unexpected backup name') self.assert_equal(self.BACKUP_DESC, backup.description, 'Unexpected backup description') self.assert_not_equal(0.0, backup.size, 'Unexpected backup size') self.assert_equal(self.instance_info.id, backup.instance_id, 'Unexpected instance id') self.assert_equal('COMPLETED', 
backup.status, 'Unexpected backup status') def run_backup_list_filter_datastore(self): backup_list = self.auth_client.backups.list( datastore=self.instance_info.dbaas_datastore) self.assert_backup_list( backup_list, self.backup_count_for_ds_prior_to_create + 1) def run_backup_list_filter_different_datastore(self): backup_list = self.auth_client.backups.list( datastore='Test_Datastore_1') self.assert_backup_list(backup_list, 0) def run_backup_list_filter_datastore_not_found( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, expected_http_code, self.auth_client.backups.list, datastore='NOT_FOUND') def run_backup_list_for_instance(self): backup_list = self.auth_client.instances.backups( self.instance_info.id) self.assert_backup_list( backup_list, self.backup_count_for_instance_prior_to_create + 1) def run_backup_get(self): backup = self.auth_client.backups.get(self.backup_info.id) self.assert_backup_list([backup], 1) self.assert_equal(self.instance_info.dbaas_datastore, backup.datastore['type'], 'Unexpected datastore type') self.assert_equal(self.instance_info.dbaas_datastore_version, backup.datastore['version'], 'Unexpected datastore version') datastore_version = self.auth_client.datastore_versions.get( self.instance_info.dbaas_datastore, self.instance_info.dbaas_datastore_version) self.assert_equal(datastore_version.id, backup.datastore['version_id']) def run_backup_get_unauthorized_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, None, self.unauth_client.backups.get, self.backup_info.id) # we're using a different client, so we'll check the return code # on it explicitly, instead of depending on 'assert_raises' self.assert_client_code(expected_http_code=expected_http_code, client=self.unauth_client) def run_restore_from_backup(self): self.assert_restore_from_backup(self.backup_info.id) def assert_restore_from_backup(self, backup_ref): result = self._restore_from_backup(backup_ref) # TODO(peterstac) - This should probably return code 202 self.assert_client_code(200) self.assert_equal('BUILD', result.status, 'Unexpected instance status') self.restore_instance_id = result.id def _restore_from_backup(self, backup_ref): restore_point = {'backupRef': backup_ref} result = self.auth_client.instances.create( self.instance_info.name + '_restore', self.instance_info.dbaas_flavor_href, self.instance_info.volume, nics=self.instance_info.nics, restorePoint=restore_point, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) return result def run_restore_from_backup_completed( self, expected_states=['BUILD', 'ACTIVE'], # TODO(peterstac) - This should probably return code 202 expected_http_code=200): self.assert_restore_from_backup_completed( self.restore_instance_id, expected_states, expected_http_code) self.restore_host = self.get_instance_host(self.restore_instance_id) def assert_restore_from_backup_completed( self, instance_id, expected_states, expected_http_code): self.assert_instance_action(instance_id, expected_states, expected_http_code) def run_verify_data_in_restored_instance(self): self.assert_verify_backup_data(self.restore_host) def run_delete_restored_instance( self, expected_states=['SHUTDOWN'], expected_http_code=202): self.assert_delete_restored_instance( self.restore_instance_id, expected_states, expected_http_code) def assert_delete_restored_instance( self, instance_id, expected_states, expected_http_code): 
self.auth_client.instances.delete(instance_id) self.assert_instance_action(instance_id, expected_states, expected_http_code) self.assert_all_gone(instance_id, expected_states[-1]) def run_delete_unknown_backup( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, expected_http_code, self.auth_client.backups.delete, 'unknown_backup') def run_delete_backup_unauthorized_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_raises( expected_exception, None, self.unauth_client.backups.delete, self.backup_info.id) # we're using a different client, so we'll check the return code # on it explicitly, instead of depending on 'assert_raises' self.assert_client_code(expected_http_code=expected_http_code, client=self.unauth_client) def run_delete_backup(self, expected_http_code=202): self.assert_delete_backup(self.backup_info.id, expected_http_code) def assert_delete_backup( self, backup_id, expected_http_code): self.auth_client.backups.delete(backup_id) self.assert_client_code(expected_http_code) self._wait_until_backup_is_gone(backup_id) def _wait_until_backup_is_gone(self, backup_id): def _backup_is_gone(): try: self.auth_client.backups.get(backup_id) return False except exceptions.NotFound: return True poll_until(_backup_is_gone, time_out=self.TIMEOUT_BACKUP_DELETE) def run_check_for_incremental_backup( self, expected_exception=exceptions.NotFound, expected_http_code=404): if self.incremental_backup_info is None: raise SkipTest("Incremental Backup not created") self.assert_raises( expected_exception, expected_http_code, self.auth_client.backups.get, self.incremental_backup_info.id) trove-5.0.0/trove/tests/scenario/runners/guest_log_runners.py0000664000567000056710000007306512701410320025733 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
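# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the original test suite.
# The guest log runner below verifies that published log segments land in
# Swift under a prefix of the form '<instance_id>/<datastore>-<log>/' and
# that a '<prefix>_metafile' object accompanies them (see prefix_pattern
# and assert_log_details in GuestLogRunner).  A standalone helper showing
# that layout; the argument values in the demo are examples only:
# ---------------------------------------------------------------------------
def log_storage_paths(instance_id, datastore, log_name):
    """Return the (prefix, metafile) names used by the log assertions."""
    prefix = '%(instance_id)s/%(datastore)s-%(log)s/' % {
        'instance_id': instance_id,
        'datastore': datastore,
        'log': log_name,
    }
    # The metafile sits beside the segment objects, named after the prefix.
    metafile = prefix.rstrip('/') + '_metafile'
    return prefix, metafile


if __name__ == '__main__':
    prefix, metafile = log_storage_paths('abc-123', 'mysql', 'general')
    assert prefix == 'abc-123/mysql-general/'
    assert metafile == 'abc-123/mysql-general_metafile'
# ---------------------------------------------------------------------------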
from swiftclient.client import ClientException import tempfile from troveclient.compat import exceptions from trove.common import cfg from trove.guestagent.common import operating_system from trove.guestagent import guest_log from trove.tests.config import CONFIG from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner CONF = cfg.CONF class GuestLogRunner(TestRunner): def __init__(self): super(GuestLogRunner, self).__init__() self.container = CONF.guest_log_container_name self.prefix_pattern = '%(instance_id)s/%(datastore)s-%(log)s/' self._last_log_published = {} self._last_log_contents = {} def _get_last_log_published(self, log_name): return self._last_log_published.get(log_name, None) def _set_last_log_published(self, log_name, published): self._last_log_published[log_name] = published def _get_last_log_contents(self, log_name): return self._last_log_contents.get(log_name, []) def _set_last_log_contents(self, log_name, published): self._last_log_contents[log_name] = published def _get_exposed_user_log_names(self): """Returns the full list of exposed user logs.""" return self.test_helper.get_exposed_user_log_names() def _get_exposed_user_log_name(self): """Return the first exposed user log name.""" return self.test_helper.get_exposed_user_log_names()[0] def _get_unexposed_sys_log_name(self): """Return the first unexposed sys log name.""" return self.test_helper.get_unexposed_sys_log_names()[0] def run_test_log_list(self): self.assert_log_list(self.auth_client, self.test_helper.get_exposed_log_list()) def assert_log_list(self, client, expected_list): log_list = list(client.instances.log_list(self.instance_info.id)) log_names = list(ll.name for ll in log_list) self.assert_list_elements_equal(expected_list, log_names) def run_test_admin_log_list(self): self.assert_log_list(self.admin_client, self.test_helper.get_full_log_list()) def run_test_log_show(self): log_pending = self._set_zero_or_none() self.assert_log_show(self.auth_client, self._get_exposed_user_log_name(), expected_published=0, expected_pending=log_pending) def _set_zero_or_none(self): """This attempts to handle the case where an existing instance is used. Values that would normally be '0' are not, and must be ignored. """ value = 0 if self.is_using_existing_instance: value = None return value def assert_log_show(self, client, log_name, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_show for log '%s'" % log_name) log_details = client.instances.log_show( self.instance_info.id, log_name) self.assert_client_code(expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_log_details(self, log_details, expected_log_name, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): """Check that the action generates the proper response data. 
For log_published and log_pending, setting the value to 'None' will skip that check (useful when using an existing instance, as there may be pending things in user logs right from the get-go) and setting it to a value other than '0' will verify that the actual value is '>=value' (since it's impossible to know what the actual value will be at any given time). '0' will still match exclusively. """ self.report.log("Validating log details for log '%s'" % expected_log_name) self._set_last_log_published(expected_log_name, log_details.published) self.assert_equal(expected_log_name, log_details.name, "Wrong log name for '%s' log" % expected_log_name) self.assert_equal(expected_type, log_details.type, "Wrong log type for '%s' log" % expected_log_name) current_status = log_details.status.replace(' ', '_') self.assert_equal(expected_status, current_status, "Wrong log status for '%s' log" % expected_log_name) if expected_published is None: pass elif expected_published == 0: self.assert_equal(0, log_details.published, "Wrong log published for '%s' log" % expected_log_name) else: self.assert_true(log_details.published >= expected_published, "Missing log published for '%s' log: " "expected %d, got %d" % (expected_log_name, expected_published, log_details.published)) if expected_pending is None: pass elif expected_pending == 0: self.assert_equal(0, log_details.pending, "Wrong log pending for '%s' log" % expected_log_name) else: self.assert_true(log_details.pending >= expected_pending, "Missing log pending for '%s' log: " "expected %d, got %d" % (expected_log_name, expected_pending, log_details.pending)) container = self.container prefix = self.prefix_pattern % { 'instance_id': self.instance_info.id, 'datastore': CONFIG.dbaas_datastore, 'log': expected_log_name} metafile = prefix.rstrip('/') + '_metafile' if expected_published == 0: self.assert_storage_gone(container, prefix, metafile) container = 'None' prefix = 'None' else: self.assert_storage_exists(container, prefix, metafile) self.assert_equal(container, log_details.container, "Wrong log container for '%s' log" % expected_log_name) self.assert_equal(prefix, log_details.prefix, "Wrong log prefix for '%s' log" % expected_log_name) self.assert_equal(metafile, log_details.metafile, "Wrong log metafile for '%s' log" % expected_log_name) def assert_log_enable(self, client, log_name, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_enable for log '%s'" % log_name) log_details = client.instances.log_enable( self.instance_info.id, log_name) self.assert_client_code(expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_log_disable(self, client, log_name, discard=None, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_disable for log '%s' (discard: %s)" % (log_name, discard)) log_details = client.instances.log_disable( self.instance_info.id, log_name, discard=discard) self.assert_client_code(expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def 
assert_log_publish(self, client, log_name, disable=None, discard=None, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_publish for log '%s' (disable: %s " "discard: %s)" % (log_name, disable, discard)) log_details = client.instances.log_publish( self.instance_info.id, log_name, disable=disable, discard=discard) self.assert_client_code(expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_log_discard(self, client, log_name, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_discard for log '%s'" % log_name) log_details = client.instances.log_discard( self.instance_info.id, log_name) self.assert_client_code(expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_storage_gone(self, container, prefix, metafile): try: headers, container_files = self.swift_client.get_container( container, prefix=prefix) self.assert_equal(0, len(container_files), "Found files in %s/%s: %s" % (container, prefix, container_files)) except ClientException as ex: if ex.http_status == 404: self.report.log("Container '%s' does not exist" % container) pass else: raise try: self.swift_client.get_object(container, metafile) self.fail("Found metafile after discard: %s" % metafile) except ClientException as ex: if ex.http_status == 404: self.report.log("Metafile '%s' gone as expected" % metafile) pass else: raise def assert_storage_exists(self, container, prefix, metafile): try: headers, container_files = self.swift_client.get_container( container, prefix=prefix) self.assert_true(len(container_files) > 0, "No files found in %s/%s" % (container, prefix)) except ClientException as ex: if ex.http_status == 404: self.fail("Container '%s' does not exist" % container) else: raise try: self.swift_client.get_object(container, metafile) except ClientException as ex: if ex.http_status == 404: self.fail("Missing metafile: %s" % metafile) else: raise def run_test_log_enable_sys(self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_log_enable_fails( self.admin_client, expected_exception, expected_http_code, self._get_unexposed_sys_log_name()) def assert_log_enable_fails(self, client, expected_exception, expected_http_code, log_name): self.assert_raises(expected_exception, None, client.instances.log_enable, self.instance_info.id, log_name) # we may not be using the main client, so check explicitly here self.assert_client_code(expected_http_code, client) def run_test_log_disable_sys(self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_log_disable_fails( self.admin_client, expected_exception, expected_http_code, self._get_unexposed_sys_log_name()) def assert_log_disable_fails(self, client, expected_exception, expected_http_code, log_name, discard=None): self.assert_raises(expected_exception, None, client.instances.log_disable, self.instance_info.id, log_name, discard=discard) # we may not be using the main client, so check explicitly here 
self.assert_client_code(expected_http_code, client)

    def run_test_log_show_unauth_user(self,
                                      expected_exception=exceptions.NotFound,
                                      expected_http_code=404):
        self.assert_log_show_fails(
            self.unauth_client, expected_exception, expected_http_code,
            self._get_exposed_user_log_name())

    def assert_log_show_fails(self, client,
                              expected_exception, expected_http_code,
                              log_name):
        self.assert_raises(expected_exception, None,
                           client.instances.log_show,
                           self.instance_info.id, log_name)
        # we may not be using the main client, so check explicitly here
        self.assert_client_code(expected_http_code, client)

    def run_test_log_list_unauth_user(self,
                                      expected_exception=exceptions.NotFound,
                                      expected_http_code=404):
        self.assert_raises(expected_exception, None,
                           self.unauth_client.instances.log_list,
                           self.instance_info.id)
        # we're not using the main client, so check explicitly here
        self.assert_client_code(expected_http_code, self.unauth_client)

    def run_test_log_generator_unauth_user(self):
        self.assert_log_generator_unauth_user(
            self.unauth_client, self._get_exposed_user_log_name())

    def assert_log_generator_unauth_user(self, client, log_name,
                                         publish=None):
        # log_generator must raise for an unauthorized client; the test
        # fails only if the call succeeds.
        try:
            client.instances.log_generator(
                self.instance_info.id, log_name, publish=publish)
        except Exception:
            pass
        else:
            self.fail("Client allowed unauthorized access to log_generator")

    def run_test_log_generator_publish_unauth_user(self):
        self.assert_log_generator_unauth_user(
            self.unauth_client, self._get_exposed_user_log_name(),
            publish=True)

    def run_test_log_show_unexposed_user(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        self.assert_log_show_fails(
            self.auth_client, expected_exception, expected_http_code,
            self._get_unexposed_sys_log_name())

    def run_test_log_enable_unexposed_user(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        self.assert_log_enable_fails(
            self.auth_client, expected_exception, expected_http_code,
            self._get_unexposed_sys_log_name())

    def run_test_log_disable_unexposed_user(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        self.assert_log_disable_fails(
            self.auth_client, expected_exception, expected_http_code,
            self._get_unexposed_sys_log_name())

    def run_test_log_publish_unexposed_user(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        self.assert_log_publish_fails(
            self.auth_client, expected_exception, expected_http_code,
            self._get_unexposed_sys_log_name())

    def assert_log_publish_fails(self, client,
                                 expected_exception, expected_http_code,
                                 log_name,
                                 disable=None, discard=None):
        self.assert_raises(expected_exception, None,
                           client.instances.log_publish,
                           self.instance_info.id, log_name,
                           disable=disable, discard=discard)
        # we may not be using the main client, so check explicitly here
        self.assert_client_code(expected_http_code, client)

    def run_test_log_discard_unexposed_user(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        self.assert_log_discard_fails(
            self.auth_client, expected_exception, expected_http_code,
            self._get_unexposed_sys_log_name())

    def assert_log_discard_fails(self, client,
                                 expected_exception, expected_http_code,
                                 log_name):
        self.assert_raises(expected_exception, None,
                           client.instances.log_discard,
                           self.instance_info.id, log_name)
        # we may not be using the main client, so check explicitly here
        self.assert_client_code(expected_http_code, client)

    def run_test_log_enable_user(self):
        expected_status = guest_log.LogStatus.Ready.name
        expected_pending = 1
        if self.test_helper.log_enable_requires_restart():
            expected_status = 
guest_log.LogStatus.Restart_Required.name # if using an existing instance, there may already be something expected_pending = self._set_zero_or_none() for log_name in self._get_exposed_user_log_names(): self.assert_log_enable( self.auth_client, log_name, expected_status=expected_status, expected_published=0, expected_pending=expected_pending) def run_test_log_enable_flip_user(self): # for restart required datastores, test that flipping them # back to disabled returns the status to 'Disabled' # from 'Restart_Required' if self.test_helper.log_enable_requires_restart(): # if using an existing instance, there may already be something expected_pending = self._set_zero_or_none() for log_name in self._get_exposed_user_log_names(): self.assert_log_disable( self.auth_client, log_name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=0, expected_pending=expected_pending) self.assert_log_enable( self.auth_client, log_name, expected_status=guest_log.LogStatus.Restart_Required.name, expected_published=0, expected_pending=expected_pending) def run_test_restart_datastore(self, expected_http_code=202): if self.test_helper.log_enable_requires_restart(): instance_id = self.instance_info.id # we need to wait until the heartbeat flips the instance # back into 'ACTIVE' before we issue the restart command expected_states = ['RESTART_REQUIRED', 'ACTIVE'] self.assert_instance_action(instance_id, expected_states, None) self.auth_client.instances.restart(instance_id) self.assert_client_code(expected_http_code) def run_test_wait_for_restart(self, expected_states=['REBOOT', 'ACTIVE']): if self.test_helper.log_enable_requires_restart(): self.assert_instance_action(self.instance_info.id, expected_states, None) def run_test_log_publish_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_publish( self.auth_client, log_name, expected_status=guest_log.LogStatus.Published.name, expected_published=1, expected_pending=0) def run_test_add_data(self): self.test_helper.add_data(DataType.micro, self.get_instance_host()) def run_test_verify_data(self): self.test_helper.verify_data(DataType.micro, self.get_instance_host()) def run_test_log_publish_again_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_publish( self.admin_client, log_name, expected_status=guest_log.LogStatus.Published.name, expected_published=self._get_last_log_published(log_name), expected_pending=0) def run_test_log_generator_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_generator( self.auth_client, log_name, lines=2, expected_lines=2) def assert_log_generator(self, client, log_name, publish=False, lines=4, expected_lines=None, swift_client=None): self.report.log("Executing log_generator for log '%s' (publish: %s)" % (log_name, publish)) log_gen = client.instances.log_generator( self.instance_info.id, log_name, publish=publish, lines=lines, swift=swift_client) log_contents = "".join([chunk for chunk in log_gen()]) self.report.log("Returned %d lines for log '%s': %s" % ( len(log_contents.splitlines()), log_name, log_contents)) self._set_last_log_contents(log_name, log_contents) if expected_lines: self.assert_equal(expected_lines, len(log_contents.splitlines()), "Wrong line count for '%s' log" % log_name) else: self.assert_true(len(log_contents.splitlines()) <= lines, "More than %d lines found for '%s' log" % (lines, log_name)) def run_test_log_generator_publish_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_generator( 
self.auth_client, log_name, publish=True, lines=3, expected_lines=3) def run_test_log_generator_swift_client_user(self): swift_client = self.swift_client for log_name in self._get_exposed_user_log_names(): self.assert_log_generator( self.auth_client, log_name, publish=True, lines=3, expected_lines=3, swift_client=swift_client) def run_test_add_data_again(self): # Add some more data so we have at least 3 log data files self.test_helper.add_data(DataType.micro2, self.get_instance_host()) def run_test_verify_data_again(self): self.test_helper.verify_data(DataType.micro2, self.get_instance_host()) def run_test_log_generator_user_by_row(self): log_name = self._get_exposed_user_log_name() self.assert_log_publish( self.auth_client, log_name, expected_status=guest_log.LogStatus.Published.name, expected_published=self._get_last_log_published(log_name), expected_pending=0) # Now get the full contents of the log self.assert_log_generator(self.auth_client, log_name, lines=100000) log_lines = len(self._get_last_log_contents(log_name).splitlines()) # Make sure we get the right number of log lines back each time for lines in range(1, log_lines): self.assert_log_generator( self.auth_client, log_name, lines=lines, expected_lines=lines) def run_test_log_save_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_test_log_save(self.auth_client, log_name) def run_test_log_save_publish_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_test_log_save(self.auth_client, log_name, publish=True) def assert_test_log_save(self, client, log_name, publish=False): # generate the file self.report.log("Executing log_save for log '%s' (publish: %s)" % (log_name, publish)) with tempfile.NamedTemporaryFile() as temp_file: client.instances.log_save(self.instance_info.id, log_name=log_name, publish=publish, filename=temp_file.name) file_contents = operating_system.read_file(temp_file.name) # now grab the contents ourselves self.assert_log_generator(client, log_name, lines=100000) # and compare them self.assert_equal(self._get_last_log_contents(log_name), file_contents) def run_test_log_discard_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_discard( self.auth_client, log_name, expected_status=guest_log.LogStatus.Ready.name, expected_published=0, expected_pending=1) def run_test_log_disable_user(self): expected_status = guest_log.LogStatus.Disabled.name if self.test_helper.log_enable_requires_restart(): expected_status = guest_log.LogStatus.Restart_Required.name for log_name in self._get_exposed_user_log_names(): self.assert_log_disable( self.auth_client, log_name, expected_status=expected_status, expected_published=0, expected_pending=1) def run_test_log_show_sys(self): self.assert_log_show( self.admin_client, self._get_unexposed_sys_log_name(), expected_type=guest_log.LogType.SYS.name, expected_status=guest_log.LogStatus.Ready.name, expected_published=0, expected_pending=1) def run_test_log_publish_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_publish( self.admin_client, log_name, expected_type=guest_log.LogType.SYS.name, expected_status=guest_log.LogStatus.Partial.name, expected_published=1, expected_pending=1) def run_test_log_publish_again_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_publish( self.admin_client, log_name, expected_type=guest_log.LogType.SYS.name, expected_status=guest_log.LogStatus.Partial.name, expected_published=self._get_last_log_published(log_name) + 1, expected_pending=1) 
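    # Note: the SYS log tests above and below use the admin client and
    # expect the system log to remain 'Partial' with data still pending
    # even right after a publish, since the guest keeps writing to it
    # (an observation based on the expected values in these tests, not
    # an API guarantee).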
    def run_test_log_generator_sys(self):
        self.assert_log_generator(
            self.admin_client,
            self._get_unexposed_sys_log_name(),
            lines=4, expected_lines=4)

    def run_test_log_generator_publish_sys(self):
        self.assert_log_generator(
            self.admin_client,
            self._get_unexposed_sys_log_name(),
            publish=True, lines=4, expected_lines=4)

    def run_test_log_generator_swift_client_sys(self):
        self.assert_log_generator(
            self.admin_client,
            self._get_unexposed_sys_log_name(),
            publish=True, lines=4, expected_lines=4,
            swift_client=self.swift_client)

    def run_test_log_save_sys(self):
        self.assert_test_log_save(
            self.admin_client,
            self._get_unexposed_sys_log_name())

    def run_test_log_save_publish_sys(self):
        self.assert_test_log_save(
            self.admin_client,
            self._get_unexposed_sys_log_name(),
            publish=True)

    def run_test_log_discard_sys(self):
        self.assert_log_discard(
            self.admin_client,
            self._get_unexposed_sys_log_name(),
            expected_type=guest_log.LogType.SYS.name,
            expected_status=guest_log.LogStatus.Ready.name,
            expected_published=0, expected_pending=1)
trove-5.0.0/trove/tests/scenario/runners/root_actions_runners.py0000664000567000056710000001606112701410316026444 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import SkipTest

from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions


class RootActionsRunner(TestRunner):

    def __init__(self):
        self.current_root_creds = None
        super(RootActionsRunner, self).__init__()

    def run_check_root_never_enabled(self, expected_http_code=200):
        self.assert_root_disabled(self.instance_info.id,
                                  expected_http_code)

    def assert_root_disabled(self, instance_id, expected_http_code):
        self._assert_root_state(instance_id, False, expected_http_code,
                                "The root has already been enabled on the "
                                "instance.")

    def _assert_root_state(self, instance_id, expected_state,
                           expected_http_code, message):
        # The call returns a nameless user object with 'rootEnabled'
        # attribute.
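        # (In the compat client, is_root_enabled() issues
        # GET /instances/{id}/root; only the 'rootEnabled' attribute of
        # the parsed response is examined below.)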
        response = self.auth_client.root.is_root_enabled(instance_id)
        self.assert_instance_action(instance_id, None, expected_http_code)
        actual_state = getattr(response, 'rootEnabled', None)
        self.assert_equal(expected_state, actual_state, message)

    def run_disable_root_before_enabled(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_root_disable_failure(
            self.instance_info.id, expected_exception, expected_http_code)

    def assert_root_disable_failure(self, instance_id, expected_exception,
                                    expected_http_code):
        self.assert_raises(expected_exception, expected_http_code,
                           self.auth_client.root.delete, instance_id)

    def run_enable_root_no_password(self, expected_http_code=200):
        self.current_root_creds = self.assert_root_create(
            self.instance_info.id, None, expected_http_code)

    def assert_root_create(self, instance_id, root_password,
                           expected_http_code):
        if root_password:
            root_creds = self.auth_client.root.create_instance_root(
                instance_id, root_password)
        else:
            root_creds = self.auth_client.root.create(instance_id)
        self.assert_instance_action(instance_id, None, expected_http_code)
        return root_creds

    def run_check_root_enabled(self, expected_http_code=200):
        self.assert_root_enabled(self.instance_info.id, expected_http_code)

    def assert_root_enabled(self, instance_id, expected_http_code):
        self._assert_root_state(instance_id, True, expected_http_code,
                                "The root has not been enabled on the "
                                "instance yet.")

    def run_enable_root_with_password(self, expected_http_code=200):
        password = self.test_helper.get_valid_root_password()
        self.current_root_creds = self.assert_root_create(
            self.instance_info.id, password, expected_http_code)

    def run_check_root_still_enabled(self, expected_http_code=200):
        self.assert_root_enabled(self.instance_info.id, expected_http_code)

    def run_disable_root(self, expected_http_code=200):
        self.assert_root_disable(self.instance_info.id, expected_http_code)

    def assert_root_disable(self, instance_id, expected_http_code):
        self.auth_client.root.delete(instance_id)
        self.assert_instance_action(instance_id, None, expected_http_code)

    def run_check_root_still_enabled_after_disable(
            self, expected_http_code=200):
        self.assert_root_enabled(self.instance_info.id, expected_http_code)

    def run_delete_root(self, expected_exception=exceptions.BadRequest,
                        expected_http_code=400):
        self.assert_root_delete_failure(
            self.instance_info.id, expected_exception, expected_http_code)

    def assert_root_delete_failure(self, instance_id, expected_exception,
                                   expected_http_code):
        root_user_name = self.current_root_creds[0]
        self.assert_raises(expected_exception, expected_http_code,
                           self.auth_client.users.delete,
                           instance_id, root_user_name)

    def run_check_root_enabled_after_restore(
            self, restored_instance_id, expected_http_code=200):
        if restored_instance_id:
            self.assert_root_enabled(restored_instance_id,
                                     expected_http_code)
        else:
            raise SkipTest("No instance with enabled root restored.")


class MysqlRootActionsRunner(RootActionsRunner):

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")


class PerconaRootActionsRunner(RootActionsRunner):

    def run_disable_root_before_enabled(self):
        raise SkipTest("Operation is currently not supported.")

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")

    def run_disable_root(self):
        raise SkipTest("Operation is currently not supported.")


class MariadbRootActionsRunner(RootActionsRunner):

    def run_disable_root_before_enabled(self):
        raise SkipTest("Operation is currently not supported.")

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")

    def run_disable_root(self):
        raise SkipTest("Operation is currently not supported.")


class PostgresqlRootActionsRunner(RootActionsRunner):

    def run_disable_root_before_enabled(self):
        raise SkipTest("Operation is currently not supported.")

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")

    def run_disable_root(self):
        raise SkipTest("Operation is currently not supported.")


class CouchbaseRootActionsRunner(RootActionsRunner):

    def run_disable_root_before_enabled(self):
        raise SkipTest("Operation is currently not supported.")

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")

    def run_disable_root(self):
        raise SkipTest("Operation is currently not supported.")


class PxcRootActionsRunner(RootActionsRunner):

    def run_disable_root_before_enabled(self):
        raise SkipTest("Operation is currently not supported.")

    def run_disable_root(self):
        raise SkipTest("Operation is currently not supported.")

    # Renamed from 'check_root_still_enabled_after_disable': without the
    # 'run_' prefix the override would never be picked up by the runner.
    def run_check_root_still_enabled_after_disable(self):
        raise SkipTest("Operation is currently not supported.")
trove-5.0.0/trove/tests/scenario/runners/configuration_runners.py0000664000567000056710000005603512701410316026615 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json

from proboscis import SkipTest

from trove.common.utils import generate_uuid
from trove.tests.config import CONFIG
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util.check import CollectionCheck
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
from troveclient.compat import exceptions


class ConfigurationRunner(TestRunner):

    def __init__(self):
        super(ConfigurationRunner, self).__init__(sleep_time=10)

        self.dynamic_group_name = 'dynamic_test_group'
        self.dynamic_group_id = None
        self.dynamic_inst_count = 0

        self.non_dynamic_group_name = 'non_dynamic_test_group'
        self.non_dynamic_group_id = None
        self.non_dynamic_inst_count = 0

        self.initial_group_count = 0
        self.additional_group_count = 0

        self.other_client = None

        self.config_id_for_inst = None
        self.config_inst_id = None

    def run_create_bad_group(
            self, expected_exception=exceptions.UnprocessableEntity,
            expected_http_code=422):
        bad_group = {'unknown_datastore_key': 'bad_value'}
        self.assert_action_on_conf_group_failure(
            bad_group, expected_exception, expected_http_code)

    def assert_action_on_conf_group_failure(
            self, group_values, expected_exception, expected_http_code):
        json_def = json.dumps(group_values)
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.configurations.create,
            'conf_group', json_def, 'Group with Bad or Invalid entries',
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)

    def run_create_invalid_groups(
            self, expected_exception=exceptions.UnprocessableEntity,
            expected_http_code=422):
        invalid_groups = self.test_helper.get_invalid_groups()
        if invalid_groups:
            for invalid_group in invalid_groups:
                self.assert_action_on_conf_group_failure(
                    invalid_group, expected_exception, expected_http_code)
        elif invalid_groups is None:
            raise SkipTest("No invalid configuration values defined in %s."
                           % self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no invalid configuration values.")

    def run_delete_non_existent_group(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_group_delete_failure(
            None, expected_exception, expected_http_code)

    def run_delete_bad_group_id(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_group_delete_failure(
            generate_uuid(), expected_exception, expected_http_code)

    def run_attach_non_existent_group(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_instance_modify_failure(
            self.instance_info.id, generate_uuid(),
            expected_exception, expected_http_code)

    def run_attach_non_existent_group_to_non_existent_inst(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_instance_modify_failure(
            generate_uuid(), generate_uuid(),
            expected_exception, expected_http_code)

    def run_detach_group_with_none_attached(self,
                                            expected_states=['ACTIVE'],
                                            expected_http_code=202):
        self.assert_instance_modify(
            self.instance_info.id, None,
            expected_states, expected_http_code)
        # run again, just to make sure
        self.assert_instance_modify(
            self.instance_info.id, None,
            expected_states, expected_http_code)

    def run_create_dynamic_group(self, expected_http_code=200):
        self.initial_group_count = len(
            self.auth_client.configurations.list())
        values = self.test_helper.get_dynamic_group()
        if values:
            self.dynamic_group_id = self.assert_create_group(
                self.dynamic_group_name,
                'a fully dynamic group should not require restart',
                values, expected_http_code)
            self.additional_group_count += 1
        elif values is None:
            raise SkipTest("No dynamic group defined in %s."
                           % self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no dynamic configuration values.")

    def assert_create_group(self, name, description, values,
                            expected_http_code):
        json_def = json.dumps(values)
        result = self.auth_client.configurations.create(
            name,
            json_def,
            description,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)
        self.assert_client_code(expected_http_code)

        with TypeCheck('Configuration', result) as configuration:
            configuration.has_field('name', basestring)
            configuration.has_field('description', basestring)
            configuration.has_field('values', dict)
            configuration.has_field('datastore_name', basestring)
            configuration.has_field('datastore_version_id', unicode)
            configuration.has_field('datastore_version_name', basestring)

        self.assert_equal(name, result.name)
        self.assert_equal(description, result.description)
        self.assert_equal(values, result.values)

        return result.id

    def run_create_non_dynamic_group(self, expected_http_code=200):
        values = self.test_helper.get_non_dynamic_group()
        if values:
            self.non_dynamic_group_id = self.assert_create_group(
                self.non_dynamic_group_name,
                'a group containing non-dynamic properties should always '
                'require restart',
                values, expected_http_code)
            self.additional_group_count += 1
        elif values is None:
            raise SkipTest("No non-dynamic group defined in %s."
                           % self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no non-dynamic configuration "
                           "values.")

    def run_attach_dynamic_group_to_non_existent_inst(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        if self.dynamic_group_id:
            self.assert_instance_modify_failure(
                generate_uuid(), self.dynamic_group_id,
                expected_exception, expected_http_code)

    def run_attach_non_dynamic_group_to_non_existent_inst(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        if self.non_dynamic_group_id:
            self.assert_instance_modify_failure(
                generate_uuid(), self.non_dynamic_group_id,
                expected_exception, expected_http_code)

    def run_list_configuration_groups(self):
        configuration_list = self.auth_client.configurations.list()
        self.assert_configuration_list(
            configuration_list,
            self.initial_group_count + self.additional_group_count)

    def assert_configuration_list(self, configuration_list,
                                  expected_count):
        self.assert_equal(expected_count, len(configuration_list),
                          'Unexpected number of configurations found')
        if expected_count:
            configuration_names = [conf.name for conf in configuration_list]
            if self.dynamic_group_id:
                self.assert_true(
                    self.dynamic_group_name in configuration_names)
            if self.non_dynamic_group_id:
                self.assert_true(
                    self.non_dynamic_group_name in configuration_names)

    def run_dynamic_configuration_show(self):
        if self.dynamic_group_id:
            self.assert_configuration_show(self.dynamic_group_id,
                                           self.dynamic_group_name)
        else:
            raise SkipTest("No dynamic group created.")

    def assert_configuration_show(self, config_id, config_name):
        result = self.auth_client.configurations.get(config_id)
        self.assert_equal(config_id, result.id, "Unexpected config id")
        self.assert_equal(config_name, result.name,
                          "Unexpected config name")

        # check the result field types
        with TypeCheck("configuration", result) as check:
            check.has_field("id", basestring)
            check.has_field("name", basestring)
            check.has_field("description", basestring)
            check.has_field("values", dict)
            check.has_field("created", basestring)
            check.has_field("updated", basestring)
            check.has_field("instance_count", int)

        # check for valid timestamps
        self.assert_true(self._is_valid_timestamp(result.created),
                         'Created timestamp %s is invalid' % result.created)
        self.assert_true(self._is_valid_timestamp(result.updated),
                         'Updated timestamp %s is invalid' % result.updated)

        with CollectionCheck("configuration_values",
                             result.values) as check:
            # check each item has the correct type according to the rules
            for (item_key, item_val) in result.values.iteritems():
                print("item_key: %s" % item_key)
                print("item_val: %s" % item_val)
                param = (
                    self.auth_client.configuration_parameters.get_parameter(
                        self.instance_info.dbaas_datastore,
                        self.instance_info.dbaas_datastore_version,
                        item_key))
                if param.type == 'integer':
                    check.has_element(item_key, int)
                if param.type == 'string':
                    check.has_element(item_key, basestring)
                if param.type == 'boolean':
                    check.has_element(item_key, bool)

    def _is_valid_timestamp(self, time_string):
        try:
            datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
        except ValueError:
            return False
        return True

    def run_non_dynamic_configuration_show(self):
        if self.non_dynamic_group_id:
            self.assert_configuration_show(self.non_dynamic_group_id,
                                           self.non_dynamic_group_name)
        else:
            raise SkipTest("No non-dynamic group created.")

    def run_dynamic_conf_get_unauthorized_user(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_conf_get_unauthorized_user(self.dynamic_group_id,
                                               expected_exception,
                                               expected_http_code)

    def assert_conf_get_unauthorized_user(
            self, config_id, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self._create_other_client()
        self.assert_raises(
            expected_exception, None,
            self.other_client.configurations.get, config_id)
        # we're using a different client, so we'll check the return code
        # on it explicitly, instead of depending on 'assert_raises'
        self.assert_client_code(expected_http_code,
                                client=self.other_client)

    def _create_other_client(self):
        if not self.other_client:
            requirements = Requirements(is_admin=False)
            other_user = CONFIG.users.find_user(
                requirements,
                black_list=[self.instance_info.user.auth_user])
            self.other_client = create_dbaas_client(other_user)

    def run_non_dynamic_conf_get_unauthorized_user(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_conf_get_unauthorized_user(self.dynamic_group_id,
                                               expected_exception,
                                               expected_http_code)

    def run_list_dynamic_inst_conf_groups_before(self):
        if self.dynamic_group_id:
            self.dynamic_inst_count = len(
                self.auth_client.configurations.instances(
                    self.dynamic_group_id))

    def assert_conf_instance_list(self, group_id, expected_count):
        conf_instance_list = self.auth_client.configurations.instances(
            group_id)
        self.assert_equal(expected_count, len(conf_instance_list),
                          'Unexpected number of configurations found')
        if expected_count:
            conf_instance_ids = [inst.id for inst in conf_instance_list]
            self.assert_true(
                self.instance_info.id in conf_instance_ids)

    def run_attach_dynamic_group(
            self, expected_states=['ACTIVE'], expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_instance_modify(
                self.instance_info.id, self.dynamic_group_id,
                expected_states, expected_http_code)

    def run_list_dynamic_inst_conf_groups_after(self):
        if self.dynamic_group_id:
            self.assert_conf_instance_list(self.dynamic_group_id,
                                           self.dynamic_inst_count + 1)

    def run_attach_dynamic_group_again(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # The exception here should probably be UnprocessableEntity or
        # something else other than BadRequest as the request really is
        # valid.
        if self.dynamic_group_id:
            self.assert_instance_modify_failure(
                self.instance_info.id, self.dynamic_group_id,
                expected_exception, expected_http_code)

    def run_delete_attached_dynamic_group(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # The exception here should probably be UnprocessableEntity or
        # something else other than BadRequest as the request really is
        # valid.
        if self.dynamic_group_id:
            self.assert_group_delete_failure(
                self.dynamic_group_id, expected_exception,
                expected_http_code)

    def run_update_dynamic_group(
            self, expected_states=['ACTIVE'], expected_http_code=202):
        if self.dynamic_group_id:
            values = json.dumps(self.test_helper.get_dynamic_group())
            self.assert_update_group(
                self.instance_info.id, self.dynamic_group_id, values,
                expected_states, expected_http_code)

    def assert_update_group(
            self, instance_id, group_id, values,
            expected_states, expected_http_code, restart_inst=False):
        self.auth_client.configurations.update(group_id, values)
        self.assert_instance_action(
            instance_id, expected_states, expected_http_code)
        if restart_inst:
            self._restart_instance(instance_id)

    def run_detach_dynamic_group(
            self, expected_states=['ACTIVE'], expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_instance_modify(
                self.instance_info.id, None,
                expected_states, expected_http_code)

    def run_list_non_dynamic_inst_conf_groups_before(self):
        if self.non_dynamic_group_id:
            self.non_dynamic_inst_count = len(
                self.auth_client.configurations.instances(
                    self.non_dynamic_group_id))

    def run_attach_non_dynamic_group(
            self, expected_states=['RESTART_REQUIRED'],
            expected_http_code=202):
        if self.non_dynamic_group_id:
            self.assert_instance_modify(
                self.instance_info.id, self.non_dynamic_group_id,
                expected_states, expected_http_code, restart_inst=True)

    def run_list_non_dynamic_inst_conf_groups_after(self):
        if self.non_dynamic_group_id:
            self.assert_conf_instance_list(
                self.non_dynamic_group_id,
                self.non_dynamic_inst_count + 1)

    def run_attach_non_dynamic_group_again(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        if self.non_dynamic_group_id:
            self.assert_instance_modify_failure(
                self.instance_info.id, self.non_dynamic_group_id,
                expected_exception, expected_http_code)

    def run_delete_attached_non_dynamic_group(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        if self.non_dynamic_group_id:
            self.assert_group_delete_failure(
                self.non_dynamic_group_id, expected_exception,
                expected_http_code)

    def run_update_non_dynamic_group(
            self, expected_states=['RESTART_REQUIRED'],
            expected_http_code=202):
        if self.non_dynamic_group_id:
            values = json.dumps(self.test_helper.get_non_dynamic_group())
            self.assert_update_group(
                self.instance_info.id, self.non_dynamic_group_id, values,
                expected_states, expected_http_code, restart_inst=True)

    def run_detach_non_dynamic_group(
            self, expected_states=['RESTART_REQUIRED'],
            expected_http_code=202):
        if self.non_dynamic_group_id:
            self.assert_instance_modify(
                self.instance_info.id, None, expected_states,
                expected_http_code, restart_inst=True)

    def assert_instance_modify(
            self, instance_id, group_id, expected_states,
            expected_http_code, restart_inst=False):
        self.auth_client.instances.modify(instance_id,
                                          configuration=group_id)
        self.assert_instance_action(
            instance_id, expected_states, expected_http_code)

        # Verify the group has been attached.
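        # When a group is attached, the instance's 'configuration'
        # attribute carries the group's 'id' and 'name'; after a detach
        # the attribute is absent altogether, which is what the hasattr()
        # check below relies on.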
        instance = self.get_instance(instance_id)
        if group_id:
            group = self.auth_client.configurations.get(group_id)
            self.assert_equal(
                group.id, instance.configuration['id'],
                "Attached group does not have the expected ID")
            self.assert_equal(
                group.name, instance.configuration['name'],
                "Attached group does not have the expected name")
        else:
            self.assert_false(
                hasattr(instance, 'configuration'),
                "The configuration group was not detached from the "
                "instance.")

        if restart_inst:
            self._restart_instance(instance_id)

    def assert_instance_modify_failure(
            self, instance_id, group_id, expected_exception,
            expected_http_code):
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.instances.modify,
            instance_id, configuration=group_id)

    def run_delete_dynamic_group(self, expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_group_delete(self.dynamic_group_id,
                                     expected_http_code)

    def assert_group_delete(self, group_id, expected_http_code):
        self.auth_client.configurations.delete(group_id)
        self.assert_client_code(expected_http_code)

    def run_delete_non_dynamic_group(self, expected_http_code=202):
        if self.non_dynamic_group_id:
            self.assert_group_delete(self.non_dynamic_group_id,
                                     expected_http_code)

    def assert_group_delete_failure(self, group_id, expected_exception,
                                    expected_http_code):
        self.assert_raises(
            expected_exception, expected_http_code,
            self.auth_client.configurations.delete, group_id)

    def _restart_instance(
            self, instance_id, expected_states=['REBOOT', 'ACTIVE'],
            expected_http_code=202):
        self.auth_client.instances.restart(instance_id)
        self.assert_instance_action(instance_id, expected_states,
                                    expected_http_code)

    def run_create_instance_with_conf(self):
        self.config_id_for_inst = (
            self.dynamic_group_id or self.non_dynamic_group_id)
        if self.config_id_for_inst:
            self.config_inst_id = self.assert_create_instance_with_conf(
                self.config_id_for_inst)
        else:
            raise SkipTest("No groups (dynamic or non-dynamic) defined in "
                           "%s." % self.test_helper.get_class_name())

    def assert_create_instance_with_conf(self, config_id):
        # test that a new instance will apply the configuration on create
        result = self.auth_client.instances.create(
            "TEST_" + str(datetime.now()) + "_config",
            self.instance_info.dbaas_flavor_href,
            self.instance_info.volume,
            [], [],
            availability_zone="nova",
            configuration=config_id)
        self.assert_client_code(200)
        self.assert_equal("BUILD", result.status, 'Unexpected inst status')
        return result.id

    def run_wait_for_conf_instance(
            # we can't specify all the states here, as it may go active
            # before this test runs
            self, final_state=['ACTIVE'], expected_http_code=200):
        if self.config_inst_id:
            self.assert_instance_action(self.config_inst_id, final_state,
                                        expected_http_code)
            inst = self.auth_client.instances.get(self.config_inst_id)
            self.assert_equal(self.config_id_for_inst,
                              inst.configuration['id'])
        else:
            raise SkipTest("No instance created with a configuration "
                           "group.")

    def run_delete_conf_instance(
            self, expected_states=['SHUTDOWN'], expected_http_code=202):
        if self.config_inst_id:
            self.assert_delete_conf_instance(
                self.config_inst_id, expected_states, expected_http_code)
        else:
            raise SkipTest("No instance created with a configuration "
                           "group.")

    def assert_delete_conf_instance(
            self, instance_id, expected_state, expected_http_code):
        self.auth_client.instances.delete(instance_id)
        self.assert_client_code(expected_http_code)
        self.assert_all_gone(instance_id, expected_state)
trove-5.0.0/trove/tests/scenario/runners/replication_runners.py0000664000567000056710000003007012701410316026246 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from trove.common import utils
from trove.tests.api.instances import CheckInstance
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions


class ReplicationRunner(TestRunner):

    def __init__(self):
        super(ReplicationRunner, self).__init__()

        self.master_id = self.instance_info.id
        self.replica_1_id = 0
        self.replica_2_id = 0
        self.master_host = self.get_instance_host(self.master_id)
        self.replica_1_host = None
        self.master_backup_count = None
        self.used_data_sets = set()

    def run_add_data_for_replication(self, data_type=DataType.small):
        self.assert_add_replication_data(data_type, self.master_host)

    def assert_add_replication_data(self, data_type, host):
        """In order for this to work, the corresponding datastore
        'helper' class should implement the 'add_<type>_data' method.
        """
        self.test_helper.add_data(data_type, host)
        self.used_data_sets.add(data_type)

    def run_verify_data_for_replication(self, data_type=DataType.small):
        self.assert_verify_replication_data(data_type, self.master_host)

    def assert_verify_replication_data(self, data_type, host):
        """In order for this to work, the corresponding datastore
        'helper' class should implement the 'verify_<type>_data' method.
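        For example, DataType.tiny would map to a 'verify_tiny_data'
        method (an illustrative name inferred from the pattern above,
        not a method defined in this module).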
""" self.test_helper.verify_data(data_type, host) def run_create_single_replica(self, expected_states=['BUILD', 'ACTIVE'], expected_http_code=200): master_id = self.instance_info.id self.master_backup_count = len( self.auth_client.instances.backups(master_id)) self.replica_1_id = self.assert_replica_create( master_id, 'replica1', 1, expected_states, expected_http_code) self.replica_1_host = self.get_instance_host(self.replica_1_id) def assert_replica_create( self, master_id, replica_name, replica_count, expected_states, expected_http_code): replica = self.auth_client.instances.create( self.instance_info.name + replica_name, self.instance_info.dbaas_flavor_href, self.instance_info.volume, replica_of=master_id, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version, nics=self.instance_info.nics, replica_count=replica_count) replica_id = replica.id self.assert_instance_action(replica_id, expected_states, expected_http_code) self._assert_is_master(master_id, [replica_id]) self._assert_is_replica(replica_id, master_id) return replica_id def _assert_is_master(self, instance_id, replica_ids): instance = self.get_instance(instance_id) self.assert_client_code(200) CheckInstance(instance._info).slaves() self.assert_true( set(replica_ids).issubset(self._get_replica_set(instance_id))) def _get_replica_set(self, master_id): instance = self.get_instance(master_id) return set([replica['id'] for replica in instance._info['replicas']]) def _assert_is_replica(self, instance_id, master_id): instance = self.get_instance(instance_id) self.assert_client_code(200) CheckInstance(instance._info).replica_of() self.assert_equal(master_id, instance._info['replica_of']['id'], 'Unexpected replication master ID') def run_create_multiple_replicas(self, expected_states=['BUILD', 'ACTIVE'], expected_http_code=200): master_id = self.instance_info.id self.replica_2_id = self.assert_replica_create( master_id, 'replica2', 2, expected_states, expected_http_code) def run_add_data_to_replicate(self): self.assert_add_replication_data(DataType.tiny, self.master_host) def run_verify_data_to_replicate(self): self.assert_verify_replication_data(DataType.tiny, self.master_host) def run_wait_for_data_to_replicate(self): self.test_helper.wait_for_replicas() def run_verify_replica_data_orig(self): self.assert_verify_replica_data(self.instance_info.id, DataType.small) def assert_verify_replica_data(self, master_id, data_type): replica_ids = self._get_replica_set(master_id) for replica_id in replica_ids: replica_instance = self.get_instance(replica_id) host = str(replica_instance._info['ip'][0]) self.report.log("Checking data on host %s" % host) self.assert_verify_replication_data(data_type, host) def run_verify_replica_data_new(self): self.assert_verify_replica_data(self.instance_info.id, DataType.tiny) def run_promote_master(self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_raises( expected_exception, expected_http_code, self.auth_client.instances.promote_to_replica_source, self.instance_info.id) def run_eject_replica(self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_raises( expected_exception, expected_http_code, self.auth_client.instances.eject_replica_source, self.replica_1_id) def run_eject_valid_master(self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_raises( expected_exception, expected_http_code, self.auth_client.instances.eject_replica_source, self.instance_info.id) def 
run_delete_valid_master(self, expected_exception=exceptions.Forbidden, expected_http_code=403): self.assert_raises( expected_exception, expected_http_code, self.auth_client.instances.delete, self.instance_info.id) def run_promote_to_replica_source(self, expected_states=['PROMOTE', 'ACTIVE'], expected_http_code=202): self.assert_promote_to_replica_source( self.replica_1_id, self.instance_info.id, expected_states, expected_http_code) def assert_promote_to_replica_source( self, new_master_id, old_master_id, expected_states, expected_http_code): original_replica_ids = self._get_replica_set(old_master_id) other_replica_ids = list(original_replica_ids) other_replica_ids.remove(new_master_id) # Promote replica self.assert_replica_promote(new_master_id, expected_states, expected_http_code) current_replica_ids = list(other_replica_ids) current_replica_ids.append(old_master_id) self._assert_is_master(new_master_id, current_replica_ids) self._assert_is_replica(old_master_id, new_master_id) def assert_replica_promote( self, new_master_id, expected_states, expected_http_code): self.auth_client.instances.promote_to_replica_source(new_master_id) self.assert_instance_action(new_master_id, expected_states, expected_http_code) def run_add_data_to_replicate2(self): self.assert_add_replication_data(DataType.tiny2, self.replica_1_host) def run_verify_data_to_replicate2(self): self.assert_verify_replication_data(DataType.tiny2, self.replica_1_host) def run_verify_replica_data_new2(self): self.assert_verify_replica_data(self.replica_1_id, DataType.tiny2) def run_promote_original_source(self, expected_states=['PROMOTE', 'ACTIVE'], expected_http_code=202): self.assert_promote_to_replica_source( self.instance_info.id, self.replica_1_id, expected_states, expected_http_code) def run_remove_replicated_data(self): self.assert_remove_replicated_data(self.master_host) def assert_remove_replicated_data(self, host): """In order for this to work, the corresponding datastore 'helper' class should implement the 'remove__data' method. 
""" for data_set in self.used_data_sets: self.report.log("Removing replicated data set: %s" % data_set) self.test_helper.remove_data(data_set, host) def run_detach_replica_from_source(self, expected_states=['ACTIVE'], expected_http_code=202): self.assert_detach_replica_from_source( self.instance_info.id, self.replica_1_id, expected_states, expected_http_code) def assert_detach_replica_from_source( self, master_id, replica_id, expected_states, expected_http_code): other_replica_ids = self._get_replica_set(master_id) other_replica_ids.remove(replica_id) self.assert_detach_replica( replica_id, expected_states, expected_http_code) self._assert_is_master(master_id, other_replica_ids) self._assert_is_not_replica(replica_id, master_id) def assert_detach_replica( self, replica_id, expected_states, expected_http_code): self.auth_client.instances.edit(replica_id, detach_replica_source=True) self.assert_instance_action( replica_id, expected_states, expected_http_code) def _assert_is_not_replica(self, instance_id, master_id): try: self._assert_is_replica(instance_id, master_id) self.fail("Non-replica '%s' is still replica of '%s'" % (instance_id, master_id)) except AssertionError: pass def run_delete_detached_replica(self, expected_last_state=['SHUTDOWN'], expected_http_code=202): self.assert_delete_instances( self.replica_1_id, expected_last_state=expected_last_state, expected_http_code=expected_http_code) def assert_delete_instances( self, instance_ids, expected_last_state, expected_http_code): instance_ids = (instance_ids if utils.is_collection(instance_ids) else [instance_ids]) for instance_id in instance_ids: self.auth_client.instances.delete(instance_id) self.assert_client_code(expected_http_code) self.assert_all_gone(instance_ids, expected_last_state) def run_delete_all_replicas(self, expected_last_state=['SHUTDOWN'], expected_http_code=202): self.assert_delete_all_replicas( self.instance_info.id, expected_last_state, expected_http_code) def assert_delete_all_replicas( self, master_id, expected_last_state, expected_http_code): self.report.log("Deleting a replication set: %s" % master_id) replica_ids = self._get_replica_set(master_id) self.assert_delete_instances(replica_ids, expected_last_state, expected_http_code) def run_test_backup_deleted(self): backup = self.auth_client.instances.backups(self.master_id) self.assert_equal(self.master_backup_count, len(backup)) trove-5.0.0/trove/tests/scenario/groups/0000775000567000056710000000000012701410521021430 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/groups/user_actions_group.py0000664000567000056710000001457312701410316025730 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.user_actions_group"


@test(depends_on_groups=[instance_create_group.GROUP],
      groups=[GROUP])
class UserActionsGroup(TestGroup):

    def __init__(self):
        super(UserActionsGroup, self).__init__(
            'user_actions_runners', 'UserActionsRunner')
        self.instance_create_runner = self.get_runner(
            'instance_create_runners', 'InstanceCreateRunner')
        self.database_actions_runner = self.get_runner(
            'database_actions_runners', 'DatabaseActionsRunner')

    @test
    def create_initialized_instance(self):
        """Create an instance with initial users."""
        self.instance_create_runner.run_initialized_instance_create(
            with_dbs=False, with_users=True, configuration_id=None,
            create_helper_user=False)

    @test(runs_after=[create_initialized_instance])
    def create_user_databases(self):
        """Create user databases on an existing instance."""
        # These databases may be referenced by the users (below) so we
        # need to create them first.
        self.database_actions_runner.run_databases_create()

    @test(runs_after=[create_user_databases])
    def create_users(self):
        """Create users on an existing instance."""
        self.test_runner.run_users_create()

    @test(depends_on=[create_users])
    def show_user(self):
        """Show created users."""
        self.test_runner.run_user_show()

    @test(depends_on=[create_users], runs_after=[show_user])
    def list_users(self):
        """List the created users."""
        self.test_runner.run_users_list()

    @test(depends_on=[create_users], runs_after=[list_users])
    def create_user_with_no_attributes(self):
        """Ensure creating a user with blank specification fails."""
        self.test_runner.run_user_create_with_no_attributes()

    @test(depends_on=[create_users],
          runs_after=[create_user_with_no_attributes])
    def create_user_with_blank_name(self):
        """Ensure creating a user with blank name fails."""
        self.test_runner.run_user_create_with_blank_name()

    @test(depends_on=[create_users],
          runs_after=[create_user_with_blank_name])
    def create_user_with_blank_password(self):
        """Ensure creating a user with blank password fails."""
        self.test_runner.run_user_create_with_blank_password()

    @test(depends_on=[create_users],
          runs_after=[create_user_with_blank_password])
    def create_existing_user(self):
        """Ensure creating an existing user fails."""
        self.test_runner.run_existing_user_create()

    @test(depends_on=[create_users], runs_after=[create_existing_user])
    def update_user_with_blank_name(self):
        """Ensure updating a user with blank name fails."""
        self.test_runner.run_user_update_with_blank_name()

    @test(depends_on=[create_users],
          runs_after=[update_user_with_blank_name])
    def update_user_with_existing_name(self):
        """Ensure updating a user with an existing name fails."""
        self.test_runner.run_user_update_with_existing_name()

    @test(depends_on=[create_users],
          runs_after=[update_user_with_existing_name])
    def update_user_attributes(self):
        """Update an existing user."""
        self.test_runner.run_user_attribute_update()

    @test(depends_on=[create_users], runs_after=[update_user_attributes])
    def delete_user(self):
        """Delete the created users."""
        self.test_runner.run_user_delete()

    @test(runs_after=[delete_user])
    def show_nonexisting_user(self):
        """Ensure showing a non-existing user fails."""
        self.test_runner.run_nonexisting_user_show()

    @test(runs_after=[show_nonexisting_user])
    def update_nonexisting_user(self):
        """Ensure updating a non-existing user fails."""
        self.test_runner.run_nonexisting_user_update()

    @test(runs_after=[update_nonexisting_user])
    def delete_nonexisting_user(self):
        """Ensure deleting a non-existing user fails."""
        self.test_runner.run_nonexisting_user_delete()

    @test(runs_after=[delete_nonexisting_user])
    def create_system_user(self):
        """Ensure creating a system user fails."""
        self.test_runner.run_system_user_create()

    @test(runs_after=[create_system_user])
    def show_system_user(self):
        """Ensure showing a system user fails."""
        self.test_runner.run_system_user_show()

    @test(runs_after=[show_system_user])
    def update_system_user(self):
        """Ensure updating a system user fails."""
        self.test_runner.run_system_user_attribute_update()

    @test(runs_after=[update_system_user])
    def delete_system_user(self):
        """Ensure deleting a system user fails."""
        self.test_runner.run_system_user_delete()

    @test(depends_on=[create_user_databases],
          runs_after=[delete_system_user])
    def delete_user_databases(self):
        """Delete the user databases."""
        self.database_actions_runner.run_database_delete()

    @test(depends_on=[create_initialized_instance],
          runs_after=[delete_user_databases])
    def wait_for_instances(self):
        """Waiting for all instances to become active."""
        self.instance_create_runner.wait_for_created_instances()

    @test(depends_on=[wait_for_instances])
    def validate_initialized_instance(self):
        """Validate the initialized instance data and properties."""
        self.instance_create_runner.run_validate_initialized_instance()

    @test(runs_after=[validate_initialized_instance])
    def delete_initialized_instance(self):
        """Delete the initialized instance."""
        self.instance_create_runner.run_initialized_instance_delete()
trove-5.0.0/trove/tests/scenario/groups/configuration_group.py0000664000567000056710000002265312701410316026077 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.configuration_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class ConfigurationGroup(TestGroup):

    def __init__(self):
        super(ConfigurationGroup, self).__init__(
            'configuration_runners', 'ConfigurationRunner')

    @test
    def create_bad_group(self):
        """Ensure a group with bad entries fails create."""
        self.test_runner.run_create_bad_group()

    @test
    def create_invalid_groups(self):
        """Ensure a group with invalid entries fails create."""
        self.test_runner.run_create_invalid_groups()

    @test
    def delete_non_existent_group(self):
        """Ensure delete non-existent group fails."""
        self.test_runner.run_delete_non_existent_group()

    @test
    def delete_bad_group_id(self):
        """Ensure delete bad group fails."""
        self.test_runner.run_delete_bad_group_id()

    @test
    def attach_non_existent_group(self):
        """Ensure attach non-existent group fails."""
        self.test_runner.run_attach_non_existent_group()

    # The @test decorator was missing here; without it this test would
    # never be collected by proboscis.
    @test
    def attach_non_existent_group_to_non_existent_inst(self):
        """Ensure attach non-existent group to non-existent inst fails."""
        self.test_runner.run_attach_non_existent_group_to_non_existent_inst()

    @test
    def detach_group_with_none_attached(self):
        """Test detach with none attached."""
        self.test_runner.run_detach_group_with_none_attached()

    @test
    def create_dynamic_group(self):
        """Create a group with only dynamic entries."""
        self.test_runner.run_create_dynamic_group()

    @test
    def create_non_dynamic_group(self):
        """Create a group with only non-dynamic entries."""
        self.test_runner.run_create_non_dynamic_group()

    @test(depends_on=[create_dynamic_group])
    def attach_dynamic_group_to_non_existent_inst(self):
        """Ensure attach dynamic group to non-existent inst fails."""
        self.test_runner.run_attach_dynamic_group_to_non_existent_inst()

    @test(depends_on=[create_non_dynamic_group])
    def attach_non_dynamic_group_to_non_existent_inst(self):
        """Ensure attach non-dynamic group to non-existent inst fails."""
        self.test_runner.run_attach_non_dynamic_group_to_non_existent_inst()

    @test(depends_on=[create_dynamic_group, create_non_dynamic_group])
    def list_configuration_groups(self):
        """Test list configuration groups."""
        self.test_runner.run_list_configuration_groups()

    @test(depends_on=[create_dynamic_group])
    def dynamic_configuration_show(self):
        """Test show on dynamic group."""
        self.test_runner.run_dynamic_configuration_show()

    @test(depends_on=[create_non_dynamic_group])
    def non_dynamic_configuration_show(self):
        """Test show on non-dynamic group."""
        self.test_runner.run_non_dynamic_configuration_show()

    @test(depends_on=[create_dynamic_group])
    def dynamic_conf_get_unauthorized_user(self):
        """Ensure show dynamic fails with unauthorized user."""
        self.test_runner.run_dynamic_conf_get_unauthorized_user()

    @test(depends_on=[create_non_dynamic_group])
    def non_dynamic_conf_get_unauthorized_user(self):
        """Ensure show non-dynamic fails with unauthorized user."""
        self.test_runner.run_non_dynamic_conf_get_unauthorized_user()

    @test(depends_on=[create_dynamic_group],
          runs_after=[list_configuration_groups])
    def list_dynamic_inst_conf_groups_before(self):
        """Count list instances for dynamic group before attach."""
        self.test_runner.run_list_dynamic_inst_conf_groups_before()

    @test(depends_on=[create_dynamic_group],
          runs_after=[list_dynamic_inst_conf_groups_before,
                      attach_non_existent_group,
                      detach_group_with_none_attached])
    def attach_dynamic_group(self):
        """Test attach dynamic group."""
        self.test_runner.run_attach_dynamic_group()

    @test(depends_on=[attach_dynamic_group])
    def list_dynamic_inst_conf_groups_after(self):
        """Test list instances for dynamic group after attach."""
        self.test_runner.run_list_dynamic_inst_conf_groups_after()

    @test(depends_on=[attach_dynamic_group],
          runs_after=[list_dynamic_inst_conf_groups_after])
    def attach_dynamic_group_again(self):
        """Ensure attaching dynamic group again fails."""
        self.test_runner.run_attach_dynamic_group_again()

    @test(depends_on=[attach_dynamic_group],
          runs_after=[attach_dynamic_group_again])
    def delete_attached_dynamic_group(self):
        """Ensure deleting attached dynamic group fails."""
        self.test_runner.run_delete_attached_dynamic_group()

    @test(depends_on=[attach_dynamic_group],
          runs_after=[delete_attached_dynamic_group])
    def update_dynamic_group(self):
        """Test update dynamic group."""
        self.test_runner.run_update_dynamic_group()

    @test(depends_on=[create_dynamic_group],
          runs_after=[update_dynamic_group])
    def detach_dynamic_group(self):
        """Test detach dynamic group."""
        self.test_runner.run_detach_dynamic_group()

    @test(depends_on=[create_non_dynamic_group],
          runs_after=[detach_dynamic_group])
    def list_non_dynamic_inst_conf_groups_before(self):
        """Count list instances for non-dynamic group before attach."""
        self.test_runner.run_list_non_dynamic_inst_conf_groups_before()

    @test(depends_on=[create_non_dynamic_group],
          runs_after=[list_non_dynamic_inst_conf_groups_before,
                      attach_non_existent_group])
    def attach_non_dynamic_group(self):
        """Test attach non-dynamic group."""
        self.test_runner.run_attach_non_dynamic_group()

    @test(depends_on=[attach_non_dynamic_group])
    def list_non_dynamic_inst_conf_groups_after(self):
        """Test list instances for non-dynamic group after attach."""
        self.test_runner.run_list_non_dynamic_inst_conf_groups_after()

    @test(depends_on=[attach_non_dynamic_group],
          runs_after=[list_non_dynamic_inst_conf_groups_after])
    def attach_non_dynamic_group_again(self):
        """Ensure attaching non-dynamic group again fails."""
        self.test_runner.run_attach_non_dynamic_group_again()

    @test(depends_on=[attach_non_dynamic_group],
          runs_after=[attach_non_dynamic_group_again])
    def delete_attached_non_dynamic_group(self):
        """Ensure deleting attached non-dynamic group fails."""
        self.test_runner.run_delete_attached_non_dynamic_group()

    @test(depends_on=[attach_non_dynamic_group],
          runs_after=[delete_attached_non_dynamic_group])
    def update_non_dynamic_group(self):
        """Test update non-dynamic group."""
        self.test_runner.run_update_non_dynamic_group()

    @test(depends_on=[attach_non_dynamic_group],
          runs_after=[update_non_dynamic_group])
    def detach_non_dynamic_group(self):
        """Test detach non-dynamic group."""
        self.test_runner.run_detach_non_dynamic_group()

    @test(runs_after=[create_dynamic_group, create_non_dynamic_group,
                      update_dynamic_group, update_non_dynamic_group])
    def create_instance_with_conf(self):
        """Test create instance with conf group."""
        self.test_runner.run_create_instance_with_conf()

    @test(depends_on=[create_instance_with_conf],
          runs_after=[create_dynamic_group, create_non_dynamic_group,
                      update_dynamic_group, update_non_dynamic_group])
    def wait_for_conf_instance(self):
        """Test create instance with conf group completes."""
        self.test_runner.run_wait_for_conf_instance()

    @test(depends_on=[wait_for_conf_instance])
    def delete_conf_instance(self):
        """Test delete instance with conf group."""
        self.test_runner.run_delete_conf_instance()

    @test(depends_on=[create_dynamic_group],
          runs_after=[list_configuration_groups,
                      detach_dynamic_group,
                      dynamic_configuration_show,
                      dynamic_conf_get_unauthorized_user,
                      attach_dynamic_group_to_non_existent_inst,
                      delete_conf_instance])
    def delete_dynamic_group(self):
        """Test delete dynamic group."""
        self.test_runner.run_delete_dynamic_group()

    @test(depends_on=[create_non_dynamic_group],
          runs_after=[list_configuration_groups,
                      detach_non_dynamic_group,
                      non_dynamic_configuration_show,
                      non_dynamic_conf_get_unauthorized_user,
                      attach_non_dynamic_group_to_non_existent_inst,
                      delete_conf_instance])
    def delete_non_dynamic_group(self):
        """Test delete non-dynamic group."""
        self.test_runner.run_delete_non_dynamic_group()
trove-5.0.0/trove/tests/scenario/groups/backup_group.py0000664000567000056710000001675512701410316024473 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.backup_restore_group"
GROUP_BACKUP = "scenario.backup_group"
GROUP_BACKUP_LIST = "scenario.backup_list_group"
GROUP_RESTORE = "scenario.restore_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class BackupGroup(TestGroup):
    """Test Backup and Restore functionality."""

    def __init__(self):
        super(BackupGroup, self).__init__(
            'backup_runners', 'BackupRunner')

    @test(groups=[GROUP_BACKUP])
    def backup_create_instance_invalid(self):
        """Ensure create backup fails with invalid instance id."""
        self.test_runner.run_backup_create_instance_invalid()

    @test(groups=[GROUP_BACKUP])
    def backup_create_instance_not_found(self):
        """Ensure create backup fails with unknown instance id."""
        self.test_runner.run_backup_create_instance_not_found()

    @test(groups=[GROUP_BACKUP])
    def add_data_for_backup(self):
        """Add data to instance for restore verification."""
        self.test_runner.run_add_data_for_backup()

    @test(groups=[GROUP_BACKUP], runs_after=[add_data_for_backup])
    def verify_data_for_backup(self):
        """Verify data in instance."""
        self.test_runner.run_verify_data_for_backup()

    @test(groups=[GROUP_BACKUP], runs_after=[verify_data_for_backup])
    def backup_create(self):
        """Check that create backup is started successfully."""
        self.test_runner.run_backup_create()

    @test(groups=[GROUP_BACKUP], depends_on=[backup_create])
    def backup_delete_while_backup_running(self):
        """Ensure delete backup fails while it is running."""
        self.test_runner.run_backup_delete_while_backup_running()

    @test(groups=[GROUP_BACKUP], depends_on=[backup_create],
          runs_after=[backup_delete_while_backup_running])
    def restore_instance_from_not_completed_backup(self):
        """Ensure a restore fails while the backup is running."""
        self.test_runner.run_restore_instance_from_not_completed_backup()

    @test(groups=[GROUP_BACKUP], depends_on=[backup_create],
          runs_after=[restore_instance_from_not_completed_backup])
    def backup_create_another_backup_running(self):
        """Ensure create backup fails when another backup is running."""
trove-5.0.0/trove/tests/scenario/groups/backup_group.py0000664000567000056710000001675512701410316024473 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.backup_restore_group"
GROUP_BACKUP = "scenario.backup_group"
GROUP_BACKUP_LIST = "scenario.backup_list_group"
GROUP_RESTORE = "scenario.restore_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class BackupGroup(TestGroup):
    """Test Backup and Restore functionality."""

    def __init__(self):
        super(BackupGroup, self).__init__(
            'backup_runners', 'BackupRunner')

    @test(groups=[GROUP_BACKUP])
    def backup_create_instance_invalid(self):
        """Ensure create backup fails with invalid instance id."""
        self.test_runner.run_backup_create_instance_invalid()

    @test(groups=[GROUP_BACKUP])
    def backup_create_instance_not_found(self):
        """Ensure create backup fails with unknown instance id."""
        self.test_runner.run_backup_create_instance_not_found()

    @test(groups=[GROUP_BACKUP])
    def add_data_for_backup(self):
        """Add data to instance for restore verification."""
        self.test_runner.run_add_data_for_backup()

    @test(groups=[GROUP_BACKUP],
          runs_after=[add_data_for_backup])
    def verify_data_for_backup(self):
        """Verify data in instance."""
        self.test_runner.run_verify_data_for_backup()

    @test(groups=[GROUP_BACKUP],
          runs_after=[verify_data_for_backup])
    def backup_create(self):
        """Check that create backup is started successfully."""
        self.test_runner.run_backup_create()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create])
    def backup_delete_while_backup_running(self):
        """Ensure delete backup fails while it is running."""
        self.test_runner.run_backup_delete_while_backup_running()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[backup_delete_while_backup_running])
    def restore_instance_from_not_completed_backup(self):
        """Ensure a restore fails while the backup is running."""
        self.test_runner.run_restore_instance_from_not_completed_backup()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[restore_instance_from_not_completed_backup])
    def backup_create_another_backup_running(self):
        """Ensure create backup fails when another backup is running."""
        self.test_runner.run_backup_create_another_backup_running()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[backup_create_another_backup_running])
    def instance_action_right_after_backup_create(self):
        """Ensure any instance action fails while backup is running."""
        self.test_runner.run_instance_action_right_after_backup_create()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[instance_action_right_after_backup_create])
    def backup_create_completed(self):
        """Check that the backup completes successfully."""
        self.test_runner.run_backup_create_completed()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list(self):
        """Test list backups."""
        self.test_runner.run_backup_list()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_filter_datastore(self):
        """Test list backups and filter by datastore."""
        self.test_runner.run_backup_list_filter_datastore()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_filter_different_datastore(self):
        """Test list backups and filter by different datastore."""
        self.test_runner.run_backup_list_filter_different_datastore()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_filter_datastore_not_found(self):
        """Test list backups and filter by unknown datastore."""
        self.test_runner.run_backup_list_filter_datastore_not_found()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_for_instance(self):
        """Test backup list for instance."""
        self.test_runner.run_backup_list_for_instance()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_get(self):
        """Test backup show."""
        self.test_runner.run_backup_get()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_get_unauthorized_user(self):
        """Ensure backup show fails for an unauthorized user."""
        self.test_runner.run_backup_get_unauthorized_user()

    @test(groups=[GROUP_RESTORE],
          depends_on=[backup_create_completed],
          runs_after_groups=[GROUP_BACKUP_LIST])
    def restore_from_backup(self):
        """Check that restoring an instance from a backup starts."""
        self.test_runner.run_restore_from_backup()

    @test(groups=[GROUP_RESTORE],
          depends_on=[restore_from_backup])
    def restore_from_backup_completed(self):
        """Wait until restoring an instance from a backup completes."""
        self.test_runner.run_restore_from_backup_completed()

    @test(groups=[GROUP_RESTORE],
          depends_on=[restore_from_backup_completed])
    def verify_data_in_restored_instance(self):
        """Verify data in restored instance."""
        self.test_runner.run_verify_data_in_restored_instance()

    @test(groups=[GROUP_RESTORE],
          depends_on=[restore_from_backup_completed],
          runs_after=[verify_data_in_restored_instance])
    def delete_restored_instance(self):
        """Test deleting the restored instance."""
        self.test_runner.run_delete_restored_instance()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create_completed],
          runs_after=[delete_restored_instance])
    def delete_unknown_backup(self):
        """Ensure deleting an unknown backup fails."""
        self.test_runner.run_delete_unknown_backup()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create_completed],
          runs_after=[delete_unknown_backup])
    def delete_backup_unauthorized_user(self):
        """Ensure deleting backup by an unauthorized user fails."""
        self.test_runner.run_delete_backup_unauthorized_user()
    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create_completed],
          runs_after=[delete_backup_unauthorized_user])
    def delete_backup(self):
        """Test deleting the backup."""
        self.test_runner.run_delete_backup()

    @test(depends_on=[delete_backup])
    def check_for_incremental_backup(self):
        """Test that backup children are deleted."""
        self.test_runner.run_check_for_incremental_backup()
trove-5.0.0/trove/tests/scenario/groups/__init__.py0000664000567000056710000000000012701410316023531 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/groups/negative_cluster_actions_group.py0000664000567000056710000000254212701410316030306 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.negative_cluster_actions_group"


@test(groups=[GROUP])
class NegativeClusterActionsGroup(TestGroup):

    def __init__(self):
        super(NegativeClusterActionsGroup, self).__init__(
            'negative_cluster_actions_runners',
            'NegativeClusterActionsRunner')

    @test
    def create_constrained_size_cluster(self):
        """Ensure creating a cluster with wrong number of nodes fails."""
        self.test_runner.run_create_constrained_size_cluster()

    @test
    def create_heterogeneous_cluster(self):
        """Ensure creating a cluster with unequal nodes fails."""
        self.test_runner.run_create_heterogeneous_cluster()
trove-5.0.0/trove/tests/scenario/groups/instance_create_group.py0000664000567000056710000000536612701410316026357 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.api.instances import InstanceSetup
from trove.tests import PRE_INSTANCES
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.instance_create_group"


@test(depends_on_classes=[InstanceSetup],
      runs_after_groups=[PRE_INSTANCES],
      groups=[GROUP])
class InstanceCreateGroup(TestGroup):

    def __init__(self):
        super(InstanceCreateGroup, self).__init__(
            'instance_create_runners', 'InstanceCreateRunner')

    @test
    def create_empty_instance(self):
        """Create an empty instance."""
        self.test_runner.run_empty_instance_create()

    @test(runs_after=[create_empty_instance])
    def create_initial_configuration(self):
        """Create a configuration group for a new initialized instance."""
        self.test_runner.run_initial_configuration_create()

    @test(runs_after=[create_initial_configuration])
    def create_initialized_instance(self):
        """Create an instance with initial properties."""
        self.test_runner.run_initialized_instance_create()

    @test(runs_after=[create_initialized_instance])
    def wait_for_instances(self):
        """Wait for all instances to become active."""
        self.test_runner.wait_for_created_instances()

    @test(depends_on=[wait_for_instances])
    def add_initialized_instance_data(self):
        """Add data to the initialized instance."""
        self.test_runner.run_add_initialized_instance_data()

    @test(runs_after=[add_initialized_instance_data])
    def validate_initialized_instance(self):
        """Validate the initialized instance data and properties."""
        self.test_runner.run_validate_initialized_instance()

    @test(runs_after=[validate_initialized_instance])
    def delete_initialized_instance(self):
        """Delete the initialized instance."""
        self.test_runner.run_initialized_instance_delete()

    @test(depends_on=[create_initial_configuration,
                      delete_initialized_instance])
    def delete_initial_configuration(self):
        """Delete the initial configuration group."""
        self.test_runner.run_initial_configuration_delete()
trove-5.0.0/trove/tests/scenario/groups/root_actions_group.py0000664000567000056710000001031112701410316025717 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.root_actions_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class RootActionsGroup(TestGroup):

    def __init__(self):
        super(RootActionsGroup, self).__init__(
            'root_actions_runners', 'RootActionsRunner')
        self.backup_runner = self.get_runner(
            'backup_runners', 'BackupRunner')

    @test
    def check_root_never_enabled(self):
        """Check that root has never been enabled on the instance."""
        self.test_runner.run_check_root_never_enabled()

    @test(depends_on=[check_root_never_enabled])
    def disable_root_before_enabled(self):
        """Ensure disable fails if root was never enabled."""
        self.test_runner.run_disable_root_before_enabled()

    @test(depends_on=[check_root_never_enabled],
          runs_after=[disable_root_before_enabled])
    def enable_root_no_password(self):
        """Enable root (without specifying a password)."""
        self.test_runner.run_enable_root_no_password()

    @test(depends_on=[enable_root_no_password])
    def check_root_enabled(self):
        """Check that root is now enabled."""
        self.test_runner.run_check_root_enabled()

    @test(depends_on=[check_root_enabled])
    def backup_root_enabled_instance(self):
        """Backup the root-enabled instance."""
        self.backup_runner.run_backup_create()
        self.backup_runner.run_backup_create_completed()

    @test(depends_on=[backup_root_enabled_instance])
    def restore_root_enabled_instance(self):
        """Restore the root-enabled instance."""
        self.backup_runner.run_restore_from_backup()

    @test(depends_on=[check_root_enabled])
    def delete_root(self):
        """Ensure an attempt to delete the root user fails."""
        self.test_runner.run_delete_root()

    @test(depends_on=[check_root_never_enabled],
          runs_after=[delete_root])
    def enable_root_with_password(self):
        """Enable root (with a given password)."""
        self.test_runner.run_enable_root_with_password()

    @test(depends_on=[enable_root_with_password])
    def check_root_still_enabled(self):
        """Check that root is still enabled."""
        self.test_runner.run_check_root_still_enabled()

    @test(depends_on=[check_root_enabled],
          runs_after=[check_root_still_enabled])
    def disable_root(self):
        """Disable root."""
        self.test_runner.run_disable_root()

    @test(depends_on=[disable_root])
    def check_root_still_enabled_after_disable(self):
        """Check that root is still marked as enabled after disable."""
        self.test_runner.run_check_root_still_enabled_after_disable()

    @test(depends_on=[restore_root_enabled_instance],
          runs_after=[check_root_still_enabled_after_disable])
    def wait_for_restored_instance(self):
        """Wait until restoring a root-enabled instance completes."""
        self.backup_runner.run_restore_from_backup_completed()

    @test(depends_on=[wait_for_restored_instance])
    def check_root_enabled_after_restore(self):
        """Check that root is also enabled on the restored instance."""
        instance_id = self.backup_runner.restore_instance_id
        self.test_runner.run_check_root_enabled_after_restore(instance_id)

    @test(depends_on=[wait_for_restored_instance],
          runs_after=[check_root_enabled_after_restore])
    def delete_restored_instance(self):
        """Delete the restored root-enabled instance."""
        self.backup_runner.run_delete_restored_instance()
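# Illustrative note (not part of the Trove source): RootActionsGroup above
# drives a second runner (BackupRunner) alongside its own RootActionsRunner,
# sharing state such as 'restore_instance_id' between the two so one scenario
# can back up and restore a root-enabled instance.  Any TestGroup subclass can
# obtain extra runners the same way, for example (hypothetical):
#
#     self.extra_backup_runner = self.get_runner(
#         'backup_runners', 'BackupRunner')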
trove-5.0.0/trove/tests/scenario/groups/cluster_actions_group.py0000664000567000056710000001217212701410316026424 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.cluster_actions_group"


@test(groups=[GROUP])
class ClusterActionsGroup(TestGroup):

    def __init__(self):
        super(ClusterActionsGroup, self).__init__(
            'cluster_actions_runners', 'ClusterActionsRunner')

    @test
    def cluster_create(self):
        """Create a cluster."""
        self.test_runner.run_cluster_create()

    @test(depends_on=[cluster_create])
    def add_initial_cluster_data(self):
        """Add data to cluster."""
        self.test_runner.run_add_initial_cluster_data()

    @test(depends_on=[add_initial_cluster_data])
    def verify_initial_cluster_data(self):
        """Verify the initial data exists on cluster."""
        self.test_runner.run_verify_initial_cluster_data()

    @test(depends_on=[cluster_create])
    def cluster_root_enable(self):
        """Enable root on the cluster."""
        self.test_runner.run_cluster_root_enable()

    @test(depends_on=[cluster_root_enable])
    def verify_cluster_root_enable(self):
        """Verify root was enabled on the cluster."""
        self.test_runner.run_verify_cluster_root_enable()

    @test(depends_on=[cluster_create],
          runs_after=[verify_initial_cluster_data,
                      verify_cluster_root_enable])
    def cluster_grow(self):
        """Grow cluster."""
        self.test_runner.run_cluster_grow()

    @test(depends_on=[cluster_grow])
    def verify_cluster_root_enable_after_grow(self):
        """Verify root is still enabled after grow."""
        self.test_runner.run_verify_cluster_root_enable()

    @test(depends_on=[cluster_grow, add_initial_cluster_data])
    def verify_initial_cluster_data_after_grow(self):
        """Verify the initial data still exists after cluster grow."""
        self.test_runner.run_verify_initial_cluster_data()

    @test(depends_on=[cluster_grow],
          runs_after=[verify_initial_cluster_data_after_grow])
    def add_extra_cluster_data_after_grow(self):
        """Add more data to cluster."""
        self.test_runner.run_add_extra_cluster_data()

    @test(depends_on=[add_extra_cluster_data_after_grow])
    def verify_extra_cluster_data_after_grow(self):
        """Verify the data added after cluster grow."""
        self.test_runner.run_verify_extra_cluster_data()

    @test(depends_on=[add_extra_cluster_data_after_grow],
          runs_after=[verify_extra_cluster_data_after_grow])
    def remove_extra_cluster_data_after_grow(self):
        """Remove the data added after cluster grow."""
        self.test_runner.run_remove_extra_cluster_data()

    @test(depends_on=[cluster_create],
          runs_after=[remove_extra_cluster_data_after_grow,
                      verify_cluster_root_enable_after_grow])
    def cluster_shrink(self):
        """Shrink cluster."""
        self.test_runner.run_cluster_shrink()

    @test(depends_on=[cluster_shrink])
    def verify_cluster_root_enable_after_shrink(self):
        """Verify root is still enabled after shrink."""
        self.test_runner.run_verify_cluster_root_enable()

    @test(depends_on=[cluster_shrink, add_initial_cluster_data])
    def verify_initial_cluster_data_after_shrink(self):
        """Verify the initial data still exists after cluster shrink."""
        self.test_runner.run_verify_initial_cluster_data()

    @test(depends_on=[cluster_shrink],
          runs_after=[verify_initial_cluster_data_after_shrink])
    def add_extra_cluster_data_after_shrink(self):
        """Add more data to cluster."""
        self.test_runner.run_add_extra_cluster_data()
    @test(depends_on=[add_extra_cluster_data_after_shrink])
    def verify_extra_cluster_data_after_shrink(self):
        """Verify the data added after cluster shrink."""
        self.test_runner.run_verify_extra_cluster_data()

    @test(depends_on=[add_extra_cluster_data_after_shrink],
          runs_after=[verify_extra_cluster_data_after_shrink])
    def remove_extra_cluster_data_after_shrink(self):
        """Remove the data added after cluster shrink."""
        self.test_runner.run_remove_extra_cluster_data()

    @test(depends_on=[add_initial_cluster_data],
          runs_after=[remove_extra_cluster_data_after_shrink])
    def remove_initial_cluster_data(self):
        """Remove the initial data from cluster."""
        self.test_runner.run_remove_initial_cluster_data()

    @test(depends_on=[cluster_create],
          runs_after=[remove_initial_cluster_data,
                      verify_cluster_root_enable_after_shrink])
    def cluster_delete(self):
        """Delete an existing cluster."""
        self.test_runner.run_cluster_delete()
trove-5.0.0/trove/tests/scenario/groups/instance_delete_group.py0000664000567000056710000000405112701410316026346 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import backup_group
from trove.tests.scenario.groups import configuration_group
from trove.tests.scenario.groups import database_actions_group
from trove.tests.scenario.groups import instance_actions_group
from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups import module_group
from trove.tests.scenario.groups import replication_group
from trove.tests.scenario.groups import root_actions_group
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.groups import user_actions_group


GROUP = "scenario.instance_delete_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP],
      runs_after_groups=[backup_group.GROUP_BACKUP,
                         configuration_group.GROUP,
                         database_actions_group.GROUP,
                         instance_actions_group.GROUP,
                         module_group.GROUP,
                         replication_group.GROUP,
                         root_actions_group.GROUP,
                         user_actions_group.GROUP])
class InstanceDeleteGroup(TestGroup):

    def __init__(self):
        super(InstanceDeleteGroup, self).__init__(
            'instance_delete_runners', 'InstanceDeleteRunner')

    @test
    def instance_delete(self):
        """Delete an existing instance."""
        self.test_runner.run_instance_delete()
trove-5.0.0/trove/tests/scenario/groups/test_group.py0000664000567000056710000001027112701410316024200 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six

from trove.common.strategies.strategy import Strategy
from trove.tests.config import CONFIG


@six.add_metaclass(abc.ABCMeta)
class TestGroup(object):

    TEST_RUNNERS_NS = 'trove.tests.scenario.runners'
    TEST_HELPERS_NS = 'trove.tests.scenario.helpers'
    TEST_HELPER_MODULE_NAME = 'test_helper'
    TEST_HELPER_BASE_NAME = 'TestHelper'

    def __init__(self, runner_module_name, runner_base_name,
                 *args, **kwargs):
        self._test_runner = self.get_runner(
            runner_module_name, runner_base_name, *args, **kwargs)

    def get_runner(self, runner_module_name, runner_base_name,
                   *args, **kwargs):
        class_prefix = self._get_test_datastore()
        runner_cls = self._load_dynamic_class(
            runner_module_name, class_prefix, runner_base_name,
            self.TEST_RUNNERS_NS)
        runner = runner_cls(*args, **kwargs)
        runner._test_helper = self.get_helper()
        return runner

    def get_helper(self):
        class_prefix = self._get_test_datastore()
        helper_cls = self._load_dynamic_class(
            self.TEST_HELPER_MODULE_NAME, class_prefix,
            self.TEST_HELPER_BASE_NAME, self.TEST_HELPERS_NS)
        return helper_cls(self._build_class_name(
            class_prefix, self.TEST_HELPER_BASE_NAME, strip_test=True))

    def _get_test_datastore(self):
        return CONFIG.dbaas_datastore

    def _load_dynamic_class(self, module_name, class_prefix, base_name,
                            namespace):
        """Try to load a datastore specific class if it exists; use the
        default otherwise.
        """
        # This is for overridden Runner classes
        impl = self._build_class_path(module_name, class_prefix, base_name)
        cls = self._load_class('runner', impl, namespace)

        if not cls:
            # This is for overridden Helper classes
            module = module_name.replace('test', class_prefix.lower())
            impl = self._build_class_path(module, class_prefix, base_name,
                                          strip_test=True)
            cls = self._load_class('helper', impl, namespace)

        if not cls:
            # Just import the base class
            impl = self._build_class_path(module_name, '', base_name)
            cls = self._load_class(None, impl, namespace)

        return cls

    def _load_class(self, load_type, impl, namespace):
        cls = None
        if not load_type or load_type in impl.lower():
            try:
                cls = Strategy.get_strategy(impl, namespace)
            except ImportError as ie:
                # Only fail silently if it's something we expect,
                # such as a missing override class.  Anything else
                # shouldn't be suppressed.
                l_msg = ie.message.lower()
                if load_type not in l_msg or (
                        'no module named' not in l_msg and
                        'cannot be found' not in l_msg):
                    raise
        return cls

    def _build_class_path(self, module_name, class_prefix, class_base,
                          strip_test=False):
        class_name = self._build_class_name(class_prefix, class_base,
                                            strip_test)
        return '%s.%s' % (module_name, class_name)

    def _build_class_name(self, class_prefix, base_name, strip_test=False):
        base = (base_name.replace('Test', '') if strip_test else base_name)
        return '%s%s' % (class_prefix.capitalize(), base)

    @property
    def test_runner(self):
        return self._test_runner
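# Illustrative sketch (not part of the Trove source): the class loading above
# resolves datastore-specific overrides purely by naming convention, via
# Strategy.get_strategy() against TEST_RUNNERS_NS.  The hypothetical function
# below only restates the candidate order that _build_class_path() and
# _build_class_name() produce for a runner lookup.

def _example_runner_candidates(module_name, base_name, datastore):
    """Return class paths in the order TestGroup would try them."""
    prefix = datastore.capitalize()
    return [
        # 1. datastore-specific override, e.g. for datastore 'mysql':
        #    backup_runners.MysqlBackupRunner
        '%s.%s%s' % (module_name, prefix, base_name),
        # 2. fall back to the plain base class:
        #    backup_runners.BackupRunner
        '%s.%s' % (module_name, base_name),
    ]

# _example_runner_candidates('backup_runners', 'BackupRunner', 'mysql')
# => ['backup_runners.MysqlBackupRunner', 'backup_runners.BackupRunner']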
trove-5.0.0/trove/tests/scenario/groups/replication_group.py0000664000567000056710000001527012701410316025536 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.replication_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class ReplicationGroup(TestGroup):
    """Test Replication functionality."""

    def __init__(self):
        super(ReplicationGroup, self).__init__(
            'replication_runners', 'ReplicationRunner')

    @test
    def add_data_for_replication(self):
        """Add data to master for initial replica setup."""
        self.test_runner.run_add_data_for_replication()

    @test(depends_on=[add_data_for_replication])
    def verify_data_for_replication(self):
        """Verify data exists on master."""
        self.test_runner.run_verify_data_for_replication()

    @test(runs_after=[verify_data_for_replication])
    def create_single_replica(self):
        """Test creating a single replica."""
        self.test_runner.run_create_single_replica()

    @test(runs_after=[create_single_replica])
    def create_multiple_replicas(self):
        """Test creating multiple replicas."""
        self.test_runner.run_create_multiple_replicas()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas])
    def add_data_to_replicate(self):
        """Add data to master to verify replication."""
        self.test_runner.run_add_data_to_replicate()

    @test(depends_on=[add_data_to_replicate])
    def verify_data_to_replicate(self):
        """Verify data exists on master."""
        self.test_runner.run_verify_data_to_replicate()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas,
                      add_data_to_replicate],
          runs_after=[verify_data_to_replicate])
    def wait_for_data_to_replicate(self):
        """Wait to ensure that the data is replicated."""
        self.test_runner.run_wait_for_data_to_replicate()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas,
                      add_data_to_replicate],
          runs_after=[wait_for_data_to_replicate])
    def verify_replica_data_orig(self):
        """Verify original data was transferred to replicas."""
        self.test_runner.run_verify_replica_data_orig()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas,
                      add_data_to_replicate],
          runs_after=[verify_replica_data_orig])
    def verify_replica_data_new(self):
        """Verify new data was transferred to replicas."""
        self.test_runner.run_verify_replica_data_new()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas],
          runs_after=[verify_replica_data_new])
    def promote_master(self):
        """Ensure promoting the master fails."""
        self.test_runner.run_promote_master()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas],
          runs_after=[promote_master])
    def eject_replica(self):
        """Ensure ejecting a non-master fails."""
        self.test_runner.run_eject_replica()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas],
          runs_after=[eject_replica])
    def eject_valid_master(self):
        """Ensure ejecting a valid master fails."""
        self.test_runner.run_eject_valid_master()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas],
          runs_after=[eject_valid_master])
    def delete_valid_master(self):
        """Ensure deleting a valid master fails."""
        self.test_runner.run_delete_valid_master()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas],
          runs_after=[delete_valid_master])
    def promote_to_replica_source(self):
        """Test promoting a replica to replica source (master)."""
        self.test_runner.run_promote_to_replica_source()
    @test(depends_on=[create_single_replica,
                      create_multiple_replicas,
                      promote_to_replica_source])
    def add_data_to_replicate2(self):
        """Add data to new master to verify replication."""
        self.test_runner.run_add_data_to_replicate2()

    @test(depends_on=[add_data_to_replicate2])
    def verify_data_to_replicate2(self):
        """Verify data exists on new master."""
        self.test_runner.run_verify_data_to_replicate2()

    @test(depends_on=[add_data_to_replicate2],
          runs_after=[verify_data_to_replicate2])
    def wait_for_data_to_replicate2(self):
        """Wait to ensure that the new data was replicated."""
        self.test_runner.run_wait_for_data_to_replicate()

    @test(depends_on=[create_single_replica,
                      create_multiple_replicas,
                      add_data_to_replicate2],
          runs_after=[wait_for_data_to_replicate2])
    def verify_replica_data_new2(self):
        """Verify data was transferred to new replicas."""
        self.test_runner.run_verify_replica_data_new2()

    @test(depends_on=[promote_to_replica_source],
          runs_after=[verify_replica_data_new2])
    def promote_original_source(self):
        """Test promoting back the original replica source."""
        self.test_runner.run_promote_original_source()

    @test(depends_on=[promote_original_source])
    def remove_replicated_data(self):
        """Remove replication data."""
        self.test_runner.run_remove_replicated_data()

    @test(depends_on=[promote_original_source],
          runs_after=[remove_replicated_data])
    def detach_replica_from_source(self):
        """Test detaching a replica from the master."""
        self.test_runner.run_detach_replica_from_source()

    @test(depends_on=[promote_original_source],
          runs_after=[detach_replica_from_source])
    def delete_detached_replica(self):
        """Test deleting the detached replica."""
        self.test_runner.run_delete_detached_replica()

    @test(runs_after=[delete_detached_replica])
    def delete_all_replicas(self):
        """Test deleting all the remaining replicas."""
        self.test_runner.run_delete_all_replicas()

    @test(runs_after=[delete_all_replicas])
    def test_backup_deleted(self):
        """Test that the created backup is now gone."""
        self.test_runner.run_test_backup_deleted()
trove-5.0.0/trove/tests/scenario/groups/instance_actions_group.py0000664000567000056710000000303112701410316026541 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.instance_actions_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class InstanceActionsGroup(TestGroup):

    def __init__(self):
        super(InstanceActionsGroup, self).__init__(
            'instance_actions_runners', 'InstanceActionsRunner')

    @test
    def instance_restart(self):
        """Restart an existing instance."""
        self.test_runner.run_instance_restart()

    @test(depends_on=[instance_restart])
    def instance_resize_volume(self):
        """Resize attached volume."""
        self.test_runner.run_instance_resize_volume()

    @test(depends_on=[instance_resize_volume])
    def instance_resize_flavor(self):
        """Resize instance flavor."""
        self.test_runner.run_instance_resize_flavor()
trove-5.0.0/trove/tests/scenario/groups/module_group.py0000664000567000056710000005077312701410316024515 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.module_group"
GROUP_MODULE_CREATE = "scenario.module_create_group"
GROUP_INSTANCE_MODULE = "scenario.instance_module_group"
GROUP_MODULE_DELETE = "scenario.module_delete_group"


@test(groups=[GROUP, GROUP_MODULE_CREATE])
class ModuleGroup(TestGroup):
    """Test Module functionality."""

    def __init__(self):
        super(ModuleGroup, self).__init__(
            'module_runners', 'ModuleRunner')

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_delete_existing(self):
        """Delete all previous test modules."""
        self.test_runner.run_module_delete_existing()

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_bad_type(self):
        """Ensure create module with invalid type fails."""
        self.test_runner.run_module_create_bad_type()

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_non_admin_auto(self):
        """Ensure create auto_apply module for non-admin fails."""
        self.test_runner.run_module_create_non_admin_auto()

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_non_admin_all_tenant(self):
        """Ensure create all tenant module for non-admin fails."""
        self.test_runner.run_module_create_non_admin_all_tenant()

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_non_admin_hidden(self):
        """Ensure create hidden module for non-admin fails."""
        self.test_runner.run_module_create_non_admin_hidden()

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_bad_datastore(self):
        """Ensure create module with invalid datastore fails."""
        self.test_runner.run_module_create_bad_datastore()

    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_bad_datastore_version(self):
        """Ensure create module with invalid datastore_version fails."""
        self.test_runner.run_module_create_bad_datastore_version()
    @test(groups=[GROUP, GROUP_MODULE_CREATE])
    def module_create_missing_datastore(self):
        """Ensure create module with missing datastore fails."""
        self.test_runner.run_module_create_missing_datastore()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          runs_after=[module_delete_existing])
    def module_create(self):
        """Check that create module works."""
        self.test_runner.run_module_create()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create])
    def module_create_dupe(self):
        """Ensure create with duplicate info fails."""
        self.test_runner.run_module_create_dupe()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          runs_after=[module_create])
    def module_create_bin(self):
        """Check that create module with binary contents works."""
        self.test_runner.run_module_create_bin()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          runs_after=[module_create_bin])
    def module_create_bin2(self):
        """Check that create module with other binary contents works."""
        self.test_runner.run_module_create_bin2()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create])
    def module_show(self):
        """Check that show module works."""
        self.test_runner.run_module_show()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create])
    def module_show_unauth_user(self):
        """Ensure that show module for unauth user fails."""
        self.test_runner.run_module_show_unauth_user()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2])
    def module_list(self):
        """Check that list modules works."""
        self.test_runner.run_module_list()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2])
    def module_list_unauth_user(self):
        """Ensure that list module for unauth user fails."""
        self.test_runner.run_module_list_unauth_user()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_list])
    def module_create_admin_all(self):
        """Check that create module works with all admin options."""
        self.test_runner.run_module_create_admin_all()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_admin_all])
    def module_create_admin_hidden(self):
        """Check that create module works with hidden option."""
        self.test_runner.run_module_create_admin_hidden()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_admin_hidden])
    def module_create_admin_auto(self):
        """Check that create module works with auto option."""
        self.test_runner.run_module_create_admin_auto()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_admin_auto])
    def module_create_admin_live_update(self):
        """Check that create module works with live-update option."""
        self.test_runner.run_module_create_admin_live_update()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_admin_live_update])
    def module_create_datastore(self):
        """Check that create module with datastore works."""
        self.test_runner.run_module_create_datastore()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_datastore])
    def module_create_ds_version(self):
        """Check that create module with ds version works."""
        self.test_runner.run_module_create_ds_version()
    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_ds_version])
    def module_create_all_tenant(self):
        """Check that create 'all' tenants with datastore module works."""
        self.test_runner.run_module_create_all_tenant()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create, module_create_bin, module_create_bin2],
          runs_after=[module_create_all_tenant, module_list_unauth_user])
    def module_create_different_tenant(self):
        """Check that create with same name on different tenant works."""
        self.test_runner.run_module_create_different_tenant()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create_all_tenant],
          runs_after=[module_create_different_tenant])
    def module_list_again(self):
        """Check that list modules skips invisible modules."""
        self.test_runner.run_module_list_again()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create_ds_version],
          runs_after=[module_list_again])
    def module_list_ds(self):
        """Check that list modules by datastore works."""
        self.test_runner.run_module_list_ds()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create_ds_version],
          runs_after=[module_list_ds])
    def module_list_ds_all(self):
        """Check that list modules by all datastores works."""
        self.test_runner.run_module_list_ds_all()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create_admin_hidden])
    def module_show_invisible(self):
        """Ensure that show invisible module for non-admin fails."""
        self.test_runner.run_module_show_invisible()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create_all_tenant],
          runs_after=[module_create_different_tenant])
    def module_list_admin(self):
        """Check that list modules for admin works."""
        self.test_runner.run_module_list_admin()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_create],
          runs_after=[module_show])
    def module_update(self):
        """Check that update module works."""
        self.test_runner.run_module_update()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update])
    def module_update_same_contents(self):
        """Check that update module with same contents works."""
        self.test_runner.run_module_update_same_contents()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_same_contents])
    def module_update_auto_toggle(self):
        """Check that update module works for auto apply toggle."""
        self.test_runner.run_module_update_auto_toggle()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_auto_toggle])
    def module_update_all_tenant_toggle(self):
        """Check that update module works for all tenant toggle."""
        self.test_runner.run_module_update_all_tenant_toggle()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_all_tenant_toggle])
    def module_update_invisible_toggle(self):
        """Check that update module works for invisible toggle."""
        self.test_runner.run_module_update_invisible_toggle()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_unauth(self):
        """Ensure update module for unauth user fails."""
        self.test_runner.run_module_update_unauth()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_auto(self):
        """Ensure update module to auto_apply for non-admin fails."""
        self.test_runner.run_module_update_non_admin_auto()
    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_auto_off(self):
        """Ensure update module to auto_apply off for non-admin fails."""
        self.test_runner.run_module_update_non_admin_auto_off()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_auto_any(self):
        """Ensure any update module to auto_apply for non-admin fails."""
        self.test_runner.run_module_update_non_admin_auto_any()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_all_tenant(self):
        """Ensure update module to all tenant for non-admin fails."""
        self.test_runner.run_module_update_non_admin_all_tenant()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_all_tenant_off(self):
        """Ensure update module to all tenant off for non-admin fails."""
        self.test_runner.run_module_update_non_admin_all_tenant_off()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_all_tenant_any(self):
        """Ensure any update module to all tenant for non-admin fails."""
        self.test_runner.run_module_update_non_admin_all_tenant_any()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_invisible(self):
        """Ensure update module to invisible for non-admin fails."""
        self.test_runner.run_module_update_non_admin_invisible()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_invisible_off(self):
        """Ensure update module to invisible off for non-admin fails."""
        self.test_runner.run_module_update_non_admin_invisible_off()

    @test(groups=[GROUP, GROUP_MODULE_CREATE],
          depends_on=[module_update],
          runs_after=[module_update_invisible_toggle])
    def module_update_non_admin_invisible_any(self):
        """Ensure any update module to invisible for non-admin fails."""
        self.test_runner.run_module_update_non_admin_invisible_any()


@test(depends_on_groups=[instance_create_group.GROUP, GROUP_MODULE_CREATE],
      groups=[GROUP, GROUP_INSTANCE_MODULE])
class ModuleInstanceGroup(TestGroup):
    """Test Instance Module functionality."""

    def __init__(self):
        super(ModuleInstanceGroup, self).__init__(
            'module_runners', 'ModuleRunner')

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE])
    def module_list_instance_empty(self):
        """Check that the instance has no modules associated."""
        self.test_runner.run_module_list_instance_empty()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          runs_after=[module_list_instance_empty])
    def module_instances_empty(self):
        """Check that the module hasn't been applied to any instances."""
        self.test_runner.run_module_instances_empty()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          runs_after=[module_instances_empty])
    def module_query_empty(self):
        """Check that the instance has no modules applied."""
        self.test_runner.run_module_query_empty()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          runs_after=[module_query_empty])
    def module_apply(self):
        """Check that module-apply works."""
        self.test_runner.run_module_apply()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[module_apply])
    def module_list_instance_after_apply(self):
        """Check that the instance has one module associated."""
        self.test_runner.run_module_list_instance_after_apply()
    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[module_apply])
    def module_query_after_apply(self):
        """Check that module-query works."""
        self.test_runner.run_module_query_after_apply()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[module_apply],
          runs_after=[module_query_after_apply])
    def create_inst_with_mods(self):
        """Check that creating an instance with modules works."""
        self.test_runner.run_create_inst_with_mods()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[module_apply])
    def module_delete_applied(self):
        """Ensure that deleting an applied module fails."""
        self.test_runner.run_module_delete_applied()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[module_apply],
          runs_after=[module_list_instance_after_apply,
                      module_query_after_apply])
    def module_remove(self):
        """Check that module-remove works."""
        self.test_runner.run_module_remove()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[module_remove])
    def module_query_empty_after(self):
        """Check that the instance has no modules applied after remove."""
        self.test_runner.run_module_query_empty()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[create_inst_with_mods],
          runs_after=[module_query_empty_after])
    def wait_for_inst_with_mods(self):
        """Wait for create instance with modules to finish."""
        self.test_runner.run_wait_for_inst_with_mods()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[wait_for_inst_with_mods])
    def module_query_after_inst_create(self):
        """Check that module-query works on new instance."""
        self.test_runner.run_module_query_after_inst_create()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[wait_for_inst_with_mods],
          runs_after=[module_query_after_inst_create])
    def module_retrieve_after_inst_create(self):
        """Check that module-retrieve works on new instance."""
        self.test_runner.run_module_retrieve_after_inst_create()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[wait_for_inst_with_mods],
          runs_after=[module_retrieve_after_inst_create])
    def module_query_after_inst_create_admin(self):
        """Check that module-query works for admin."""
        self.test_runner.run_module_query_after_inst_create_admin()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[wait_for_inst_with_mods],
          runs_after=[module_query_after_inst_create_admin])
    def module_retrieve_after_inst_create_admin(self):
        """Check that module-retrieve works for admin."""
        self.test_runner.run_module_retrieve_after_inst_create_admin()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[wait_for_inst_with_mods],
          runs_after=[module_retrieve_after_inst_create_admin])
    def module_delete_auto_applied(self):
        """Ensure that module-delete on auto-applied module fails."""
        self.test_runner.run_module_delete_auto_applied()

    @test(groups=[GROUP, GROUP_INSTANCE_MODULE],
          depends_on=[wait_for_inst_with_mods],
          runs_after=[module_delete_auto_applied])
    def delete_inst_with_mods(self):
        """Check that instance with module can be deleted."""
        self.test_runner.run_delete_inst_with_mods()


@test(depends_on_groups=[GROUP_MODULE_CREATE],
      groups=[GROUP, GROUP_MODULE_DELETE])
class ModuleDeleteGroup(TestGroup):
    """Test Module Delete functionality."""

    def __init__(self):
        super(ModuleDeleteGroup, self).__init__(
            'module_runners', 'ModuleRunner')

    @test(groups=[GROUP, GROUP_MODULE_DELETE])
    def module_delete_non_existent(self):
        """Ensure delete non-existent module fails."""
        self.test_runner.run_module_delete_non_existent()

    @test(groups=[GROUP, GROUP_MODULE_DELETE])
    def module_delete_unauth_user(self):
        """Ensure delete module by unauth user fails."""
        self.test_runner.run_module_delete_unauth_user()

    @test(groups=[GROUP, GROUP_MODULE_DELETE],
          runs_after=[module_delete_unauth_user])
    def module_delete_hidden_by_non_admin(self):
        """Ensure delete hidden module by non-admin user fails."""
        self.test_runner.run_module_delete_hidden_by_non_admin()

    @test(groups=[GROUP, GROUP_MODULE_DELETE],
          runs_after=[module_delete_hidden_by_non_admin])
    def module_delete_all_tenant_by_non_admin(self):
        """Ensure delete all tenant module by non-admin user fails."""
        self.test_runner.run_module_delete_all_tenant_by_non_admin()

    @test(groups=[GROUP, GROUP_MODULE_DELETE],
          runs_after=[module_delete_all_tenant_by_non_admin])
    def module_delete_auto_by_non_admin(self):
        """Ensure delete auto-apply module by non-admin user fails."""
        self.test_runner.run_module_delete_auto_by_non_admin()

    @test(groups=[GROUP, GROUP_MODULE_DELETE],
          runs_after=[module_delete_auto_by_non_admin])
    def module_delete(self):
        """Check that delete module works."""
        self.test_runner.run_module_delete()

    @test(groups=[GROUP, GROUP_MODULE_DELETE],
          runs_after=[module_delete])
    def module_delete_admin(self):
        """Check that delete module works for admin."""
        self.test_runner.run_module_delete_admin()

    @test(groups=[GROUP, GROUP_MODULE_DELETE],
          runs_after=[module_delete_admin])
    def module_delete_remaining(self):
        """Delete all remaining test modules."""
        self.test_runner.run_module_delete_existing()
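# Note (not part of the Trove source): the three module classes above form a
# pipeline through their group constants.  ModuleGroup populates
# GROUP_MODULE_CREATE; ModuleInstanceGroup declares
# depends_on_groups=[instance_create_group.GROUP, GROUP_MODULE_CREATE]; and
# ModuleDeleteGroup declares depends_on_groups=[GROUP_MODULE_CREATE].  As a
# result, proboscis starts the instance-module and delete phases only after
# every create-phase test has completed.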
trove-5.0.0/trove/tests/scenario/groups/database_actions_group.py0000664000567000056710000001032612701410316026506 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.database_actions_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class DatabaseActionsGroup(TestGroup):

    def __init__(self):
        super(DatabaseActionsGroup, self).__init__(
            'database_actions_runners', 'DatabaseActionsRunner')
        self.instance_create_runner = self.get_runner(
            'instance_create_runners', 'InstanceCreateRunner')

    @test
    def create_initialized_instance(self):
        """Create an instance with initial databases."""
        self.instance_create_runner.run_initialized_instance_create(
            with_dbs=True, with_users=False, configuration_id=None)

    @test(runs_after=[create_initialized_instance])
    def create_databases(self):
        """Create databases on an existing instance."""
        self.test_runner.run_databases_create()

    @test(depends_on=[create_databases])
    def list_databases(self):
        """List the created databases."""
        self.test_runner.run_databases_list()

    @test(depends_on=[create_databases],
          runs_after=[list_databases])
    def create_database_with_no_attributes(self):
        """Ensure creating a database with blank specification fails."""
        self.test_runner.run_database_create_with_no_attributes()

    @test(depends_on=[create_databases],
          runs_after=[create_database_with_no_attributes])
    def create_database_with_blank_name(self):
        """Ensure creating a database with blank name fails."""
        self.test_runner.run_database_create_with_blank_name()

    @test(depends_on=[create_databases],
          runs_after=[create_database_with_blank_name])
    def create_existing_database(self):
        """Ensure creating an existing database fails."""
        self.test_runner.run_existing_database_create()

    @test(depends_on=[create_databases],
          runs_after=[create_existing_database])
    def delete_database(self):
        """Delete the created databases."""
        self.test_runner.run_database_delete()

    @test(runs_after=[delete_database])
    def delete_nonexisting_database(self):
        """Delete non-existing databases."""
        self.test_runner.run_nonexisting_database_delete()

    @test(runs_after=[delete_nonexisting_database])
    def create_system_database(self):
        """Ensure creating a system database fails."""
        self.test_runner.run_system_database_create()

    @test(runs_after=[create_system_database])
    def delete_system_database(self):
        """Ensure deleting a system database fails."""
        self.test_runner.run_system_database_delete()

    @test(depends_on=[create_initialized_instance],
          runs_after=[delete_system_database])
    def wait_for_instances(self):
        """Wait for all instances to become active."""
        self.instance_create_runner.wait_for_created_instances()

    @test(depends_on=[wait_for_instances])
    def add_initialized_instance_data(self):
        """Add data to the initialized instance."""
        self.instance_create_runner.run_add_initialized_instance_data()

    @test(runs_after=[add_initialized_instance_data])
    def validate_initialized_instance(self):
        """Validate the initialized instance data and properties."""
        self.instance_create_runner.run_validate_initialized_instance()

    @test(runs_after=[validate_initialized_instance])
    def delete_initialized_instance(self):
        """Delete the initialized instance."""
        self.instance_create_runner.run_initialized_instance_delete()
trove-5.0.0/trove/tests/scenario/groups/guest_log_group.py0000664000567000056710000002215712701410316025217 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import test

from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup


GROUP = "scenario.guest_log_group"


@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class GuestLogGroup(TestGroup):
    """Test Guest Log functionality."""

    def __init__(self):
        super(GuestLogGroup, self).__init__(
            'guest_log_runners', 'GuestLogRunner')

    @test
    def test_log_list(self):
        """Test that log-list works."""
        self.test_runner.run_test_log_list()

    @test
    def test_admin_log_list(self):
        """Test that log-list works for admin user."""
        self.test_runner.run_test_admin_log_list()

    @test
    def test_log_show(self):
        """Test that log-show works on USER log."""
        self.test_runner.run_test_log_show()

    @test
    def test_log_enable_sys(self):
        """Ensure log-enable on SYS log fails."""
        self.test_runner.run_test_log_enable_sys()

    @test
    def test_log_disable_sys(self):
        """Ensure log-disable on SYS log fails."""
        self.test_runner.run_test_log_disable_sys()

    @test
    def test_log_show_unauth_user(self):
        """Ensure log-show by unauth client on USER log fails."""
        self.test_runner.run_test_log_show_unauth_user()

    @test
    def test_log_list_unauth_user(self):
        """Ensure log-list by unauth client on USER log fails."""
        self.test_runner.run_test_log_list_unauth_user()

    @test
    def test_log_generator_unauth_user(self):
        """Ensure log-generator by unauth client on USER log fails."""
        self.test_runner.run_test_log_generator_unauth_user()

    @test
    def test_log_generator_publish_unauth_user(self):
        """Ensure log-generator by unauth client with publish fails."""
        self.test_runner.run_test_log_generator_publish_unauth_user()

    @test
    def test_log_show_unexposed_user(self):
        """Ensure log-show on unexposed log fails for auth client."""
        self.test_runner.run_test_log_show_unexposed_user()

    @test
    def test_log_enable_unexposed_user(self):
        """Ensure log-enable on unexposed log fails for auth client."""
        self.test_runner.run_test_log_enable_unexposed_user()

    @test
    def test_log_disable_unexposed_user(self):
        """Ensure log-disable on unexposed log fails for auth client."""
        self.test_runner.run_test_log_disable_unexposed_user()

    @test
    def test_log_publish_unexposed_user(self):
        """Ensure log-publish on unexposed log fails for auth client."""
        self.test_runner.run_test_log_publish_unexposed_user()

    @test
    def test_log_discard_unexposed_user(self):
        """Ensure log-discard on unexposed log fails for auth client."""
        self.test_runner.run_test_log_discard_unexposed_user()

    @test(runs_after=[test_log_show])
    def test_log_enable_user(self):
        """Test log-enable on USER log."""
        self.test_runner.run_test_log_enable_user()

    @test(runs_after=[test_log_enable_user])
    def test_log_enable_flip_user(self):
        """Test that flipping restart-required log-enable works."""
        self.test_runner.run_test_log_enable_flip_user()

    @test(runs_after=[test_log_enable_flip_user])
    def test_restart_datastore(self):
        """Test restart datastore if required."""
        self.test_runner.run_test_restart_datastore()

    @test(runs_after=[test_restart_datastore])
    def test_wait_for_restart(self):
        """Wait for restart to complete."""
        self.test_runner.run_test_wait_for_restart()
    @test(runs_after=[test_wait_for_restart])
    def test_log_publish_user(self):
        """Test log-publish on USER log."""
        self.test_runner.run_test_log_publish_user()

    @test(runs_after=[test_log_publish_user])
    def test_add_data(self):
        """Add data for second log-publish on USER log."""
        self.test_runner.run_test_add_data()

    @test(runs_after=[test_add_data])
    def test_verify_data(self):
        """Verify data for second log-publish on USER log."""
        self.test_runner.run_test_verify_data()

    @test(runs_after=[test_verify_data])
    def test_log_publish_again_user(self):
        """Test log-publish again on USER log."""
        self.test_runner.run_test_log_publish_again_user()

    @test(runs_after=[test_log_publish_again_user])
    def test_log_generator_user(self):
        """Test log-generator on USER log."""
        self.test_runner.run_test_log_generator_user()

    @test(runs_after=[test_log_generator_user])
    def test_log_generator_publish_user(self):
        """Test log-generator with publish on USER log."""
        self.test_runner.run_test_log_generator_publish_user()

    @test(runs_after=[test_log_generator_publish_user])
    def test_log_generator_swift_client_user(self):
        """Test log-generator on USER log with passed-in Swift client."""
        self.test_runner.run_test_log_generator_swift_client_user()

    @test(runs_after=[test_log_generator_swift_client_user])
    def test_add_data_again(self):
        """Add more data for log-generator row-by-row test on USER log."""
        self.test_runner.run_test_add_data_again()

    @test(runs_after=[test_add_data_again])
    def test_verify_data_again(self):
        """Verify data for log-generator row-by-row test on USER log."""
        self.test_runner.run_test_verify_data_again()

    @test(runs_after=[test_verify_data_again])
    def test_log_generator_user_by_row(self):
        """Test log-generator on USER log row-by-row."""
        self.test_runner.run_test_log_generator_user_by_row()

    @test(depends_on=[test_log_publish_user],
          runs_after=[test_log_generator_user_by_row])
    def test_log_save_user(self):
        """Test log-save on USER log."""
        self.test_runner.run_test_log_save_user()

    @test(depends_on=[test_log_publish_user],
          runs_after=[test_log_save_user])
    def test_log_save_publish_user(self):
        """Test log-save on USER log with publish."""
        self.test_runner.run_test_log_save_publish_user()

    @test(runs_after=[test_log_save_publish_user])
    def test_log_discard_user(self):
        """Test log-discard on USER log."""
        self.test_runner.run_test_log_discard_user()

    @test(runs_after=[test_log_discard_user])
    def test_log_disable_user(self):
        """Test log-disable on USER log."""
        self.test_runner.run_test_log_disable_user()

    @test(runs_after=[test_log_disable_user])
    def test_restart_datastore_again(self):
        """Test restart datastore again if required."""
        self.test_runner.run_test_restart_datastore()

    @test(runs_after=[test_restart_datastore_again])
    def test_wait_for_restart_again(self):
        """Wait for restart to complete again."""
        self.test_runner.run_test_wait_for_restart()

    @test
    def test_log_show_sys(self):
        """Test that log-show works for SYS log."""
        self.test_runner.run_test_log_show_sys()

    @test(runs_after=[test_log_show_sys])
    def test_log_publish_sys(self):
        """Test log-publish on SYS log."""
        self.test_runner.run_test_log_publish_sys()

    @test(runs_after=[test_log_publish_sys])
    def test_log_publish_again_sys(self):
        """Test log-publish again on SYS log."""
        self.test_runner.run_test_log_publish_again_sys()

    @test(depends_on=[test_log_publish_again_sys])
    def test_log_generator_sys(self):
        """Test log-generator on SYS log."""
        self.test_runner.run_test_log_generator_sys()
"""Test log-generator with publish on SYS log.""" self.test_runner.run_test_log_generator_publish_sys() @test(depends_on=[test_log_publish_sys], runs_after=[test_log_generator_publish_sys]) def test_log_generator_swift_client_sys(self): """Test log-generator on SYS log with passed-in Swift client.""" self.test_runner.run_test_log_generator_swift_client_sys() @test(depends_on=[test_log_publish_sys], runs_after=[test_log_generator_swift_client_sys]) def test_log_save_sys(self): """Test log-save on SYS log.""" self.test_runner.run_test_log_save_sys() @test(runs_after=[test_log_save_sys]) def test_log_save_publish_sys(self): """Test log-save on SYS log with publish.""" self.test_runner.run_test_log_save_publish_sys() @test(runs_after=[test_log_save_publish_sys]) def test_log_discard_sys(self): """Test log-discard on SYS log.""" self.test_runner.run_test_log_discard_sys() trove-5.0.0/trove/tests/scenario/helpers/0000775000567000056710000000000012701410521021553 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/helpers/mariadb_helper.py0000664000567000056710000000211512701410316025064 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.mysql_helper import MysqlHelper class MariadbHelper(MysqlHelper): def __init__(self, expected_override_name): super(MariadbHelper, self).__init__(expected_override_name) # Mariadb currently does not support configuration groups. # see: bug/1532256 def get_dynamic_group(self): return dict() def get_non_dynamic_group(self): return dict() def get_invalid_groups(self): return [] trove-5.0.0/trove/tests/scenario/helpers/pxc_helper.py0000664000567000056710000000150612701410316024262 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.mysql_helper import MysqlHelper class PxcHelper(MysqlHelper): def __init__(self, expected_override_name): super(PxcHelper, self).__init__(expected_override_name) trove-5.0.0/trove/tests/scenario/helpers/__init__.py0000664000567000056710000000000012701410316023654 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/tests/scenario/helpers/test_helper.py0000664000567000056710000004121212701410316024445 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from enum import Enum import inspect from proboscis import SkipTest from time import sleep class DataType(Enum): """ Represent the type of data to add to a datastore. This allows for multiple 'states' of data that can be verified after actions are performed by Trove. If new entries are added here, sane values should be added to the _fn_data dictionary defined in TestHelper. """ # micro amount of data, useful for testing datastore logging, etc. micro = 1 # another micro dataset (also for datastore logging) micro2 = 2 # very tiny amount of data, useful for testing replication # propagation, etc. tiny = 3 # another tiny dataset (also for replication propagation) tiny2 = 4 # small amount of data (this can be added to each instance # after creation, for example). small = 5 # large data, enough to make creating a backup take 20s or more. large = 6 class TestHelper(object): """ Base class for all 'Helper' classes. The Helper classes are designed to do datastore specific work that can be used by multiple runner classes. Things like adding data to datastores and verifying data or internal database states, etc. should be handled by these classes. """ # Define the actions that can be done on each DataType. When adding # a new action, remember to modify _data_fns FN_ADD = 'add' FN_REMOVE = 'remove' FN_VERIFY = 'verify' FN_TYPES = [FN_ADD, FN_REMOVE, FN_VERIFY] # Artificial 'DataType' name to use for the methods that do the # actual data manipulation work. DT_ACTUAL = 'actual' def __init__(self, expected_override_name): """Initialize the helper class by creating a number of stub functions that each datastore specific class can choose to override. Basically, the functions are of the form: {FN_TYPE}_{DataType.name}_data For example: add_tiny_data add_small_data remove_small_data verify_large_data and so on. Add and remove actions throw a SkipTest if not implemented, and verify actions by default do nothing. These methods, by default, call the corresponding *_actual_data() passing in 'data_label', 'data_start' and 'data_size' as defined for each DataType in the dictionary below. """ super(TestHelper, self).__init__() self._expected_override_name = expected_override_name # For building data access functions # name/fn pairs for each action self._data_fns = {self.FN_ADD: {}, self.FN_REMOVE: {}, self.FN_VERIFY: {}} # Pattern used to create the data functions. The first parameter # is the function type (FN_TYPE), the second is the DataType # or DT_ACTUAL. self.data_fn_pattern = '%s_%s_data' # Values to distinguish between the different DataTypes. If these # values don't work for a datastore, it will need to override # the auto-generated {FN_TYPE}_{DataType.name}_data method.
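        # For reference, with the defaults below the generated wrappers
        # resolve to the overridable '*_actual_data' hooks roughly like
        # this (a sketch; 'host' is whatever the caller passes in):
        #
        #     helper.add_data(DataType.tiny, host)
        #         -> add_tiny_data(host)                          # generated
        #             -> add_actual_data('tiny', 1000, 100, host)  # override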
self.DATA_START = 'start' self.DATA_SIZE = 'size' self._fn_data = { DataType.micro.name: { self.DATA_START: 100, self.DATA_SIZE: 10}, DataType.micro2.name: { self.DATA_START: 200, self.DATA_SIZE: 10}, DataType.tiny.name: { self.DATA_START: 1000, self.DATA_SIZE: 100}, DataType.tiny2.name: { self.DATA_START: 2000, self.DATA_SIZE: 100}, DataType.small.name: { self.DATA_START: 10000, self.DATA_SIZE: 1000}, DataType.large.name: { self.DATA_START: 100000, self.DATA_SIZE: 100000}, } self._build_data_fns() ################# # Utility methods ################# def get_class_name(self): """Builds a string of the expected class name, plus the actual one being used if it's not the same. """ class_name_str = "'%s'" % self._expected_override_name if self._expected_override_name != self.__class__.__name__: class_name_str += ' (using %s)' % self.__class__.__name__ return class_name_str ################ # Client related ################ def get_client(self, host, *args, **kwargs): """Gets the datastore client. This isn't cached as the database may be restarted in between calls, causing lost connection errors. """ return self.create_client(host, *args, **kwargs) def create_client(self, host, *args, **kwargs): """Create a datastore client. This is datastore specific, so this method should be overridden if datastore access is desired. """ raise SkipTest('No client defined') def get_helper_credentials(self): """Return the credentials that the client will be using to access the database. """ return {'name': None, 'password': None, 'database': None} def get_helper_credentials_root(self): """Return the credentials that the client will be using to access the database as root. """ return {'name': None, 'password': None} ############## # Data related ############## def add_data(self, data_type, host, *args, **kwargs): """Adds data of type 'data_type' to the database. Descendant classes should implement a function for each DataType value of the form 'add_{DataType.name}_data' - for example: 'add_tiny_data' 'add_small_data' ... Since this method may be called multiple times, the implemented 'add_*_data' functions should be idempotent. """ self._perform_data_action(self.FN_ADD, data_type.name, host, *args, **kwargs) def remove_data(self, data_type, host, *args, **kwargs): """Removes all data associated with 'data_type'. See instructions for 'add_data' for implementation guidance. """ self._perform_data_action(self.FN_REMOVE, data_type.name, host, *args, **kwargs) def verify_data(self, data_type, host, *args, **kwargs): """Verify that the data of type 'data_type' exists in the datastore. This can be done by testing edge cases, and possibly some random elements within the set. See instructions for 'add_data' for implementation guidance. """ self._perform_data_action(self.FN_VERIFY, data_type.name, host, *args, **kwargs) def _perform_data_action(self, fn_type, fn_name, host, *args, **kwargs): fns = self._data_fns[fn_type] data_fn_name = self.data_fn_pattern % (fn_type, fn_name) try: fns[data_fn_name](self, host, *args, **kwargs) except SkipTest: raise except Exception as ex: raise RuntimeError("Error calling %s from class %s - %s" % (data_fn_name, self.__class__.__name__, ex)) def _build_data_fns(self): """Build the base data functions specified by FN_TYPE_* for each of the types defined in the DataType class. For example, 'add_small_data' and 'verify_large_data'. These functions are set to call '*_actual_data' and will pass in sane values for label, start and size. 
The '*_actual_data' methods should be overwritten by a descendant class, and are the ones that do the actual work. The original 'add_small_data', etc. methods can also be overridden if needed, and those overwritten functions will be bound before calling any data functions such as 'add_data' or 'remove_data'. """ for fn_type in self.FN_TYPES: fn_dict = self._data_fns[fn_type] for data_type in DataType: self._data_fn_builder(fn_type, data_type.name, fn_dict) self._data_fn_builder(fn_type, self.DT_ACTUAL, fn_dict) self._override_data_fns() def _data_fn_builder(self, fn_type, fn_name, fn_dict): """Builds the actual function with a SkipTest exception, and changes the name to reflect the pattern. """ data_fn_name = self.data_fn_pattern % (fn_type, fn_name) # Build the overridable 'actual' Data Manipulation methods if fn_name == self.DT_ACTUAL: def data_fn(self, data_label, data_start, data_size, host, *args, **kwargs): # default action is to skip the test cls_str = '' if self._expected_override_name != self.__class__.__name__: cls_str = (' (%s not loaded)' % self._expected_override_name) raise SkipTest("Data function '%s' not found in '%s'%s" % ( data_fn_name, self.__class__.__name__, cls_str)) else: def data_fn(self, host, *args, **kwargs): # call the corresponding 'actual' method fns = self._data_fns[fn_type] var_dict = self._fn_data[fn_name] data_start = var_dict[self.DATA_START] data_size = var_dict[self.DATA_SIZE] actual_fn_name = self.data_fn_pattern % ( fn_type, self.DT_ACTUAL) try: fns[actual_fn_name](self, fn_name, data_start, data_size, host, *args, **kwargs) except SkipTest: raise except Exception as ex: raise RuntimeError("Error calling %s from class %s: %s" % ( data_fn_name, self.__class__.__name__, ex)) data_fn.__name__ = data_fn.func_name = data_fn_name fn_dict[data_fn_name] = data_fn def _override_data_fns(self): """Bind the override methods to the dict.""" members = inspect.getmembers(self.__class__, predicate=inspect.ismethod) for fn_type in self.FN_TYPES: fns = self._data_fns[fn_type] for name, fn in members: if name in fns: fns[name] = fn ##################### # Replication related ##################### def wait_for_replicas(self): """Wait for data to propagate to all the replicas. Datastore specific overrides could increase (or decrease) this delay. """ sleep(30) ####################### # Database/User related ####################### def get_valid_database_definitions(self): """Return a list of valid database JSON definitions. These definitions will be used by tests that create databases. Return an empty list if the datastore does not support databases. """ return list() def get_valid_user_definitions(self): """Return a list of valid user JSON definitions. These definitions will be used by tests that create users. Return an empty list if the datastore does not support users. """ return list() def get_non_existing_database_definition(self): """Return a valid JSON definition for a non-existing database. This definition will be used by negative database tests. The database will not be created by any of the tests. Return None if the datastore does not support databases. """ valid_defs = self.get_valid_database_definitions() return self._get_non_existing_definition(valid_defs) def get_non_existing_user_definition(self): """Return a valid JSON definition for a non-existing user. This definition will be used by negative user tests. The user will not be created by any of the tests. Return None if the datastore does not support users. 
""" valid_defs = self.get_valid_user_definitions() return self._get_non_existing_definition(valid_defs) def _get_non_existing_definition(self, existing_defs): """This will create a unique definition for a non-existing object by randomizing one of an existing object. """ if existing_defs: non_existing_def = dict(existing_defs[0]) while non_existing_def in existing_defs: non_existing_def = self._randomize_on_name(non_existing_def) return non_existing_def return None def _randomize_on_name(self, definition): def_copy = dict(definition) def_copy['name'] = ''.join([def_copy['name'], 'rnd']) return def_copy ############################# # Configuration Group related ############################# def get_dynamic_group(self): """Return a definition of a dynamic configuration group. A dynamic group should contain only properties that do not require database restart. Return an empty dict if the datastore does not have any. """ return dict() def get_non_dynamic_group(self): """Return a definition of a non-dynamic configuration group. A non-dynamic group has to include at least one property that requires database restart. Return an empty dict if the datastore does not have any. """ return dict() def get_invalid_groups(self): """Return a list of configuration groups with invalid values. An empty list indicates that no 'invalid' tests should be run. """ return [] ################### # Guest Log related ################### def get_exposed_log_list(self): """Return the list of exposed logs for the datastore. This method shouldn't need to be overridden. """ logs = [] try: logs.extend(self.get_exposed_user_log_names()) except SkipTest: pass try: logs.extend(self.get_exposed_sys_log_names()) except SkipTest: pass return logs def get_full_log_list(self): """Return the full list of all logs for the datastore. This method shouldn't need to be overridden. """ logs = self.get_exposed_log_list() try: logs.extend(self.get_unexposed_user_log_names()) except SkipTest: pass try: logs.extend(self.get_unexposed_sys_log_names()) except SkipTest: pass return logs # Override these guest log methods if needed def get_exposed_user_log_names(self): """Return the names of the user logs that are visible to all users. The first log name will be used for tests. """ raise SkipTest("No exposed user log names defined.") def get_unexposed_user_log_names(self): """Return the names of the user logs that not visible to all users. The first log name will be used for tests. """ raise SkipTest("No unexposed user log names defined.") def get_exposed_sys_log_names(self): """Return the names of SYS logs that are visible to all users. The first log name will be used for tests. """ raise SkipTest("No exposed sys log names defined.") def get_unexposed_sys_log_names(self): """Return the names of the sys logs that not visible to all users. The first log name will be used for tests. """ return ['guest'] def log_enable_requires_restart(self): """Returns whether enabling or disabling a USER log requires a restart of the datastore. """ return False ############## # Root related ############## def get_valid_root_password(self): """Return a valid password that can be used by a 'root' user. """ return "RootTestPass" ############## # Module related ############## def get_valid_module_type(self): """Return a valid module type.""" return "Ping" trove-5.0.0/trove/tests/scenario/helpers/couchdb_helper.py0000664000567000056710000001010512701410316025072 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corporation # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import couchdb from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner class CouchdbHelper(TestHelper): def __init__(self, expected_override_name): super(CouchdbHelper, self).__init__(expected_override_name) self._data_cache = dict() self.field_name = 'ff-%s' self.database = 'firstdb' def create_client(self, host, *args, **kwargs): username = self.get_helper_credentials()['name'] password = self.get_helper_credentials()["password"] url = 'http://%(username)s:%(password)s@%(host)s:5984/' % { 'username': username, 'password': password, 'host': host, } server = couchdb.Server(url) return server def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) db = client[self.database] doc = {} doc_id, doc_rev = db.save(doc) data = self._get_dataset(data_size) doc = db.get(doc_id) for value in data: key = self.field_name % value doc[key] = value db.save(doc) def _get_dataset(self, data_size): cache_key = str(data_size) if cache_key in self._data_cache: return self._data_cache.get(cache_key) data = self._generate_dataset(data_size) self._data_cache[cache_key] = data return data def _generate_dataset(self, data_size): return range(1, data_size + 1) def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host) db = client[self.database + "_" + data_label] client.delete(db) def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): expected_data = self._get_dataset(data_size) client = self.get_client(host, *args, **kwargs) db = client[self.database] actual_data = [] TestRunner.assert_equal(len(db), 1) for i in db: items = db[i].items() actual_data = ([value for key, value in items if key not in ['_id', '_rev']]) TestRunner.assert_equal(len(expected_data), len(actual_data), "Unexpected number of result rows.") for expected_row in expected_data: TestRunner.assert_true(expected_row in actual_data, "Row not found in the result set: %s" % expected_row) def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': self.database} def get_helper_credentials_root(self): return {'name': 'root', 'password': 'rootpass'} def get_valid_database_definitions(self): return [{'name': 'db1'}, {'name': 'db2'}, {"name": 'db3'}] def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': [], 'host': '127.0.0.1'}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}], 'host': '0.0.0.0'}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] trove-5.0.0/trove/tests/scenario/helpers/sql_helper.py0000664000567000056710000001311712701410316024270 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from sqlalchemy import MetaData, Table, Column, Integer from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner class SqlHelper(TestHelper): """This mixin provides data handling helper functions for SQL datastores. """ DATA_COLUMN_NAME = 'value' def __init__(self, expected_override_name, protocol, port=None): super(SqlHelper, self).__init__(expected_override_name) self.protocol = protocol self.port = port self.credentials = self.get_helper_credentials() self.credentials_root = self.get_helper_credentials_root() self.test_schema = self.credentials['database'] self._schema_metadata = MetaData() self._data_cache = dict() def create_client(self, host, *args, **kwargs): username = kwargs.get("username") password = kwargs.get("password") if username and password: creds = {"name": username, "password": password} return sqlalchemy.create_engine( self._build_connection_string(host, creds)) return sqlalchemy.create_engine( self._build_connection_string(host, self.credentials)) def _build_connection_string(self, host, creds): if self.port: host = "%s:%d" % (host, self.port) credentials = {'protocol': self.protocol, 'host': host, 'user': creds.get('name', ''), 'password': creds.get('password', ''), 'database': creds.get('database', '')} return ('%(protocol)s://%(user)s:%(password)s@%(host)s/%(database)s' % credentials) # Add data overrides def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) self._create_data_table(client, self.test_schema, data_label) count = self._count_data_rows(client, self.test_schema, data_label) if count == 0: self._insert_data_rows(client, self.test_schema, data_label, data_size) def _create_data_table(self, client, schema_name, table_name): Table( table_name, self._schema_metadata, Column(self.DATA_COLUMN_NAME, Integer(), nullable=False, default=0), keep_existing=True, schema=schema_name ).create(client, checkfirst=True) def _count_data_rows(self, client, schema_name, table_name): data_table = self._get_schema_table(schema_name, table_name) return client.execute(data_table.count()).scalar() def _insert_data_rows(self, client, schema_name, table_name, data_size): data_table = self._get_schema_table(schema_name, table_name) client.execute(data_table.insert(), self._get_dataset(data_size)) def _get_schema_table(self, schema_name, table_name): qualified_table_name = '%s.%s' % (schema_name, table_name) return self._schema_metadata.tables.get(qualified_table_name) def _get_dataset(self, data_size): cache_key = str(data_size) if cache_key in self._data_cache: return self._data_cache.get(cache_key) data = self._generate_dataset(data_size) self._data_cache[cache_key] = data return data def _generate_dataset(self, data_size): return [{self.DATA_COLUMN_NAME: value} for value in range(1, data_size + 1)] # Remove data overrides def remove_actual_data(self, data_label, data_start, 
data_size, host, *args, **kwargs): client = self.get_client(host) self._drop_table(client, self.test_schema, data_label) def _drop_table(self, client, schema_name, table_name): data_table = self._get_schema_table(schema_name, table_name) data_table.drop(client, checkfirst=True) # Verify data overrides def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): expected_data = [(item[self.DATA_COLUMN_NAME],) for item in self._get_dataset(data_size)] client = self.get_client(host, *args, **kwargs) actual_data = self._select_data_rows(client, self.test_schema, data_label) TestRunner.assert_equal(len(expected_data), len(actual_data), "Unexpected number of result rows.") TestRunner.assert_list_elements_equal( expected_data, actual_data, "Unexpected rows in the result set.") def _select_data_rows(self, client, schema_name, table_name): data_table = self._get_schema_table(schema_name, table_name) return client.execute(data_table.select()).fetchall() def ping(self, host, *args, **kwargs): root_client = self.get_client(host, *args, **kwargs) root_client.execute("SELECT 1;") trove-5.0.0/trove/tests/scenario/helpers/redis_helper.py0000664000567000056710000001671012701410316024601 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import redis from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner class RedisHelper(TestHelper): def __init__(self, expected_override_name): super(RedisHelper, self).__init__(expected_override_name) self.key_patterns = ['user_a:%s', 'user_b:%s'] self.value_pattern = 'id:%s' self.label_value = 'value_set' self._ds_client_cache = dict() def get_client(self, host, *args, **kwargs): # We need to cache the Redis client in order to prevent Error 99 # (Cannot assign requested address) when working with large data sets. # A new client may be created frequently due to how the redirection # works (see '_execute_with_redirection'). # The old (now closed) connections however have to wait for about 60s # (TIME_WAIT) before the port can be released. # This is a feature of the operating system that helps it deal with # packets that arrive after the connection is closed.
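        # In effect this is per-host memoization of the client object,
        # e.g. (a sketch; the host value is illustrative):
        #
        #     client_a = helper.get_client('10.0.0.2')  # creates and caches
        #     client_b = helper.get_client('10.0.0.2')  # returns cached one
        #     assert client_a is client_b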
if host not in self._ds_client_cache: self._ds_client_cache[host] = ( self.create_client(host, *args, **kwargs)) return self._ds_client_cache[host] def create_client(self, host, *args, **kwargs): user = self.get_helper_credentials() client = redis.StrictRedis(password=user['password'], host=host) return client # Add data overrides # We use multiple keys to make the Redis backup take longer def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): test_set = self._get_data_point(host, data_label, *args, **kwargs) if not test_set: for num in range(data_start, data_start + data_size): for key_pattern in self.key_patterns: self._set_data_point( host, key_pattern % str(num), self.value_pattern % str(num), *args, **kwargs) # now that the data is there, add the label self._set_data_point( host, data_label, self.label_value, *args, **kwargs) def _set_data_point(self, host, key, value, *args, **kwargs): def set_point(client, key, value): return client.set(key, value) self._execute_with_redirection( host, set_point, [key, value], *args, **kwargs) def _get_data_point(self, host, key, *args, **kwargs): def get_point(client, key): return client.get(key) return self._execute_with_redirection( host, get_point, [key], *args, **kwargs) def _execute_with_redirection(self, host, callback, callback_args, *args, **kwargs): """Redis clustering is a relatively new feature still not supported in a fully transparent way by all clients. The application itself is responsible for connecting to the right node when accessing a key in a Redis cluster instead. Clients may be redirected to other nodes by redirection errors: redis.exceptions.ResponseError: MOVED 10778 10.64.0.2:6379 This method tries to execute a given callback on a given host. If it gets a redirection error it parses the new host from the response and issues the same callback on this new host. 
""" client = self.get_client(host, *args, **kwargs) try: return callback(client, *callback_args) except redis.exceptions.ResponseError as ex: response = str(ex) if response: tokens = response.split() if tokens[0] == 'MOVED': redirected_host = tokens[2].split(':')[0] if redirected_host: return self._execute_with_redirection( redirected_host, callback, callback_args, *args, **kwargs) raise ex # Remove data overrides # We use multiple keys to make the Redis backup take longer def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): test_set = self._get_data_point(host, data_label, *args, **kwargs) if test_set: for num in range(data_start, data_start + data_size): for key_pattern in self.key_patterns: self._expire_data_point(host, key_pattern % str(num), *args, **kwargs) # now that the data is gone, remove the label self._expire_data_point(host, data_label, *args, **kwargs) def _expire_data_point(self, host, key, *args, **kwargs): def expire_point(client, key): return client.expire(key, 0) self._execute_with_redirection( host, expire_point, [key], *args, **kwargs) # Verify data overrides # We use multiple keys to make the Redis backup take longer def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): # make sure the data is there - tests edge cases and a random one self._verify_data_point(host, data_label, self.label_value, *args, **kwargs) midway_num = data_start + int(data_size / 2) random_num = random.randint(data_start + 2, data_start + data_size - 3) for num in [data_start, data_start + 1, midway_num, random_num, data_start + data_size - 2, data_start + data_size - 1]: for key_pattern in self.key_patterns: self._verify_data_point(host, key_pattern % num, self.value_pattern % num, *args, **kwargs) # negative tests for num in [data_start - 1, data_start + data_size]: for key_pattern in self.key_patterns: self._verify_data_point(host, key_pattern % num, None, *args, **kwargs) def _verify_data_point(self, host, key, expected_value, *args, **kwargs): value = self._get_data_point(host, key, *args, **kwargs) TestRunner.assert_equal(expected_value, value, "Unexpected value '%s' returned from Redis " "key '%s'" % (value, key)) def get_dynamic_group(self): return {'hz': 15} def get_non_dynamic_group(self): return {'databases': 24} def get_invalid_groups(self): return [{'hz': 600}, {'databases': -1}, {'databases': 'string_value'}] trove-5.0.0/trove/tests/scenario/helpers/percona_helper.py0000664000567000056710000000151612701410316025120 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.mysql_helper import MysqlHelper class PerconaHelper(MysqlHelper): def __init__(self, expected_override_name): super(PerconaHelper, self).__init__(expected_override_name) trove-5.0.0/trove/tests/scenario/helpers/mongodb_helper.py0000664000567000056710000000312212701410316025111 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.test_helper import TestHelper class MongodbHelper(TestHelper): def __init__(self, expected_override_name): super(MongodbHelper, self).__init__(expected_override_name) def get_valid_database_definitions(self): return [{"name": 'db1'}, {"name": 'db2'}, {'name': 'db3'}] def get_valid_user_definitions(self): return [{'name': 'db0.user1', 'password': 'password1', 'databases': []}, {'name': 'db0.user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'db1.user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_non_dynamic_group(self): return {'systemLog.verbosity': 4} def get_invalid_groups(self): return [{'net.maxIncomingConnections': -1}, {'storage.mmapv1.nsSize': 4096}, {'storage.journal.enabled': 'string_value'}] trove-5.0.0/trove/tests/scenario/helpers/vertica_helper.py0000664000567000056710000000402112701410316025120 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.scenario.helpers.sql_helper import SqlHelper class VerticaHelper(SqlHelper): def __init__(self, expected_override_name): super(VerticaHelper, self).__init__(expected_override_name, 'vertica') def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'lite'} def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def add_actual_data(self, *args, **kwargs): raise SkipTest("Adding data to Vertica is not implemented") def verify_actual_data(self, *args, **kwargs): raise SkipTest("Verifying data in Vertica is not implemented") def remove_actual_data(self, *args, **kwargs): raise SkipTest("Removing data from Vertica is not implemented") def get_dynamic_group(self): return {'ActivePartitionCount': 3} def get_non_dynamic_group(self): return {'BlockCacheSize': 1024} def get_invalid_groups(self): return [{'timezone': 997}, {"max_worker_processes": 'string_value'}, {"standard_conforming_strings": 'string_value'}] trove-5.0.0/trove/tests/scenario/helpers/cassandra_helper.py0000664000567000056710000001323612701410316025432 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cassandra.auth import PlainTextAuthProvider from cassandra.cluster import Cluster from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner class CassandraClient(object): # Cassandra 2.1 only supports protocol versions 3 and lower. NATIVE_PROTOCOL_VERSION = 3 def __init__(self, contact_points, user, password, keyspace): super(CassandraClient, self).__init__() self._cluster = None self._session = None self._cluster = Cluster( contact_points=contact_points, auth_provider=PlainTextAuthProvider(user, password), protocol_version=self.NATIVE_PROTOCOL_VERSION) self._session = self._connect(keyspace) def _connect(self, keyspace): if not self._cluster.is_shutdown: return self._cluster.connect(keyspace) else: raise Exception("Cannot perform this operation on a terminated " "cluster.") @property def session(self): return self._session def __del__(self): if self._cluster is not None: self._cluster.shutdown() if self._session is not None: self._session.shutdown() class CassandraHelper(TestHelper): DATA_COLUMN_NAME = 'value' def __init__(self, expected_override_name): super(CassandraHelper, self).__init__(expected_override_name) self._data_cache = dict() def create_client(self, host, *args, **kwargs): user = self.get_helper_credentials() return CassandraClient( [host], user['name'], user['password'], user['database']) def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) self._create_data_table(client, data_label) stmt = client.session.prepare("INSERT INTO %s (%s) VALUES (?)" % (data_label, self.DATA_COLUMN_NAME)) count = self._count_data_rows(client, data_label) if count == 0: for value in self._get_dataset(data_size): client.session.execute(stmt, [value]) def _create_data_table(self, client, table_name): client.session.execute('CREATE TABLE IF NOT EXISTS %s ' '(%s INT PRIMARY KEY)' % (table_name, self.DATA_COLUMN_NAME)) def _count_data_rows(self, client, table_name): rows = client.session.execute('SELECT COUNT(*) FROM %s' % table_name) if rows: return rows[0][0] return 0 def _get_dataset(self, data_size): cache_key = str(data_size) if cache_key in self._data_cache: return self._data_cache.get(cache_key) data = self._generate_dataset(data_size) self._data_cache[cache_key] = data return data def _generate_dataset(self, data_size): return range(1, data_size + 1) def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) self._drop_table(client, data_label) def _drop_table(self, client, table_name): client.session.execute('DROP TABLE %s' % table_name) def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): expected_data = self._get_dataset(data_size) client = self.get_client(host, *args, **kwargs) actual_data = self._select_data_rows(client, data_label) TestRunner.assert_equal(len(expected_data), len(actual_data), 
"Unexpected number of result rows.") for expected_row in expected_data: TestRunner.assert_true(expected_row in actual_data, "Row not found in the result set: %s" % expected_row) def _select_data_rows(self, client, table_name): rows = client.session.execute('SELECT %s FROM %s' % (self.DATA_COLUMN_NAME, table_name)) return [value[0] for value in rows] def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'firstdb'} def get_valid_database_definitions(self): return [{"name": 'db1'}, {"name": 'db2'}, {"name": 'db3'}] def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_non_dynamic_group(self): return {'sstable_preemptive_open_interval_in_mb': 40} def get_invalid_groups(self): return [{'sstable_preemptive_open_interval_in_mb': -1}, {'sstable_preemptive_open_interval_in_mb': 'string_value'}] trove-5.0.0/trove/tests/scenario/helpers/mysql_helper.py0000664000567000056710000000417412701410316024641 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.sql_helper import SqlHelper class MysqlHelper(SqlHelper): def __init__(self, expected_override_name): super(MysqlHelper, self).__init__(expected_override_name, 'mysql') def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'firstdb'} def get_helper_credentials_root(self): return {'name': 'root', 'password': 'rootpass'} def get_valid_database_definitions(self): return [{'name': 'db1', 'character_set': 'latin2', 'collate': 'latin2_general_ci'}, {'name': 'db2'}, {"name": 'db3'}] def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': [], 'host': '127.0.0.1'}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}], 'host': '0.0.0.0'}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_dynamic_group(self): return {'key_buffer_size': 10485760, 'join_buffer_size': 10485760} def get_non_dynamic_group(self): return {'innodb_buffer_pool_size': 10485760} def get_invalid_groups(self): return [{'key_buffer_size': 4}, {"join_buffer_size": 'string_value'}] def get_exposed_user_log_names(self): return ['general', 'slow_query'] def get_unexposed_sys_log_names(self): return ['guest', 'error'] trove-5.0.0/trove/tests/scenario/helpers/postgresql_helper.py0000664000567000056710000000446012701410316025675 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.scenario.helpers.sql_helper import SqlHelper class PostgresqlHelper(SqlHelper): def __init__(self, expected_override_name): super(PostgresqlHelper, self).__init__(expected_override_name, 'postgresql') def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'lite'} def get_valid_database_definitions(self): return [{'name': 'db1'}, {'name': 'db2'}, {'name': 'db3'}] def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def add_actual_data(self, *args, **kwargs): raise SkipTest("Adding data to PostgreSQL is broken") def verify_actual_data(self, *args, **kwargs): raise SkipTest("Verifying data in PostgreSQL is broken") def remove_actual_data(self, *args, **kwargs): raise SkipTest("Removing data from PostgreSQL is broken") def get_dynamic_group(self): return {'max_worker_processes': 11} def get_non_dynamic_group(self): return {'max_connections': 113} def get_invalid_groups(self): return [{'timezone': 997}, {"max_worker_processes": 'string_value'}, {"standard_conforming_strings": 'string_value'}] def get_exposed_user_log_names(self): return ['general'] def log_enable_requires_restart(self): return True trove-5.0.0/trove/versions.py0000664000567000056710000000603012701410316017367 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
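# For reference, the version listing rendered by VersionsDataView below for
# the single entry in VERSIONS looks roughly like this (host and port are
# illustrative):
#
#     {"versions": [{"id": "v1.0", "status": "CURRENT",
#                    "updated": "2012-08-01T00:00:00Z",
#                    "links": [{"rel": "self",
#                               "href": "http://trove.example.com:8779/v1.0/"}]}]}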
import os import routes from trove.common import wsgi VERSIONS = { "1.0": { "id": "v1.0", "status": "CURRENT", "updated": "2012-08-01T00:00:00Z", "links": [], }, } class VersionsController(wsgi.Controller): def index(self, request): """Respond to a request for API versions.""" versions = [] for key, data in VERSIONS.items(): v = BaseVersion( data["id"], data["status"], request.application_url, data["updated"]) versions.append(v) return wsgi.Result(VersionsDataView(versions)) def show(self, request): """Respond to a request for a specific API version.""" data = VERSIONS[request.url_version] v = Version(data["id"], data["status"], request.application_url, data["updated"]) return wsgi.Result(VersionDataView(v)) class BaseVersion(object): def __init__(self, id, status, base_url, updated): self.id = id self.status = status self.base_url = base_url self.updated = updated def data(self): return { "id": self.id, "status": self.status, "updated": self.updated, "links": [{"rel": "self", "href": self.url()}], } def url(self): url = os.path.join(self.base_url, self.id) if not url.endswith("/"): return url + "/" return url class Version(BaseVersion): def url(self): if not self.base_url.endswith("/"): return self.base_url + "/" return self.base_url class VersionDataView(object): def __init__(self, version): self.version = version def data_for_json(self): return {'version': self.version.data()} class VersionsDataView(object): def __init__(self, versions): self.versions = versions def data_for_json(self): return {'versions': [version.data() for version in self.versions]} class VersionsAPI(wsgi.Router): def __init__(self): mapper = routes.Mapper() versions_resource = VersionsController().create_resource() mapper.connect("/", controller=versions_resource, action="index") super(VersionsAPI, self).__init__(mapper) def app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return VersionsAPI() trove-5.0.0/trove/instance/0000775000567000056710000000000012701410521016750 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/instance/__init__.py0000664000567000056710000000000012701410316021051 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/instance/service.py0000664000567000056710000005212612701410316020772 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
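# For reference, InstanceController.action() below dispatches on a recognized
# key in the request body; illustrative payloads (all values are examples
# only):
#
#     {"restart": {}}
#     {"resize": {"volume": {"size": 2}}}
#     {"resize": {"flavorRef": "https://trove.example.com/v1.0/flavors/2"}}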
from oslo_log import log as logging from oslo_utils import strutils import webob.exc from trove.backup.models import Backup as backup_model from trove.backup import views as backup_views import trove.common.apischema as apischema from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.i18n import _LI from trove.common import notification from trove.common.notification import StartNotification from trove.common import pagination from trove.common.remote import create_guest_client from trove.common import utils from trove.common import wsgi from trove.datastore import models as datastore_models from trove.extensions.mysql.common import populate_users from trove.extensions.mysql.common import populate_validated_databases from trove.instance import models, views from trove.module import models as module_models from trove.module import views as module_views CONF = cfg.CONF LOG = logging.getLogger(__name__) class InstanceController(wsgi.Controller): """Controller for instance functionality.""" schemas = apischema.instance.copy() @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] action_schema = action_schema.get(action_type, {}) if action_type == 'resize': # volume or flavorRef resize_action = list(body[action_type].keys())[0] action_schema = action_schema.get(resize_action, {}) return action_schema @classmethod def get_schema(cls, action, body): action_schema = super(InstanceController, cls).get_schema(action, body) if action == 'action': # resize or restart action_schema = cls.get_action_schema(body, action_schema) return action_schema def action(self, req, body, tenant_id, id): """ Handles requests that modify existing instances in some manner. Actions could include 'resize', 'restart', 'reset_password' :param req: http request object :param body: deserialized body of the request as a dict :param tenant_id: the id of the tenant that owns the instance :param id: instance id """ LOG.debug("instance action req : '%s'\n\n", req) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) _actions = { 'restart': self._action_restart, 'resize': self._action_resize, 'reset_password': self._action_reset_password, 'promote_to_replica_source': self._action_promote_to_replica_source, 'eject_replica_source': self._action_eject_replica_source, } selected_action = None action_name = None for key in body: if key in _actions: selected_action = _actions[key] action_name = key LOG.info(_LI("Performing %(action_name)s action against " "instance %(instance_id)s for tenant '%(tenant_id)s'"), {'action_name': action_name, 'instance_id': id, 'tenant_id': tenant_id}) return selected_action(context, req, instance, body) def _action_restart(self, context, req, instance, body): context.notification = notification.DBaaSInstanceRestart(context, request=req) with StartNotification(context, instance_id=instance.id): instance.restart() return wsgi.Result(None, 202) def _action_resize(self, context, req, instance, body): """ Handles 2 cases 1. resize volume body only contains {volume: {size: x}} 2. resize instance body only contains {flavorRef: http.../2} If the body has both we will throw back an error.
""" options = { 'volume': self._action_resize_volume, 'flavorRef': self._action_resize_flavor } selected_option = None args = None for key in options: if key in body['resize']: selected_option = options[key] args = body['resize'][key] break return selected_option(context, req, instance, args) def _action_resize_volume(self, context, req, instance, volume): context.notification = notification.DBaaSInstanceResizeVolume( context, request=req) with StartNotification(context, instance_id=instance.id, new_size=volume['size']): instance.resize_volume(volume['size']) return wsgi.Result(None, 202) def _action_resize_flavor(self, context, req, instance, flavorRef): context.notification = notification.DBaaSInstanceResizeInstance( context, request=req) new_flavor_id = utils.get_id_from_href(flavorRef) with StartNotification(context, instance_id=instance.id, new_flavor_id=new_flavor_id): instance.resize_flavor(new_flavor_id) return wsgi.Result(None, 202) def _action_reset_password(self, context, instance, body): raise webob.exc.HTTPNotImplemented() def _action_promote_to_replica_source(self, context, req, instance, body): context.notification = notification.DBaaSInstanceEject(context, request=req) with StartNotification(context, instance_id=instance.id): instance.promote_to_replica_source() return wsgi.Result(None, 202) def _action_eject_replica_source(self, context, req, instance, body): context.notification = notification.DBaaSInstancePromote(context, request=req) with StartNotification(context, instance_id=instance.id): instance.eject_replica_source() return wsgi.Result(None, 202) def index(self, req, tenant_id): """Return all instances.""" LOG.info(_LI("Listing database instances for tenant '%s'"), tenant_id) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] clustered_q = req.GET.get('include_clustered', '').lower() include_clustered = clustered_q == 'true' servers, marker = models.Instances.load(context, include_clustered) view = views.InstancesView(servers, req=req) paged = pagination.SimplePaginatedDataView(req.url, 'instances', view, marker) return wsgi.Result(paged.data(), 200) def backups(self, req, tenant_id, id): """Return all backups for the specified instance.""" LOG.info(_LI("Listing backups for instance '%s'"), id) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] backups, marker = backup_model.list_for_instance(context, id) view = backup_views.BackupViews(backups) paged = pagination.SimplePaginatedDataView(req.url, 'backups', view, marker) return wsgi.Result(paged.data(), 200) def show(self, req, tenant_id, id): """Return a single instance.""" LOG.info(_LI("Showing database instance '%(instance_id)s' for tenant " "'%(tenant_id)s'"), {'instance_id': id, 'tenant_id': tenant_id}) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] server = models.load_instance_with_guest(models.DetailInstance, context, id) return wsgi.Result(views.InstanceDetailView(server, req=req).data(), 200) def delete(self, req, tenant_id, id): """Delete a single instance.""" LOG.info(_LI("Deleting database instance '%(instance_id)s' for tenant " "'%(tenant_id)s'"), {'instance_id': id, 'tenant_id': tenant_id}) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] instance = models.load_any_instance(context, id) context.notification = notification.DBaaSInstanceDelete( context, request=req) with StartNotification(context, instance_id=instance.id): marker = 'foo' while marker: instance_modules, marker = 
module_models.InstanceModules.load( context, instance_id=id) for instance_module in instance_modules: instance_module = module_models.InstanceModule.load( context, instance_module['instance_id'], instance_module['module_id']) module_models.InstanceModule.delete( context, instance_module) instance.delete() return wsgi.Result(None, 202) def create(self, req, body, tenant_id): # TODO(hub-cap): turn this into middleware LOG.info(_LI("Creating a database instance for tenant '%s'"), tenant_id) LOG.debug("req : '%s'\n\n", strutils.mask_password(req)) LOG.debug("body : '%s'\n\n", strutils.mask_password(body)) context = req.environ[wsgi.CONTEXT_KEY] context.notification = notification.DBaaSInstanceCreate(context, request=req) datastore_args = body['instance'].get('datastore', {}) datastore, datastore_version = ( datastore_models.get_datastore_version(**datastore_args)) image_id = datastore_version.image_id name = body['instance']['name'] flavor_ref = body['instance']['flavorRef'] flavor_id = utils.get_id_from_href(flavor_ref) configuration = self._configuration_parse(context, body) databases = populate_validated_databases( body['instance'].get('databases', [])) database_names = [database.get('_name', '') for database in databases] users = None try: users = populate_users(body['instance'].get('users', []), database_names) except ValueError as ve: raise exception.BadRequest(msg=ve) if 'volume' in body['instance']: volume_info = body['instance']['volume'] volume_size = int(volume_info['size']) volume_type = volume_info.get('type') else: volume_size = None volume_type = None if 'restorePoint' in body['instance']: backupRef = body['instance']['restorePoint']['backupRef'] backup_id = utils.get_id_from_href(backupRef) else: backup_id = None availability_zone = body['instance'].get('availability_zone') nics = body['instance'].get('nics') slave_of_id = body['instance'].get('replica_of', # also check for older name body['instance'].get('slave_of')) replica_count = body['instance'].get('replica_count') modules = body['instance'].get('modules') instance = models.Instance.create(context, name, flavor_id, image_id, databases, users, datastore, datastore_version, volume_size, backup_id, availability_zone, nics, configuration, slave_of_id, replica_count=replica_count, volume_type=volume_type, modules=modules) view = views.InstanceDetailView(instance, req=req) return wsgi.Result(view.data(), 200) def _configuration_parse(self, context, body): if 'configuration' in body['instance']: configuration_ref = body['instance']['configuration'] if configuration_ref: configuration_id = utils.get_id_from_href(configuration_ref) return configuration_id def _modify_instance(self, context, req, instance, **kwargs): """Modifies the instance using the specified keyword arguments. 'detach_replica': ignored if not present or False, if True, specifies the instance is a replica that will be detached from its master 'configuration_id': ignored if not present, if None, detaches an attached configuration group, if not None, attaches the specified configuration group """ if 'detach_replica' in kwargs and kwargs['detach_replica']: LOG.debug("Detaching replica from source.") context.notification = notification.DBaaSInstanceDetach( context, request=req) with StartNotification(context, instance_id=instance.id): instance.detach_replica() if 'configuration_id' in kwargs: if kwargs['configuration_id']: context.notification = ( notification.DBaaSInstanceAttachConfiguration(context, request=req)) configuration_id = kwargs['configuration_id'] with 
StartNotification(context, instance_id=instance.id, configuration_id=configuration_id): instance.assign_configuration(configuration_id) else: context.notification = ( notification.DBaaSInstanceDetachConfiguration(context, request=req)) with StartNotification(context, instance_id=instance.id): instance.unassign_configuration() if kwargs: instance.update_db(**kwargs) def update(self, req, id, body, tenant_id): """Updates the instance to attach/detach configuration.""" LOG.info(_LI("Updating database instance '%(instance_id)s' for tenant " "'%(tenant_id)s'"), {'instance_id': id, 'tenant_id': tenant_id}) LOG.debug("req: %s", req) LOG.debug("body: %s", body) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) # Make sure args contains a 'configuration_id' argument, args = {} args['configuration_id'] = self._configuration_parse(context, body) self._modify_instance(context, req, instance, **args) return wsgi.Result(None, 202) def edit(self, req, id, body, tenant_id): """ Updates the instance to set or unset one or more attributes. """ LOG.info(_LI("Editing instance for tenant id %s."), tenant_id) LOG.debug("req: %s", strutils.mask_password(req)) LOG.debug("body: %s", strutils.mask_password(body)) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) args = {} args['detach_replica'] = ('replica_of' in body['instance'] or 'slave_of' in body['instance']) if 'name' in body['instance']: args['name'] = body['instance']['name'] if 'configuration' in body['instance']: args['configuration_id'] = self._configuration_parse(context, body) self._modify_instance(context, req, instance, **args) return wsgi.Result(None, 202) def configuration(self, req, tenant_id, id): """ Returns the default configuration template applied to the instance. 
""" LOG.info(_LI("Getting default configuration for instance %s"), id) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) LOG.debug("Server: %s", instance) config = instance.get_default_configuration_template() LOG.debug("Default config for instance %(instance_id)s is %(config)s", {'instance_id': id, 'config': config}) return wsgi.Result(views.DefaultConfigurationView( config).data(), 200) def guest_log_list(self, req, tenant_id, id): """Return all information about all logs for an instance.""" LOG.debug("Listing logs for tenant %s" % tenant_id) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) client = create_guest_client(context, id) guest_log_list = client.guest_log_list() return wsgi.Result({'logs': guest_log_list}, 200) def guest_log_action(self, req, body, tenant_id, id): """Processes a guest log.""" LOG.info(_("Processing log for tenant %s"), tenant_id) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) log_name = body['name'] enable = body.get('enable', None) disable = body.get('disable', None) publish = body.get('publish', None) discard = body.get('discard', None) if enable and disable: raise exception.BadRequest(_("Cannot enable and disable log.")) client = create_guest_client(context, id) guest_log = client.guest_log_action(log_name, enable, disable, publish, discard) return wsgi.Result({'log': guest_log}, 200) def module_list(self, req, tenant_id, id): """Return information about modules on an instance.""" context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) from_guest = bool(req.GET.get('from_guest', '').lower()) include_contents = bool(req.GET.get('include_contents', '').lower()) if from_guest: return self._module_list_guest( context, id, include_contents=include_contents) else: return self._module_list( context, id, include_contents=include_contents) def _module_list_guest(self, context, id, include_contents): """Return information about modules on an instance.""" client = create_guest_client(context, id) result_list = client.module_list(include_contents) return wsgi.Result({'modules': result_list}, 200) def _module_list(self, context, id, include_contents): """Return information about instnace modules.""" client = create_guest_client(context, id) result_list = client.module_list(include_contents) return wsgi.Result({'modules': result_list}, 200) def module_apply(self, req, body, tenant_id, id): """Apply modules to an instance.""" context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) module_ids = [mod['id'] for mod in body.get('modules', [])] modules = module_models.Modules.load_by_ids(context, module_ids) module_list = [] for module in modules: module.contents = module_models.Module.deprocess_contents( module.contents) module_info = module_views.DetailedModuleView(module).data( include_contents=True) module_list.append(module_info) client = create_guest_client(context, id) result_list = client.module_apply(module_list) models.Instance.add_instance_modules(context, id, modules) return wsgi.Result({'modules': result_list}, 200) def module_remove(self, req, tenant_id, id, module_id): """Remove module from an instance.""" context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not 
instance: raise exception.NotFound(uuid=id) module = module_models.Module.load(context, module_id) module_info = module_views.DetailedModuleView(module).data() client = create_guest_client(context, id) client.module_remove(module_info) instance_module = module_models.InstanceModule.load( context, instance_id=id, module_id=module_id) if instance_module: module_models.InstanceModule.delete(context, instance_module) return wsgi.Result(None, 200) trove-5.0.0/trove/instance/views.py0000664000567000056710000001420512701410316020463 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common.views import create_links from trove.instance import models LOG = logging.getLogger(__name__) CONF = cfg.CONF class InstanceView(object): """Uses a SimpleInstance.""" def __init__(self, instance, req=None): self.instance = instance self.req = req def data(self): instance_dict = { "id": self.instance.id, "name": self.instance.name, "status": self.instance.status, "links": self._build_links(), "flavor": self._build_flavor_info(), "datastore": {"type": self.instance.datastore.name, "version": self.instance.datastore_version.name}, } if self.instance.volume_support: instance_dict['volume'] = {'size': self.instance.volume_size} if self.instance.hostname: instance_dict['hostname'] = self.instance.hostname else: ip = self.instance.get_visible_ip_addresses() if ip: instance_dict['ip'] = ip if self.instance.slave_of_id is not None: instance_dict['replica_of'] = self._build_master_info() LOG.debug(instance_dict) return {"instance": instance_dict} def _build_links(self): return create_links("instances", self.req, self.instance.id) def _build_flavor_info(self): return { "id": self.instance.flavor_id, "links": self._build_flavor_links() } def _build_flavor_links(self): return create_links("flavors", self.req, self.instance.flavor_id) def _build_master_info(self): return { "id": self.instance.slave_of_id, "links": create_links("instances", self.req, self.instance.slave_of_id) } class InstanceDetailView(InstanceView): """Works with a full-blown instance.""" def __init__(self, instance, req): super(InstanceDetailView, self).__init__(instance, req=req) def data(self): result = super(InstanceDetailView, self).data() result['instance']['created'] = self.instance.created result['instance']['updated'] = self.instance.updated result['instance']['datastore']['version'] = (self.instance. datastore_version.name) if self.instance.slaves: result['instance']['replicas'] = self._build_slaves_info() if self.instance.configuration is not None: result['instance']['configuration'] = (self. 
_build_configuration_info()) if (isinstance(self.instance, models.DetailInstance) and self.instance.volume_used): used = self.instance.volume_used if self.instance.volume_support: result['instance']['volume']['used'] = used else: # either ephemeral or root partition result['instance']['local_storage'] = {'used': used} if self.instance.root_password: result['instance']['password'] = self.instance.root_password if self.instance.cluster_id: result['instance']['cluster_id'] = self.instance.cluster_id if self.instance.shard_id: result['instance']['shard_id'] = self.instance.shard_id return result def _build_slaves_info(self): data = [] for slave in self.instance.slaves: data.append({ "id": slave.id, "links": create_links("instances", self.req, slave.id) }) return data def _build_configuration_info(self): return { "id": self.instance.configuration.id, "name": self.instance.configuration.name, "links": create_links("configurations", self.req, self.instance.configuration.id) } class InstancesView(object): """Shows a list of SimpleInstance objects.""" def __init__(self, instances, req=None): self.instances = instances self.req = req def data(self): data = [] # These are model instances for instance in self.instances: data.append(self.data_for_instance(instance)) return {'instances': data} def data_for_instance(self, instance): view = InstanceView(instance, req=self.req) return view.data()['instance'] class DefaultConfigurationView(object): def __init__(self, config): self.config = config def data(self): config_dict = {} for key, val in self.config: config_dict[key] = val return {"instance": {"configuration": config_dict}} class GuestLogView(object): def __init__(self, guest_log): self.guest_log = guest_log def data(self): return { 'name': self.guest_log.name, 'type': self.guest_log.type, 'status': self.guest_log.status, 'published': self.guest_log.published, 'pending': self.guest_log.pending, 'container': self.guest_log.container, 'prefix': self.guest_log.prefix, 'metafile': self.guest_log.metafile, } class GuestLogsView(object): def __init__(self, guest_logs): self.guest_logs = guest_logs def data(self): return [GuestLogView(l).data() for l in self.guest_logs] trove-5.0.0/trove/instance/models.py0000664000567000056710000015367112701410316020624 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2013-2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
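# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): filter_ips()
# defined below keeps addresses that match the white-list regex and then
# drops any that also match the black-list regex. The address values and
# patterns here are made up for demonstration:
#
#     >>> filter_ips(['10.0.0.3', '10.0.0.9'], r'^10\.', r'\.9$')
#     ['10.0.0.3']
# ---------------------------------------------------------------------------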
"""Model classes that form the core of instances functionality.""" from datetime import datetime from datetime import timedelta import re from novaclient import exceptions as nova_exceptions from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from trove.backup.models import Backup from trove.common import cfg from trove.common import exception from trove.common import i18n as i18n import trove.common.instance as tr_instance from trove.common.notification import StartNotification from trove.common.remote import create_cinder_client from trove.common.remote import create_dns_client from trove.common.remote import create_guest_client from trove.common.remote import create_nova_client from trove.common import template from trove.common import utils from trove.configuration.models import Configuration from trove.datastore import models as datastore_models from trove.datastore.models import DBDatastoreVersionMetadata from trove.db import get_db_api from trove.db import models as dbmodels from trove.extensions.security_group.models import SecurityGroup from trove.instance.tasks import InstanceTask from trove.instance.tasks import InstanceTasks from trove.module import models as module_models from trove.module import views as module_views from trove.quota.quota import run_with_quotas from trove.taskmanager import api as task_api (_, _LE, _LI, _LW) = (i18n._, i18n._LE, i18n._LI, i18n._LW) CONF = cfg.CONF LOG = logging.getLogger(__name__) def filter_ips(ips, white_list_regex, black_list_regex): """Return IPs matching white_list_regex and Filter out IPs matching black_list_regex. """ return [ip for ip in ips if re.search(white_list_regex, ip) and not re.search(black_list_regex, ip)] def load_server(context, instance_id, server_id): """ Loads a server or raises an exception. :param context: request context used to access nova :param instance_id: the trove instance id corresponding to the nova server (informational only) :param server_id: the compute instance id which will be retrieved from nova :type context: trove.common.context.TroveContext :type instance_id: unicode :type server_id: unicode :rtype: novaclient.v2.servers.Server """ client = create_nova_client(context) try: server = client.servers.get(server_id) except nova_exceptions.NotFound: LOG.error(_LE("Could not find nova server_id(%s)."), server_id) raise exception.ComputeInstanceNotFound(instance_id=instance_id, server_id=server_id) except nova_exceptions.ClientException as e: raise exception.TroveError(str(e)) return server class InstanceStatus(object): ACTIVE = "ACTIVE" BLOCKED = "BLOCKED" BUILD = "BUILD" FAILED = "FAILED" REBOOT = "REBOOT" RESIZE = "RESIZE" BACKUP = "BACKUP" SHUTDOWN = "SHUTDOWN" ERROR = "ERROR" RESTART_REQUIRED = "RESTART_REQUIRED" PROMOTE = "PROMOTE" EJECT = "EJECT" def validate_volume_size(size): if size is None: raise exception.VolumeSizeNotSpecified() max_size = CONF.max_accepted_volume_size if long(size) > max_size: msg = ("Volume 'size' cannot exceed maximum " "of %d GB, %s cannot be accepted." 
% (max_size, size)) raise exception.VolumeQuotaExceeded(msg) def load_simple_instance_server_status(context, db_info): """Loads a server or raises an exception.""" if 'BUILDING' == db_info.task_status.action: db_info.server_status = "BUILD" db_info.addresses = {} else: client = create_nova_client(context) try: server = client.servers.get(db_info.compute_instance_id) db_info.server_status = server.status db_info.addresses = server.addresses except nova_exceptions.NotFound: db_info.server_status = "SHUTDOWN" db_info.addresses = {} # Invalid states to contact the agent AGENT_INVALID_STATUSES = ["BUILD", "REBOOT", "RESIZE", "PROMOTE", "EJECT"] class SimpleInstance(object): """A simple view of an instance. This gets loaded directly from the local database, so it's cheaper than creating the fully loaded Instance. As the name implies this class knows nothing of the underlying Nova Compute Instance (i.e. server) ----------- | | | i | | t n | | r s --------------------- | o t | datastore/guest | | v a --------------------- | e n | | c | | e | | | ----------- """ def __init__(self, context, db_info, datastore_status, root_password=None, ds_version=None, ds=None): """ :type context: trove.common.context.TroveContext :type db_info: trove.instance.models.DBInstance :type datastore_status: trove.instance.models.InstanceServiceStatus :type root_password: str """ self.context = context self.db_info = db_info self.datastore_status = datastore_status self.root_pass = root_password if ds_version is None: self.ds_version = (datastore_models.DatastoreVersion. load_by_uuid(self.db_info.datastore_version_id)) if ds is None: self.ds = (datastore_models.Datastore. load(self.ds_version.datastore_id)) self.slave_list = None @property def addresses(self): # TODO(tim.simpson): This code attaches two parts of the Nova server to # db_info: "status" and "addresses". The idea # originally was to listen to events to update this # data and store it in the Trove database. # However, it may have been unwise as a year and a # half later we still have to load the server anyway # and this makes the code confusing. if hasattr(self.db_info, 'addresses'): return self.db_info.addresses else: return None @property def created(self): return self.db_info.created @property def dns_ip_address(self): """Returns the IP address to be used with DNS.""" ips = self.get_visible_ip_addresses() if ips: return ips[0] @property def flavor_id(self): # Flavor ID is a str in the 1.0 API. 
return str(self.db_info.flavor_id) @property def hostname(self): return self.db_info.hostname def get_visible_ip_addresses(self): """Returns IPs that will be visible to the user.""" if self.addresses is None: return None IPs = [] for label in self.addresses: if (re.search(CONF.network_label_regex, label) and len(self.addresses[label]) > 0): IPs.extend([addr.get('addr') for addr in self.addresses[label]]) # Includes ip addresses that match the regexp pattern if CONF.ip_regex and CONF.black_list_regex: IPs = filter_ips(IPs, CONF.ip_regex, CONF.black_list_regex) return IPs @property def id(self): return self.db_info.id @property def type(self): return self.db_info.type @property def tenant_id(self): return self.db_info.tenant_id @property def is_building(self): return self.status in [InstanceStatus.BUILD] @property def is_datastore_running(self): """True if the service status indicates datastore is up and running.""" return self.datastore_status.status in MYSQL_RESPONSIVE_STATUSES def datastore_status_matches(self, service_status): return self.datastore_status.status == service_status @property def name(self): return self.db_info.name @property def server_id(self): return self.db_info.compute_instance_id @property def slave_of_id(self): return self.db_info.slave_of_id @property def datastore_status(self): """ Returns the Service Status for this instance. For example, the status of the mysql datastore which is running on the server...not the server status itself. :return: the current status of the datastore :rtype: trove.instance.models.InstanceServiceStatus """ return self.__datastore_status @datastore_status.setter def datastore_status(self, datastore_status): if datastore_status and not isinstance(datastore_status, InstanceServiceStatus): raise ValueError("datastore_status must be of type " "InstanceServiceStatus. Got %s instead." % datastore_status.__class__.__name__) self.__datastore_status = datastore_status @property def status(self): # Check for taskmanager errors. if self.db_info.task_status.is_error: return InstanceStatus.ERROR # Check for taskmanager status. action = self.db_info.task_status.action if 'BUILDING' == action: if 'ERROR' == self.db_info.server_status: return InstanceStatus.ERROR return InstanceStatus.BUILD if 'REBOOTING' == action: return InstanceStatus.REBOOT if 'RESIZING' == action: return InstanceStatus.RESIZE if 'RESTART_REQUIRED' == action: return InstanceStatus.RESTART_REQUIRED if InstanceTasks.PROMOTING.action == action: return InstanceStatus.PROMOTE if InstanceTasks.EJECTING.action == action: return InstanceStatus.EJECT if InstanceTasks.LOGGING.action == action: return InstanceStatus.LOGGING # Check for server status. if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT", "RESIZE"]: return self.db_info.server_status # As far as Trove is concerned, Nova instances in VERIFY_RESIZE should # still appear as though they are in RESIZE. if self.db_info.server_status in ["VERIFY_RESIZE"]: return InstanceStatus.RESIZE # Check if there is a backup running for this instance if Backup.running(self.id): return InstanceStatus.BACKUP # Report as Shutdown while deleting, unless there's an error. if 'DELETING' == action: if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED"]: return InstanceStatus.SHUTDOWN else: LOG.error(_LE("While shutting down instance (%(instance)s): " "server had status (%(status)s)."), {'instance': self.id, 'status': self.db_info.server_status}) return InstanceStatus.ERROR # Check against the service status. 
# The service is only paused during a reboot. if tr_instance.ServiceStatuses.PAUSED == self.datastore_status.status: return InstanceStatus.REBOOT # If the service status is NEW, then we are building. if tr_instance.ServiceStatuses.NEW == self.datastore_status.status: return InstanceStatus.BUILD # For everything else we can look at the service status mapping. return self.datastore_status.status.api_status @property def updated(self): return self.db_info.updated @property def volume_id(self): return self.db_info.volume_id @property def volume_size(self): return self.db_info.volume_size @property def datastore_version(self): return self.ds_version @property def datastore(self): return self.ds @property def volume_support(self): return CONF.get(self.datastore_version.manager).volume_support @property def device_path(self): return CONF.get(self.datastore_version.manager).device_path @property def root_password(self): return self.root_pass @property def configuration(self): if self.db_info.configuration_id is not None: return Configuration.load(self.context, self.db_info.configuration_id) @property def slaves(self): if self.slave_list is None: self.slave_list = DBInstance.find_all(tenant_id=self.tenant_id, slave_of_id=self.id, deleted=False).all() return self.slave_list @property def cluster_id(self): return self.db_info.cluster_id @property def shard_id(self): return self.db_info.shard_id class DetailInstance(SimpleInstance): """A detailed view of an Instance. This loads a SimpleInstance and then adds additional data for the instance from the guest. """ def __init__(self, context, db_info, datastore_status): super(DetailInstance, self).__init__(context, db_info, datastore_status) self._volume_used = None self._volume_total = None @property def volume_used(self): return self._volume_used @volume_used.setter def volume_used(self, value): self._volume_used = value @property def volume_total(self): return self._volume_total @volume_total.setter def volume_total(self, value): self._volume_total = value def get_db_info(context, id, cluster_id=None, include_deleted=False): """ Retrieves an instance of the managed datastore from the persisted storage based on the ID and Context :param context: the context which owns the instance :type context: trove.common.context.TroveContext :param id: the unique ID of the instance :type id: unicode or str :param cluster_id: the unique ID of the cluster :type cluster_id: unicode or str :return: a record of the instance as its state exists in persisted storage :rtype: trove.instance.models.DBInstance """ if context is None: raise TypeError("Argument context not defined.") elif id is None: raise TypeError("Argument id not defined.") args = {'id': id} if cluster_id is not None: args['cluster_id'] = cluster_id if not include_deleted: args['deleted'] = False try: db_info = DBInstance.find_by(context=context, **args) except exception.NotFound: raise exception.NotFound(uuid=id) return db_info def load_any_instance(context, id, load_server=True): # Try to load an instance with a server. # If that fails, try to load it without the server. 
try: return load_instance(BuiltInstance, context, id, needs_server=load_server) except exception.UnprocessableEntity: LOG.warning(_LW("Could not load instance %s."), id) return load_instance(FreshInstance, context, id, needs_server=False) def load_instance(cls, context, id, needs_server=False, include_deleted=False): db_info = get_db_info(context, id, include_deleted=include_deleted) if not needs_server: # TODO(tim.simpson): When we have notifications this won't be # necessary and instead we'll just use the server_status field from # the instance table. load_simple_instance_server_status(context, db_info) server = None else: try: server = load_server(context, db_info.id, db_info.compute_instance_id) # TODO(tim.simpson): Remove this hack when we have notifications! db_info.server_status = server.status db_info.addresses = server.addresses except exception.ComputeInstanceNotFound: LOG.error(_LE("Could not load compute instance %s."), db_info.compute_instance_id) raise exception.UnprocessableEntity("Instance %s is not ready." % id) service_status = InstanceServiceStatus.find_by(instance_id=id) LOG.debug("Instance %(instance_id)s service status is %(service_status)s.", {'instance_id': id, 'service_status': service_status.status}) return cls(context, db_info, server, service_status) def load_instance_with_guest(cls, context, id, cluster_id=None): db_info = get_db_info(context, id, cluster_id) load_simple_instance_server_status(context, db_info) service_status = InstanceServiceStatus.find_by(instance_id=id) LOG.debug("Instance %(instance_id)s service status is %(service_status)s.", {'instance_id': id, 'service_status': service_status.status}) instance = cls(context, db_info, service_status) load_guest_info(instance, context, id) return instance def load_guest_info(instance, context, id): if instance.status not in AGENT_INVALID_STATUSES: guest = create_guest_client(context, id) try: volume_info = guest.get_volume_info() instance.volume_used = volume_info['used'] instance.volume_total = volume_info['total'] except Exception as e: LOG.error(e) return instance class BaseInstance(SimpleInstance): """Represents an instance. ----------- | | | i --------------------- | t n | compute instance | | r s --------------------- | o t | | v a | | e n --------------------- | c | datastore/guest | | e --------------------- | | ----------- """ def __init__(self, context, db_info, server, datastore_status): """ Creates a new initialized representation of an instance composed of its state in the database and its state from Nova :param context: the request context which contains the tenant that owns this instance :param db_info: the current state of this instance as it exists in the db :param server: the current state of this instance as it exists in Nova :param datastore_status: the current state of the datastore on this instance as it exists in the db :type context: trove.common.context.TroveContext :type db_info: trove.instance.models.DBInstance :type server: novaclient.v2.servers.Server :type datastore_status: trove.instance.models.InstanceServiceStatus """ super(BaseInstance, self).__init__(context, db_info, datastore_status) self.server = server self._guest = None self._nova_client = None self._volume_client = None def get_guest(self): return create_guest_client(self.context, self.db_info.id) def delete(self): def _delete_resources(): if self.is_building: raise exception.UnprocessableEntity("Instance %s is not ready." 
% self.id) LOG.debug("Deleting instance with compute id = %s.", self.db_info.compute_instance_id) from trove.cluster.models import is_cluster_deleting if (self.db_info.cluster_id is not None and not is_cluster_deleting(self.context, self.db_info.cluster_id)): raise exception.ClusterInstanceOperationNotSupported() if self.slaves: msg = _("Detach replicas before deleting replica source.") LOG.warning(msg) raise exception.ReplicaSourceDeleteForbidden(msg) self.update_db(task_status=InstanceTasks.DELETING, configuration_id=None) task_api.API(self.context).delete_instance(self.id) deltas = {'instances': -1} if self.volume_support: deltas['volumes'] = -self.volume_size return run_with_quotas(self.tenant_id, deltas, _delete_resources) def _delete_resources(self, deleted_at): """Implemented in subclass.""" pass def delete_async(self): deleted_at = datetime.utcnow() self._delete_resources(deleted_at) LOG.debug("Setting instance %s to be deleted.", self.id) self.update_db(deleted=True, deleted_at=deleted_at, task_status=InstanceTasks.NONE) self.set_servicestatus_deleted() # Delete associated security group if CONF.trove_security_groups_support: SecurityGroup.delete_for_instance(self.db_info.id, self.context) @property def guest(self): if not self._guest: self._guest = self.get_guest() return self._guest @property def nova_client(self): if not self._nova_client: self._nova_client = create_nova_client(self.context) return self._nova_client def update_db(self, **values): self.db_info = DBInstance.find_by(id=self.id, deleted=False) for key in values: setattr(self.db_info, key, values[key]) self.db_info.save() def set_servicestatus_deleted(self): del_instance = InstanceServiceStatus.find_by(instance_id=self.id) del_instance.set_status(tr_instance.ServiceStatuses.DELETED) del_instance.save() @property def volume_client(self): if not self._volume_client: self._volume_client = create_cinder_client(self.context) return self._volume_client def reset_task_status(self): LOG.info(_LI("Resetting task status to NONE on instance %s."), self.id) self.update_db(task_status=InstanceTasks.NONE) class FreshInstance(BaseInstance): @classmethod def load(cls, context, id): return load_instance(cls, context, id, needs_server=False) class BuiltInstance(BaseInstance): @classmethod def load(cls, context, id): return load_instance(cls, context, id, needs_server=True) class Instance(BuiltInstance): """Represents an instance. The life span of this object should be limited. Do not store them or pass them between threads. """ @classmethod def get_root_on_create(cls, datastore_manager): try: root_on_create = CONF.get(datastore_manager).root_on_create return root_on_create except NoSuchOptError: LOG.debug("root_on_create not configured for %s," " hence defaulting the value to False.", datastore_manager) return False @classmethod def create(cls, context, name, flavor_id, image_id, databases, users, datastore, datastore_version, volume_size, backup_id, availability_zone=None, nics=None, configuration_id=None, slave_of_id=None, cluster_config=None, replica_count=None, volume_type=None, modules=None): call_args = { 'name': name, 'flavor_id': flavor_id, 'datastore': datastore.name if datastore else None, 'datastore_version': datastore_version.name, 'image_id': image_id, 'availability_zone': availability_zone, } # All nova flavors are permitted for a datastore-version unless one # or more entries are found in datastore_version_metadata, # in which case only those are permitted. 
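        # Illustrative sketch (not part of the original method): with
        # made-up rows in datastore_version_metadata such as
        #     (datastore_version_id='dsv-1', key='flavor', value='7')
        #     (datastore_version_id='dsv-1', key='flavor', value='8')
        # only flavor ids '7' and '8' would pass the check below; with no
        # such rows, any Nova flavor is accepted.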
bound_flavors = DBDatastoreVersionMetadata.find_all( datastore_version_id=datastore_version.id, key='flavor', deleted=False ) if bound_flavors.count() > 0: valid_flavors = tuple(f.value for f in bound_flavors) if flavor_id not in valid_flavors: raise exception.DatastoreFlavorAssociationNotFound( datastore=datastore.name, datastore_version=datastore_version.name, flavor_id=flavor_id) datastore_cfg = CONF.get(datastore_version.manager) client = create_nova_client(context) try: flavor = client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) deltas = {'instances': 1} volume_support = datastore_cfg.volume_support if volume_support: call_args['volume_size'] = volume_size validate_volume_size(volume_size) deltas['volumes'] = volume_size # Instance volume should have enough space for the backup # Backup, and volume sizes are in GBs target_size = volume_size else: target_size = flavor.disk # local_storage if volume_size is not None: raise exception.VolumeNotSupported() if datastore_cfg.device_path: if flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) target_size = flavor.ephemeral # ephemeral_Storage if backup_id: call_args['backup_id'] = backup_id backup_info = Backup.get_by_id(context, backup_id) if not backup_info.is_done_successfuly: raise exception.BackupNotCompleteError( backup_id=backup_id, state=backup_info.state) if backup_info.size > target_size: raise exception.BackupTooLarge( backup_size=backup_info.size, disk_size=target_size) if not backup_info.check_swift_object_exist( context, verify_checksum=CONF.verify_swift_checksum_on_restore): raise exception.BackupFileNotFound( location=backup_info.location) if (backup_info.datastore_version_id and backup_info.datastore.name != datastore.name): raise exception.BackupDatastoreMismatchError( datastore1=backup_info.datastore.name, datastore2=datastore.name) if slave_of_id: call_args['replica_of'] = slave_of_id call_args['replica_count'] = replica_count replication_support = datastore_cfg.replication_strategy if not replication_support: raise exception.ReplicationNotSupported( datastore=datastore.name) try: # looking for replica source replica_source = DBInstance.find_by( context, id=slave_of_id, deleted=False) if replica_source.slave_of_id: raise exception.Forbidden( _("Cannot create a replica of a replica %(id)s.") % {'id': slave_of_id}) # load the replica source status to check if # source is available load_simple_instance_server_status( context, replica_source) replica_source_instance = Instance( context, replica_source, None, InstanceServiceStatus.find_by( context, instance_id=slave_of_id)) replica_source_instance.validate_can_perform_action() except exception.ModelNotFoundError: LOG.exception( _("Cannot create a replica of %(id)s " "as that instance could not be found.") % {'id': slave_of_id}) raise exception.NotFound(uuid=slave_of_id) elif replica_count and replica_count != 1: raise exception.Forbidden(_( "Replica count only valid when creating replicas. 
Cannot " "create %(count)d instances.") % {'count': replica_count}) multi_replica = slave_of_id and replica_count and replica_count > 1 instance_count = replica_count if multi_replica else 1 if not nics: nics = [] if CONF.default_neutron_networks: nics = [{"net-id": net_id} for net_id in CONF.default_neutron_networks] + nics if nics: call_args['nics'] = nics if cluster_config: call_args['cluster_id'] = cluster_config.get("id", None) if not modules: modules = [] module_ids = [mod['id'] for mod in modules] modules = module_models.Modules.load_by_ids(context, module_ids) auto_apply_modules = module_models.Modules.load_auto_apply( context, datastore.id, datastore_version.id) for aa_module in auto_apply_modules: if aa_module.id not in module_ids: modules.append(aa_module) module_list = [] for module in modules: module.contents = module_models.Module.deprocess_contents( module.contents) module_info = module_views.DetailedModuleView(module).data( include_contents=True) module_list.append(module_info) def _create_resources(): if cluster_config: cluster_id = cluster_config.get("id", None) shard_id = cluster_config.get("shard_id", None) instance_type = cluster_config.get("instance_type", None) else: cluster_id = shard_id = instance_type = None ids = [] names = [] root_passwords = [] root_password = None for instance_index in range(0, instance_count): db_info = DBInstance.create( name=name, flavor_id=flavor_id, tenant_id=context.tenant, volume_size=volume_size, datastore_version_id=datastore_version.id, task_status=InstanceTasks.BUILDING, configuration_id=configuration_id, slave_of_id=slave_of_id, cluster_id=cluster_id, shard_id=shard_id, type=instance_type) LOG.debug("Tenant %(tenant)s created new Trove instance " "%(db)s.", {'tenant': context.tenant, 'db': db_info.id}) instance_id = db_info.id cls.add_instance_modules(context, instance_id, modules) instance_name = name ids.append(instance_id) names.append(instance_name) root_passwords.append(None) # change the name to be name + replica_number if more than one if multi_replica: replica_number = instance_index + 1 names[instance_index] += '-' + str(replica_number) setattr(db_info, 'name', names[instance_index]) db_info.save() # if a configuration group is associated with an instance, # generate an overrides dict to pass into the instance creation # method config = Configuration(context, configuration_id) overrides = config.get_configuration_overrides() service_status = InstanceServiceStatus.create( instance_id=instance_id, status=tr_instance.ServiceStatuses.NEW) if CONF.trove_dns_support: dns_client = create_dns_client(context) hostname = dns_client.determine_hostname(instance_id) db_info.hostname = hostname db_info.save() if cls.get_root_on_create( datastore_version.manager) and not backup_id: root_password = utils.generate_random_password() root_passwords[instance_index] = root_password if instance_count > 1: instance_id = ids instance_name = names root_password = root_passwords task_api.API(context).create_instance( instance_id, instance_name, flavor, image_id, databases, users, datastore_version.manager, datastore_version.packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type=volume_type, modules=module_list) return SimpleInstance(context, db_info, service_status, root_password) with StartNotification(context, **call_args): return run_with_quotas(context.tenant, deltas, _create_resources) @classmethod def add_instance_modules(cls, context, instance_id, modules): for module in 
modules: module_models.InstanceModule.create( context, instance_id, module.id, module.md5) def get_flavor(self): client = create_nova_client(self.context) return client.flavors.get(self.flavor_id) def get_default_configuration_template(self): flavor = self.get_flavor() LOG.debug("Getting default config template for datastore version " "%(ds_version)s and flavor %(flavor)s.", {'ds_version': self.ds_version, 'flavor': flavor}) config = template.SingleInstanceConfigTemplate( self.ds_version, flavor, id) return config.render_dict() def resize_flavor(self, new_flavor_id): self.validate_can_perform_action() LOG.info(_LI("Resizing instance %(instance_id)s flavor to " "%(flavor_id)s."), {'instance_id': self.id, 'flavor_id': new_flavor_id}) if self.db_info.cluster_id is not None: raise exception.ClusterInstanceOperationNotSupported() # Validate that the old and new flavor IDs are not the same, new flavor # can be found and has ephemeral/volume support if required by the # current flavor. if self.flavor_id == new_flavor_id: raise exception.BadRequest(_("The new flavor id must be different " "than the current flavor id of '%s'.") % self.flavor_id) client = create_nova_client(self.context) try: new_flavor = client.flavors.get(new_flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=new_flavor_id) old_flavor = client.flavors.get(self.flavor_id) if self.volume_support: if new_flavor.ephemeral != 0: raise exception.LocalStorageNotSupported() elif self.device_path is not None: # ephemeral support enabled if new_flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=new_flavor_id) # Set the task to RESIZING and begin the async call before returning. self.update_db(task_status=InstanceTasks.RESIZING) LOG.debug("Instance %s set to RESIZING.", self.id) task_api.API(self.context).resize_flavor(self.id, old_flavor, new_flavor) def resize_volume(self, new_size): def _resize_resources(): self.validate_can_perform_action() LOG.info(_LI("Resizing volume of instance %s."), self.id) if self.db_info.cluster_id is not None: raise exception.ClusterInstanceOperationNotSupported() old_size = self.volume_size if int(new_size) <= old_size: raise exception.BadRequest(_("The new volume 'size' must be " "larger than the current volume " "size of '%s'.") % old_size) # Set the task to Resizing before sending off to the taskmanager self.update_db(task_status=InstanceTasks.RESIZING) task_api.API(self.context).resize_volume(new_size, self.id) if not self.volume_size: raise exception.BadRequest(_("Instance %s has no volume.") % self.id) new_size_l = long(new_size) validate_volume_size(new_size_l) return run_with_quotas(self.tenant_id, {'volumes': new_size_l - self.volume_size}, _resize_resources) def reboot(self): self.validate_can_perform_action() LOG.info(_LI("Rebooting instance %s."), self.id) if self.db_info.cluster_id is not None and not self.context.is_admin: raise exception.ClusterInstanceOperationNotSupported() self.update_db(task_status=InstanceTasks.REBOOTING) task_api.API(self.context).reboot(self.id) def restart(self): self.validate_can_perform_action() LOG.info(_LI("Restarting datastore on instance %s."), self.id) if self.db_info.cluster_id is not None and not self.context.is_admin: raise exception.ClusterInstanceOperationNotSupported() # Set our local status since Nova might not change it quick enough. # TODO(tim.simpson): Possible bad stuff can happen if this service # shuts down before it can set status to NONE. 
# We need a last updated time to mitigate this; # after some period of tolerance, we'll assume the # status is no longer in effect. self.update_db(task_status=InstanceTasks.REBOOTING) task_api.API(self.context).restart(self.id) def detach_replica(self): self.validate_can_perform_action() LOG.info(_LI("Detaching instance %s from its replication source."), self.id) if not self.slave_of_id: raise exception.BadRequest(_("Instance %s is not a replica.") % self.id) task_api.API(self.context).detach_replica(self.id) def promote_to_replica_source(self): self.validate_can_perform_action() LOG.info(_LI("Promoting instance %s to replication source."), self.id) if not self.slave_of_id: raise exception.BadRequest(_("Instance %s is not a replica.") % self.id) # Update task status of master and all slaves master = BuiltInstance.load(self.context, self.slave_of_id) for dbinfo in [master.db_info] + master.slaves: setattr(dbinfo, 'task_status', InstanceTasks.PROMOTING) dbinfo.save() task_api.API(self.context).promote_to_replica_source(self.id) def eject_replica_source(self): self.validate_can_perform_action() LOG.info(_LI("Ejecting replica source %s from its replication set."), self.id) if not self.slaves: raise exception.BadRequest(_("Instance %s is not a replica" " source.") % self.id) service = InstanceServiceStatus.find_by(instance_id=self.id) last_heartbeat_delta = datetime.utcnow() - service.updated_at agent_expiry_interval = timedelta(seconds=CONF.agent_heartbeat_expiry) if last_heartbeat_delta < agent_expiry_interval: raise exception.BadRequest(_("Replica Source %s cannot be ejected" " as it has a current heartbeat.") % self.id) # Update task status of master and all slaves for dbinfo in [self.db_info] + self.slaves: setattr(dbinfo, 'task_status', InstanceTasks.EJECTING) dbinfo.save() task_api.API(self.context).eject_replica_source(self.id) def migrate(self, host=None): self.validate_can_perform_action() LOG.info(_LI("Migrating instance id = %(instance_id)s " "to host = %(host)s."), {'instance_id': self.id, 'host': host}) self.update_db(task_status=InstanceTasks.MIGRATING) task_api.API(self.context).migrate(self.id, host) def validate_can_perform_action(self): """ Raises exception if an instance action cannot currently be performed. 
""" # cases where action cannot be performed if self.db_info.server_status != 'ACTIVE': status = self.db_info.server_status elif (self.db_info.task_status != InstanceTasks.NONE and self.db_info.task_status != InstanceTasks.RESTART_REQUIRED): status = self.db_info.task_status elif not self.datastore_status.status.action_is_allowed: status = self.status elif Backup.running(self.id): status = InstanceStatus.BACKUP else: # action can be performed return msg = (_("Instance %(instance_id)s is not currently available for an " "action to be performed (status was %(action_status)s).") % {'instance_id': self.id, 'action_status': status}) LOG.error(msg) raise exception.UnprocessableEntity(msg) def _validate_can_perform_assign(self): """ Raises exception if a configuration assign cannot currently be performed """ # check if the instance already has a configuration assigned if self.db_info.configuration_id: raise exception.ConfigurationAlreadyAttached( instance_id=self.id, configuration_id=self.db_info.configuration_id) # check if the instance is not ACTIVE or has tasks status = None if self.db_info.server_status != InstanceStatus.ACTIVE: status = self.db_info.server_status elif self.db_info.task_status != InstanceTasks.NONE: status = self.db_info.task_status.action if status: raise exception.InvalidInstanceState(instance_id=self.id, status=status) def unassign_configuration(self): LOG.debug("Unassigning the configuration from the instance %s.", self.id) if self.configuration and self.configuration.id: LOG.debug("Unassigning the configuration id %s.", self.configuration.id) self.guest.update_overrides({}, remove=True) # Dynamically reset the configuration values back to their default # values from the configuration template. # Reset the values only if the default is available for all of # them and restart is not required by any. # Mark the instance with a 'RESTART_REQUIRED' status otherwise. flavor = self.get_flavor() default_config = self._render_config_dict(flavor) current_config = Configuration(self.context, self.configuration.id) current_overrides = current_config.get_configuration_overrides() # Check the configuration template has defaults for all modified # values. has_defaults_for_all = all(key in default_config.keys() for key in current_overrides.keys()) if (not current_config.does_configuration_need_restart() and has_defaults_for_all): self.guest.apply_overrides( {k: v for k, v in default_config.items() if k in current_overrides}) else: LOG.debug( "Could not revert all configuration changes dynamically. " "A restart will be required.") self.update_db(task_status=InstanceTasks.RESTART_REQUIRED) else: LOG.debug("No configuration found on instance. Skipping.") def assign_configuration(self, configuration_id): self._validate_can_perform_assign() try: configuration = Configuration.load(self.context, configuration_id) except exception.ModelNotFoundError: raise exception.NotFound( message='Configuration group id: %s could not be found.' 
% configuration_id) config_ds_v = configuration.datastore_version_id inst_ds_v = self.db_info.datastore_version_id if (config_ds_v != inst_ds_v): raise exception.ConfigurationDatastoreNotMatchInstance( config_datastore_version=config_ds_v, instance_datastore_version=inst_ds_v) config = Configuration(self.context, configuration.id) LOG.debug("Config is %s.", config) self.update_overrides(config) self.update_db(configuration_id=configuration.id) def update_overrides(self, config): LOG.debug("Updating or removing overrides for instance %s.", self.id) overrides = config.get_configuration_overrides() self.guest.update_overrides(overrides) # Apply the new configuration values dynamically to the running # datastore service. # Apply overrides only if ALL values can be applied at once or mark # the instance with a 'RESTART_REQUIRED' status. if not config.does_configuration_need_restart(): self.guest.apply_overrides(overrides) else: LOG.debug("Configuration overrides has non-dynamic settings and " "will require restart to take effect.") self.update_db(task_status=InstanceTasks.RESTART_REQUIRED) def _render_config_dict(self, flavor): config = template.SingleInstanceConfigTemplate( self.datastore_version, flavor, self.id) return dict(config.render_dict()) def create_server_list_matcher(server_list): # Returns a method which finds a server from the given list. def find_server(instance_id, server_id): matches = [server for server in server_list if server.id == server_id] if len(matches) == 1: return matches[0] elif len(matches) < 1: # The instance was not found in the list and # this can happen if the instance is deleted from # nova but still in trove database raise exception.ComputeInstanceNotFound( instance_id=instance_id, server_id=server_id) else: # Should never happen, but never say never. 
LOG.error(_LE("Server %(server)s for instance %(instance)s was " "found twice!"), {'server': server_id, 'instance': instance_id}) raise exception.TroveError(uuid=instance_id) return find_server class Instances(object): DEFAULT_LIMIT = CONF.instances_page_size @staticmethod def load(context, include_clustered, instance_ids=None): def load_simple_instance(context, db, status, **kwargs): return SimpleInstance(context, db, status) if context is None: raise TypeError("Argument context not defined.") client = create_nova_client(context) servers = client.servers.list() query_opts = {'tenant_id': context.tenant, 'deleted': False} if not include_clustered: query_opts['cluster_id'] = None if instance_ids and len(instance_ids) > 1: raise exception.DatastoreOperationNotSupported( operation='module-instances', datastore='current') db_infos = DBInstance.query().filter_by(**query_opts) else: if instance_ids: query_opts['id'] = instance_ids[0] db_infos = DBInstance.find_all(**query_opts) limit = utils.pagination_limit(context.limit, Instances.DEFAULT_LIMIT) data_view = DBInstance.find_by_pagination('instances', db_infos, "foo", limit=limit, marker=context.marker) next_marker = data_view.next_page_marker find_server = create_server_list_matcher(servers) for db in db_infos: LOG.debug("Checking for db [id=%(db_id)s, " "compute_instance_id=%(instance_id)s].", {'db_id': db.id, 'instance_id': db.compute_instance_id}) ret = Instances._load_servers_status(load_simple_instance, context, data_view.collection, find_server) return ret, next_marker @staticmethod def load_all_by_cluster_id(context, cluster_id, load_servers=True): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False) return [load_any_instance(context, db_inst.id, load_server=load_servers) for db_inst in db_instances] @staticmethod def _load_servers_status(load_instance, context, db_items, find_server): ret = [] for db in db_items: server = None try: # TODO(tim.simpson): Delete when we get notifications working! if InstanceTasks.BUILDING == db.task_status: db.server_status = "BUILD" db.addresses = {} else: try: server = find_server(db.id, db.compute_instance_id) db.server_status = server.status db.addresses = server.addresses except exception.ComputeInstanceNotFound: db.server_status = "SHUTDOWN" # Fake it... db.addresses = {} # TODO(tim.simpson): End of hack. # volumes = find_volumes(server.id) datastore_status = InstanceServiceStatus.find_by( instance_id=db.id) if not datastore_status.status: # This should never happen. LOG.error(_LE("Server status could not be read for " "instance id(%s)."), db.id) continue LOG.debug("Server api_status(%s).", datastore_status.status.api_status) except exception.ModelNotFoundError: LOG.error(_LE("Server status could not be read for " "instance id(%s)."), db.id) continue ret.append(load_instance(context, db, datastore_status, server=server)) return ret class DBInstance(dbmodels.DatabaseModelBase): """Defines the task being executed plus the start time.""" _data_fields = ['name', 'created', 'compute_instance_id', 'task_id', 'task_description', 'task_start_time', 'volume_id', 'deleted', 'tenant_id', 'datastore_version_id', 'configuration_id', 'slave_of_id', 'cluster_id', 'shard_id', 'type'] def __init__(self, task_status, **kwargs): """ Creates a new persistable entity of the Trove Guest Instance for purposes of recording its current state and record of modifications :param task_status: the current state details of any activity or error that is running on this guest instance (e.g. 
resizing, deleting) :type task_status: trove.instance.tasks.InstanceTask """ kwargs["task_id"] = task_status.code kwargs["task_description"] = task_status.db_text kwargs["deleted"] = False super(DBInstance, self).__init__(**kwargs) self.set_task_status(task_status) def _validate(self, errors): if InstanceTask.from_code(self.task_id) is None: errors['task_id'] = "Not valid." if self.task_status is None: errors['task_status'] = "Cannot be None." def get_task_status(self): return InstanceTask.from_code(self.task_id) def set_task_status(self, value): self.task_id = value.code self.task_description = value.db_text task_status = property(get_task_status, set_task_status) class InstanceServiceStatus(dbmodels.DatabaseModelBase): _data_fields = ['instance_id', 'status_id', 'status_description', 'updated_at'] def __init__(self, status, **kwargs): kwargs["status_id"] = status.code kwargs["status_description"] = status.description super(InstanceServiceStatus, self).__init__(**kwargs) self.set_status(status) def _validate(self, errors): if self.status is None: errors['status'] = "Cannot be None." if tr_instance.ServiceStatus.from_code(self.status_id) is None: errors['status_id'] = "Not valid." def get_status(self): """ Returns the current enumerated status of the Service running on the instance :return: a ServiceStatus reference indicating the currently stored status of the service :rtype: trove.common.instance.ServiceStatus """ return tr_instance.ServiceStatus.from_code(self.status_id) def set_status(self, value): """ Sets the status of the hosted service :param value: current state of the hosted service :type value: trove.common.instance.ServiceStatus """ self.status_id = value.code self.status_description = value.description def save(self): self['updated_at'] = utils.utcnow() return get_db_api().save(self) status = property(get_status, set_status) def persisted_models(): return { 'instance': DBInstance, 'service_statuses': InstanceServiceStatus, } MYSQL_RESPONSIVE_STATUSES = [tr_instance.ServiceStatuses.RUNNING] trove-5.0.0/trove/instance/tasks.py0000664000567000056710000001125612701410316020456 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common instance status code used across Trove API. """ class InstanceTask(object): """ Stores the different kinds of tasks being performed by an instance. """ # TODO(tim.simpson): Figure out some way to migrate this to the TaskManager # once that revs up. 
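    # Illustrative sketch (not part of the original class): each task
    # constructed below registers itself in _lookup keyed by its numeric
    # code, so a task code read back from the database round-trips like:
    #
    #     task = InstanceTask.from_code(0x02)
    #     task.action    # -> 'DELETING'
    #     task.db_text   # -> 'Deleting the instance.'
    #     task.is_error  # -> False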
_lookup = {} def __init__(self, code, action, db_text, is_error=False): self._code = int(code) self._action = action self._db_text = db_text self._is_error = is_error InstanceTask._lookup[self._code] = self @property def action(self): return self._action @property def code(self): return self._code @property def db_text(self): return self._db_text @property def is_error(self): return self._is_error def __eq__(self, other): if not isinstance(other, InstanceTask): return False return self._db_text == other._db_text @classmethod def from_code(cls, code): if code not in cls._lookup: return None return cls._lookup[code] def __str__(self): return "(%d %s %s)" % (self._code, self._action, self._db_text) def __repr__(self): return "InstanceTask.%s (%s)" % (self._action, self._db_text) class InstanceTasks(object): NONE = InstanceTask(0x01, 'NONE', 'No tasks for the instance.') DELETING = InstanceTask(0x02, 'DELETING', 'Deleting the instance.') REBOOTING = InstanceTask(0x03, 'REBOOTING', 'Rebooting the instance.') RESIZING = InstanceTask(0x04, 'RESIZING', 'Resizing the instance.') BUILDING = InstanceTask(0x05, 'BUILDING', 'The instance is building.') MIGRATING = InstanceTask(0x06, 'MIGRATING', 'Migrating the instance.') RESTART_REQUIRED = InstanceTask(0x07, 'RESTART_REQUIRED', 'Instance requires a restart.') PROMOTING = InstanceTask(0x08, 'PROMOTING', 'Promoting the instance to replica source.') EJECTING = InstanceTask(0x09, 'EJECTING', 'Ejecting the replica source.') LOGGING = InstanceTask(0x0a, 'LOGGING', 'Transferring guest logs.') BUILDING_ERROR_DNS = InstanceTask(0x50, 'BUILDING', 'Build error: DNS.', is_error=True) BUILDING_ERROR_SERVER = InstanceTask(0x51, 'BUILDING', 'Build error: Server.', is_error=True) BUILDING_ERROR_VOLUME = InstanceTask(0x52, 'BUILDING', 'Build error: Volume.', is_error=True) BUILDING_ERROR_TIMEOUT_GA = InstanceTask(0x54, 'ERROR', 'Build error: ' 'guestagent timeout.', is_error=True) BUILDING_ERROR_SEC_GROUP = InstanceTask(0x53, 'BUILDING', 'Build error: Secgroup ' 'or rule.', is_error=True) BUILDING_ERROR_REPLICA = InstanceTask(0x54, 'BUILDING', 'Build error: Replica.', is_error=True) PROMOTION_ERROR = InstanceTask(0x55, 'PROMOTING', 'Replica Promotion Error.', is_error=True) EJECTION_ERROR = InstanceTask(0x56, 'EJECTING', 'Replica Source Ejection Error.', is_error=True) GROWING_ERROR = InstanceTask(0x57, 'GROWING', 'Growing Cluster Error.', is_error=True) SHRINKING_ERROR = InstanceTask(0x58, 'SHRINKING', 'Shrinking Cluster Error.', is_error=True) # Dissuade further additions at run-time. InstanceTask.__init__ = None trove-5.0.0/trove/cluster/0000775000567000056710000000000012701410521016625 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/cluster/__init__.py0000664000567000056710000000000012701410316020726 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/cluster/service.py0000664000567000056710000001743712701410316020655 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
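# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): cluster "action"
# requests carry exactly one action key in the body, and get_action_schema()
# below uses that key to pick the matching validation sub-schema. Assuming a
# grow action with made-up values, the dispatch looks roughly like:
#
#     body = {'grow': [{'flavorRef': '7', 'volume': {'size': 2}}]}
#     action_type = list(body.keys())[0]          # -> 'grow'
#     schema = action_schema.get(action_type, {})
# ---------------------------------------------------------------------------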
from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from trove.cluster import models from trove.cluster import views from trove.common import apischema from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import notification from trove.common.notification import StartNotification from trove.common import pagination from trove.common import utils from trove.common import wsgi from trove.datastore import models as datastore_models CONF = cfg.CONF LOG = logging.getLogger(__name__) class ClusterController(wsgi.Controller): """Controller for cluster functionality.""" schemas = apischema.cluster.copy() @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] return action_schema.get(action_type, {}) @classmethod def get_schema(cls, action, body): action_schema = super(ClusterController, cls).get_schema(action, body) if action == 'action': action_schema = cls.get_action_schema(body, action_schema) return action_schema def action(self, req, body, tenant_id, id): LOG.debug(("Committing Action Against Cluster for " "Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nid : '%(id)s'\n\n") % {"req": req, "id": id, "tenant_id": tenant_id}) if not body: raise exception.BadRequest(_("Invalid request body.")) if len(body) != 1: raise exception.BadRequest(_("Action request should have exactly" " one action specified in body")) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, id) cluster.action(context, req, *body.items()[0]) view = views.load_view(cluster, req=req, load_servers=False) wsgi_result = wsgi.Result(view.data(), 202) return wsgi_result def show(self, req, tenant_id, id): """Return a single cluster.""" LOG.debug(("Showing a Cluster for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nid : '%(id)s'\n\n") % {"req": req, "id": id, "tenant_id": tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, id) return wsgi.Result(views.load_view(cluster, req=req).data(), 200) def show_instance(self, req, tenant_id, cluster_id, instance_id): """Return a single instance belonging to a cluster.""" LOG.debug(("Showing an Instance in a Cluster for Tenant " "'%(tenant_id)s'\n" "req : '%(req)s'\n\n" "cluster_id : '%(cluster_id)s'\n\n" "instance_id : '%(instance_id)s;\n\n") % {"req": req, "tenant_id": tenant_id, "cluster_id": cluster_id, "instance_id": instance_id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, cluster_id) instance = models.Cluster.load_instance(context, cluster.id, instance_id) return wsgi.Result(views.ClusterInstanceDetailView( instance, req=req).data(), 200) def delete(self, req, tenant_id, id): """Delete a cluster.""" LOG.debug(("Deleting a Cluster for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nid : '%(id)s'\n\n") % {"req": req, "id": id, "tenant_id": tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, id) context.notification = notification.DBaaSClusterDelete(context, request=req) with StartNotification(context, cluster_id=id): cluster.delete() return wsgi.Result(None, 202) def index(self, req, tenant_id): """Return a list of clusters.""" LOG.debug(("Showing a list of clusters for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % {"req": req, "tenant_id": tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] if not context.is_admin and context.tenant != tenant_id: raise exception.TroveOperationAuthError(tenant_id=context.tenant) # load all 
clusters and instances for the tenant clusters, marker = models.Cluster.load_all(context, tenant_id) view = views.ClustersView(clusters, req=req) paged = pagination.SimplePaginatedDataView(req.url, 'clusters', view, marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id): LOG.debug(("Creating a Cluster for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nbody : '%(body)s'\n\n") % {"tenant_id": tenant_id, "req": req, "body": body}) context = req.environ[wsgi.CONTEXT_KEY] name = body['cluster']['name'] datastore_args = body['cluster'].get('datastore', {}) datastore, datastore_version = ( datastore_models.get_datastore_version(**datastore_args)) # TODO(saurabhs): add extended_properties to apischema extended_properties = body['cluster'].get('extended_properties', {}) try: clusters_enabled = (CONF.get(datastore_version.manager) .get('cluster_support')) except NoSuchOptError: clusters_enabled = False if not clusters_enabled: raise exception.ClusterDatastoreNotSupported( datastore=datastore.name, datastore_version=datastore_version.name) nodes = body['cluster']['instances'] instances = [] for node in nodes: flavor_id = utils.get_id_from_href(node['flavorRef']) volume_size = volume_type = nics = availability_zone = None if 'volume' in node: volume_size = int(node['volume']['size']) volume_type = node['volume'].get('volume_type') if 'nics' in node: nics = node['nics'] if 'availability_zone' in node: availability_zone = node['availability_zone'] instances.append({"flavor_id": flavor_id, "volume_size": volume_size, "volume_type": volume_type, "nics": nics, "availability_zone": availability_zone}) context.notification = notification.DBaaSClusterCreate(context, request=req) with StartNotification(context, name=name, datastore=datastore.name, datastore_version=datastore_version.name): cluster = models.Cluster.create(context, name, datastore, datastore_version, instances, extended_properties) view = views.load_view(cluster, req=req, load_servers=False) return wsgi.Result(view.data(), 200) trove-5.0.0/trove/cluster/views.py0000664000567000056710000001141112701410316020334 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
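
# ClusterController above routes both schema validation and execution off a
# single-key request body: get_action_schema() picks the sub-schema named by
# the one action key, and action() rejects bodies with zero or multiple
# keys. A minimal sketch of that dispatch with hypothetical handlers; note
# the list(...) around items(), since unlike the bare body.items()[0] call
# in action() above, dict views are not indexable on Python 3.

def dispatch_action(body, handlers):
    if not body or len(body) != 1:
        raise ValueError("Action request should have exactly one action "
                         "specified in body")
    action, param = list(body.items())[0]
    if action not in handlers:
        raise ValueError("Action %s not supported" % action)
    return handlers[action](param)


def grow(nodes):
    return 'grow', [node['flavorRef'] for node in nodes]


def shrink(nodes):
    return 'shrink', [node['id'] for node in nodes]


print(dispatch_action({'grow': [{'flavorRef': '7'}]},
                      {'grow': grow, 'shrink': shrink}))
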
from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import strategy from trove.common.views import create_links from trove.instance.views import InstanceDetailView LOG = logging.getLogger(__name__) CONF = cfg.CONF class ClusterView(object): def __init__(self, cluster, req=None, load_servers=True): self.cluster = cluster self.req = req self.load_servers = load_servers def data(self): instances, ip_list = self.build_instances() cluster_dict = { "id": self.cluster.id, "name": self.cluster.name, "task": {"id": self.cluster.task_id, "name": self.cluster.task_name, "description": self.cluster.task_description}, "created": self.cluster.created, "updated": self.cluster.updated, "links": self._build_links(), "datastore": {"type": self.cluster.datastore.name, "version": self.cluster.datastore_version.name}, "instances": instances } if ip_list: cluster_dict["ip"] = ip_list extended_properties = self.get_extended_properties() if extended_properties: cluster_dict["extended_properties"] = extended_properties LOG.debug(cluster_dict) return {"cluster": cluster_dict} def _build_links(self): return create_links("clusters", self.req, self.cluster.id) def _build_instances(self, ip_to_be_published_for=[], instance_dict_to_be_published_for=[]): instances = [] ip_list = [] if self.load_servers: cluster_instances = self.cluster.instances else: cluster_instances = self.cluster.instances_without_server for instance in cluster_instances: instance_dict = { "id": instance.id, "name": instance.name, "type": instance.type, "links": create_links("instances", self.req, instance.id) } if instance.shard_id: instance_dict["shard_id"] = instance.shard_id if self.load_servers: instance_dict["status"] = instance.status if CONF.get(instance.datastore_version.manager).volume_support: instance_dict["volume"] = {"size": instance.volume_size} instance_dict["flavor"] = self._build_flavor_info( instance.flavor_id) instance_ips = instance.get_visible_ip_addresses() if self.load_servers and instance_ips: instance_dict["ip"] = instance_ips if instance.type in ip_to_be_published_for: ip_list.append(instance_ips[0]) if instance.type in instance_dict_to_be_published_for: instances.append(instance_dict) ip_list.sort() return instances, ip_list def build_instances(self): raise NotImplementedError() def get_extended_properties(self): return None def _build_flavor_info(self, flavor_id): return { "id": flavor_id, "links": create_links("flavors", self.req, flavor_id) } class ClusterInstanceDetailView(InstanceDetailView): def __init__(self, instance, req): super(ClusterInstanceDetailView, self).__init__(instance, req=req) def data(self): result = super(ClusterInstanceDetailView, self).data() return result class ClustersView(object): def __init__(self, clusters, req=None): self.clusters = clusters self.req = req def data(self): data = [] for cluster in self.clusters: data.append(self.data_for_cluster(cluster)) return {'clusters': data} def data_for_cluster(self, cluster): view = load_view(cluster, req=self.req, load_servers=False) return view.data()['cluster'] def load_view(cluster, req, load_servers=True): manager = cluster.datastore_version.manager return strategy.load_api_strategy(manager).cluster_view_class( cluster, req, load_servers) trove-5.0.0/trove/cluster/models.py0000664000567000056710000002740412701410316020473 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from novaclient import exceptions as nova_exceptions from trove.cluster.tasks import ClusterTask from trove.cluster.tasks import ClusterTasks from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.notification import DBaaSClusterGrow, DBaaSClusterShrink from trove.common.notification import StartNotification from trove.common import remote from trove.common.strategies.cluster import strategy from trove.common import utils from trove.datastore import models as datastore_models from trove.db import models as dbmodels from trove.instance import models as inst_models from trove.taskmanager import api as task_api CONF = cfg.CONF LOG = logging.getLogger(__name__) def persisted_models(): return { 'clusters': DBCluster, } class DBCluster(dbmodels.DatabaseModelBase): _data_fields = ['id', 'created', 'updated', 'name', 'task_id', 'tenant_id', 'datastore_version_id', 'deleted', 'deleted_at'] def __init__(self, task_status, **kwargs): """ Creates a new persistable entity of the cluster. :param task_status: the current task of the cluster. :type task_status: trove.cluster.tasks.ClusterTask """ kwargs["task_id"] = task_status.code kwargs["deleted"] = False super(DBCluster, self).__init__(**kwargs) self.task_status = task_status def _validate(self, errors): if ClusterTask.from_code(self.task_id) is None: errors['task_id'] = "Not valid." if self.task_status is None: errors['task_status'] = "Cannot be None." @property def task_status(self): return ClusterTask.from_code(self.task_id) @task_status.setter def task_status(self, task_status): self.task_id = task_status.code class Cluster(object): DEFAULT_LIMIT = CONF.clusters_page_size def __init__(self, context, db_info, datastore=None, datastore_version=None): self.context = context self.db_info = db_info self.ds = datastore self.ds_version = datastore_version if self.ds_version is None: self.ds_version = (datastore_models.DatastoreVersion. load_by_uuid(self.db_info.datastore_version_id)) if self.ds is None: self.ds = (datastore_models.Datastore. 
load(self.ds_version.datastore_id)) self._db_instances = None @classmethod def get_guest(cls, instance): return remote.create_guest_client(instance.context, instance.db_info.id, instance.datastore_version.manager) @classmethod def load_all(cls, context, tenant_id): db_infos = DBCluster.find_all(tenant_id=tenant_id, deleted=False) limit = utils.pagination_limit(context.limit, Cluster.DEFAULT_LIMIT) data_view = DBCluster.find_by_pagination('clusters', db_infos, "foo", limit=limit, marker=context.marker) next_marker = data_view.next_page_marker ret = [cls(context, db_info) for db_info in data_view.collection] return ret, next_marker @classmethod def load(cls, context, cluster_id, clazz=None): try: db_info = DBCluster.find_by(context=context, id=cluster_id, deleted=False) except exception.ModelNotFoundError: raise exception.ClusterNotFound(cluster=cluster_id) if not clazz: ds_version = (datastore_models.DatastoreVersion. load_by_uuid(db_info.datastore_version_id)) manager = ds_version.manager clazz = strategy.load_api_strategy(manager).cluster_class return clazz(context, db_info) def update_db(self, **values): self.db_info = DBCluster.find_by(id=self.id, deleted=False) for key in values: setattr(self.db_info, key, values[key]) self.db_info.save() def reset_task(self): LOG.info(_("Setting task to NONE on cluster %s") % self.id) self.update_db(task_status=ClusterTasks.NONE) @property def id(self): return self.db_info.id @property def created(self): return self.db_info.created @property def updated(self): return self.db_info.updated @property def name(self): return self.db_info.name @property def task_id(self): return self.db_info.task_status.code @property def task_name(self): return self.db_info.task_status.name @property def task_description(self): return self.db_info.task_status.description @property def tenant_id(self): return self.db_info.tenant_id @property def datastore(self): return self.ds @property def datastore_version(self): return self.ds_version @property def deleted(self): return self.db_info.deleted @property def deleted_at(self): return self.db_info.deleted_at @property def db_instances(self): """DBInstance objects are persistent, therefore cacheable.""" if not self._db_instances: self._db_instances = inst_models.DBInstance.find_all( cluster_id=self.id, deleted=False).all() return self._db_instances @property def instances(self): return inst_models.Instances.load_all_by_cluster_id(self.context, self.db_info.id) @property def instances_without_server(self): return inst_models.Instances.load_all_by_cluster_id( self.context, self.db_info.id, load_servers=False) @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties): api_strategy = strategy.load_api_strategy(datastore_version.manager) return api_strategy.cluster_class.create(context, name, datastore, datastore_version, instances, extended_properties) def validate_cluster_available(self, valid_states=[ClusterTasks.NONE]): if self.db_info.task_status not in valid_states: msg = (_("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") % self.db_info.task_status.name) LOG.error(msg) raise exception.UnprocessableEntity(msg) def delete(self): self.validate_cluster_available([ClusterTasks.NONE, ClusterTasks.DELETING]) db_insts = inst_models.DBInstance.find_all(cluster_id=self.id, deleted=False).all() self.update_db(task_status=ClusterTasks.DELETING) for db_inst in db_insts: instance = inst_models.load_any_instance(self.context, db_inst.id) 
instance.delete() task_api.API(self.context).delete_cluster(self.id) def action(self, context, req, action, param): if action == 'grow': context.notification = DBaaSClusterGrow(context, request=req) with StartNotification(context, cluster_id=self.id): instances = [] for node in param: instance = { 'flavor_id': utils.get_id_from_href(node['flavorRef']) } if 'name' in node: instance['name'] = node['name'] if 'volume' in node: instance['volume_size'] = int(node['volume']['size']) instances.append(instance) return self.grow(instances) elif action == 'shrink': context.notification = DBaaSClusterShrink(context, request=req) with StartNotification(context, cluster_id=self.id): instance_ids = [instance['id'] for instance in param] return self.shrink(instance_ids) else: raise exception.BadRequest(_("Action %s not supported") % action) def grow(self, instances): raise exception.BadRequest(_("Action 'grow' not supported")) def shrink(self, instance_ids): raise exception.BadRequest(_("Action 'shrink' not supported")) @staticmethod def load_instance(context, cluster_id, instance_id): return inst_models.load_instance_with_guest( inst_models.DetailInstance, context, instance_id, cluster_id) @staticmethod def manager_from_cluster_id(context, cluster_id): db_info = DBCluster.find_by(context=context, id=cluster_id, deleted=False) ds_version = (datastore_models.DatastoreVersion. load_by_uuid(db_info.datastore_version_id)) return ds_version.manager def is_cluster_deleting(context, cluster_id): cluster = Cluster.load(context, cluster_id) return (cluster.db_info.task_status == ClusterTasks.DELETING or cluster.db_info.task_status == ClusterTasks.SHRINKING_CLUSTER) def get_flavors_from_instance_defs(context, instances, volume_enabled, ephemeral_enabled): """Load and validate flavors for given instance definitions.""" flavors = dict() nova_client = remote.create_nova_client(context) for instance in instances: flavor_id = instance['flavor_id'] if flavor_id not in flavors: try: flavor = nova_client.flavors.get(flavor_id) if (not volume_enabled and (ephemeral_enabled and flavor.ephemeral == 0)): raise exception.LocalStorageNotSpecified( flavor=flavor_id) flavors[flavor_id] = flavor except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) return flavors def get_required_volume_size(instances, volume_enabled): """Calculate the total Trove volume size for given instances.""" volume_sizes = [instance['volume_size'] for instance in instances if instance.get('volume_size', None)] if volume_enabled: if len(volume_sizes) != len(instances): raise exception.ClusterVolumeSizeRequired() total_volume_size = 0 for volume_size in volume_sizes: validate_volume_size(volume_size) total_volume_size += volume_size return total_volume_size if len(volume_sizes) > 0: raise exception.VolumeNotSupported() return None def validate_volume_size(size): if size is None: raise exception.VolumeSizeNotSpecified() max_size = CONF.max_accepted_volume_size if int(size) > max_size: msg = ("Volume 'size' cannot exceed maximum " "of %d Gb, %s cannot be accepted." % (max_size, size)) raise exception.VolumeQuotaExceeded(msg) trove-5.0.0/trove/cluster/tasks.py0000664000567000056710000000446512701410316020337 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ClusterTask(object): """ Stores the different kind of tasks being performed by a cluster. """ _lookup = {} def __init__(self, code, name, description): self._code = int(code) self._name = name self._description = description ClusterTask._lookup[self._code] = self @property def code(self): return self._code @property def name(self): return self._name @property def description(self): return self._description def __eq__(self, other): if not isinstance(other, ClusterTask): return False return self._code == other._code @classmethod def from_code(cls, code): if code not in cls._lookup: return None return cls._lookup[code] def __str__(self): return "(%d %s %s)" % (self._code, self._name, self._description) def __repr__(self): return "ClusterTask.%s (%s)" % (self._name, self._description) class ClusterTasks(object): NONE = ClusterTask(0x01, 'NONE', 'No tasks for the cluster.') BUILDING_INITIAL = ClusterTask( 0x02, 'BUILDING', 'Building the initial cluster.') DELETING = ClusterTask(0x03, 'DELETING', 'Deleting the cluster.') ADDING_SHARD = ClusterTask( 0x04, 'ADDING_SHARD', 'Adding a shard to the cluster.') GROWING_CLUSTER = ClusterTask( 0x05, 'GROWING_CLUSTER', 'Increasing the size of the cluster.') SHRINKING_CLUSTER = ClusterTask( 0x06, 'SHRINKING_CLUSTER', 'Decreasing the size of the cluster.') # Dissuade further additions at run-time. ClusterTask.__init__ = None trove-5.0.0/trove/__init__.py0000664000567000056710000000000012701410316017245 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/conductor/0000775000567000056710000000000012701410521017144 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/conductor/__init__.py0000664000567000056710000000000012701410316021245 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/conductor/manager.py0000664000567000056710000001320412701410316021132 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
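
# get_required_volume_size() and validate_volume_size() in
# trove/cluster/models.py above enforce an all-or-nothing rule: with volume
# support enabled, every instance definition must carry a volume_size no
# larger than the configured maximum and the sizes are summed; with it
# disabled, none may specify one. A condensed sketch of that arithmetic;
# max_size stands in for CONF.max_accepted_volume_size, and this is not
# the actual Trove helper.

def required_volume_size(instances, volume_enabled, max_size=10):
    sizes = [inst['volume_size'] for inst in instances
             if inst.get('volume_size')]
    if volume_enabled:
        if len(sizes) != len(instances):
            raise ValueError("A volume size is required for each instance.")
        total = 0
        for size in sizes:
            if int(size) > max_size:
                raise ValueError("Volume size cannot exceed %d GB."
                                 % max_size)
            total += int(size)
        return total
    if sizes:
        raise ValueError("Volume support is not enabled.")
    return None


# Three 2 GB data volumes add up to 6 GB of block storage for the cluster.
assert required_volume_size([{'volume_size': 2}] * 3, True) == 6
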
from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from trove.backup import models as bkup_models from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.instance import ServiceStatus from trove.common.rpc import version as rpc_version from trove.common.serializable_notification import SerializableNotification from trove.conductor.models import LastSeen from trove.extensions.mysql import models as mysql_models from trove.instance import models as t_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(periodic_task.PeriodicTasks): target = messaging.Target(version=rpc_version.RPC_API_VERSION) def __init__(self): super(Manager, self).__init__(CONF) def _message_too_old(self, instance_id, method_name, sent): fields = { "instance": instance_id, "method": method_name, "sent": sent, } if sent is None: LOG.error(_("[Instance %s] sent field not present. Cannot " "compare.") % instance_id) return False LOG.debug("Instance %(instance)s sent %(method)s at %(sent)s " % fields) seen = None try: seen = LastSeen.load(instance_id=instance_id, method_name=method_name) except exception.NotFound: # This is fine. pass if seen is None: LOG.debug("[Instance %s] Did not find any previous message. " "Creating." % instance_id) seen = LastSeen.create(instance_id=instance_id, method_name=method_name, sent=sent) seen.save() return False last_sent = float(seen.sent) if last_sent < sent: LOG.debug("[Instance %s] Rec'd message is younger than last " "seen. Updating." % instance_id) seen.sent = sent seen.save() return False LOG.info(_("[Instance %s] Rec'd message is older than last seen. " "Discarding.") % instance_id) return True def heartbeat(self, context, instance_id, payload, sent=None): LOG.debug("Instance ID: %(instance)s, Payload: %(payload)s" % {"instance": str(instance_id), "payload": str(payload)}) status = t_models.InstanceServiceStatus.find_by( instance_id=instance_id) if self._message_too_old(instance_id, 'heartbeat', sent): return if payload.get('service_status') is not None: status.set_status(ServiceStatus.from_description( payload['service_status'])) status.save() def update_backup(self, context, instance_id, backup_id, sent=None, **backup_fields): LOG.debug("Instance ID: %(instance)s, Backup ID: %(backup)s" % {"instance": str(instance_id), "backup": str(backup_id)}) backup = bkup_models.DBBackup.find_by(id=backup_id) # TODO(datsun180b): use context to verify tenant matches if self._message_too_old(instance_id, 'update_backup', sent): return # Some verification based on IDs if backup_id != backup.id: fields = { 'expected': backup_id, 'found': backup.id, 'instance': str(instance_id), } LOG.error(_("[Instance: %(instance)s] Backup IDs mismatch! " "Expected %(expected)s, found %(found)s") % fields) return if instance_id != backup.instance_id: fields = { 'expected': instance_id, 'found': backup.instance_id, 'instance': str(instance_id), } LOG.error(_("[Instance: %(instance)s] Backup instance IDs " "mismatch! 
Expected %(expected)s, found " "%(found)s") % fields) return for k, v in backup_fields.items(): if hasattr(backup, k): fields = { 'key': k, 'value': v, } LOG.debug("Backup %(key)s: %(value)s" % fields) setattr(backup, k, v) backup.save() def report_root(self, context, instance_id, user): mysql_models.RootHistory.create(context, instance_id, user) def notify_end(self, context, serialized_notification, notification_args): notification = SerializableNotification.deserialize( context, serialized_notification) notification.notify_end(**notification_args) def notify_exc_info(self, context, serialized_notification, message, exception): notification = SerializableNotification.deserialize( context, serialized_notification) notification.notify_exc_info(message, exception) trove-5.0.0/trove/conductor/models.py0000664000567000056710000000320312701410316021001 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.db import get_db_api LOG = logging.getLogger(__name__) def persisted_models(): return {'conductor_lastseen': LastSeen} class LastSeen(object): """A table used only by Conductor to discard messages that arrive late and out of order. """ _auto_generated_attrs = [] _data_fields = ['instance_id', 'method_name', 'sent'] _table_name = 'conductor_lastseen' preserve_on_delete = False def __init__(self, instance_id, method_name, sent): self.instance_id = instance_id self.method_name = method_name self.sent = sent def save(self): return get_db_api().save(self) @classmethod def load(cls, instance_id, method_name): seen = get_db_api().find_by(cls, instance_id=instance_id, method_name=method_name) return seen @classmethod def create(cls, instance_id, method_name, sent): seen = LastSeen(instance_id, method_name, sent) return seen.save() trove-5.0.0/trove/conductor/api.py0000664000567000056710000000736712701410316020306 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
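
# Manager._message_too_old() above is the conductor's defence against late,
# out-of-order RPC messages: it keeps the newest 'sent' timestamp per
# (instance, method) pair, persisted in the conductor_lastseen table, and
# discards anything that is not strictly newer. A minimal in-memory sketch
# of the same check; the dict stands in for the LastSeen model.

_last_seen = {}


def message_too_old(instance_id, method_name, sent):
    if sent is None:
        # No timestamp to compare against; let the message through.
        return False
    key = (instance_id, method_name)
    last = _last_seen.get(key)
    if last is not None and last >= sent:
        # Equal to or older than what was already seen: discard.
        return True
    _last_seen[key] = sent
    return False


assert message_too_old('inst-1', 'heartbeat', 100.0) is False
assert message_too_old('inst-1', 'heartbeat', 90.0) is True
assert message_too_old('inst-1', 'heartbeat', 110.0) is False
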
from oslo_log import log as logging import oslo_messaging as messaging from trove.common import cfg from trove.common.rpc import version as rpc_version from trove.common.serializable_notification import SerializableNotification from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class API(object): """API for interacting with trove conductor.""" def __init__(self, context): self.context = context super(API, self).__init__() target = messaging.Target(topic=CONF.conductor_queue, version=rpc_version.RPC_API_VERSION) self.version_cap = rpc_version.VERSION_ALIASES.get( CONF.upgrade_levels.conductor) self.client = self.get_client(target, self.version_cap) def get_client(self, target, version_cap, serializer=None): return rpc.get_client(target, version_cap=version_cap, serializer=serializer) def heartbeat(self, instance_id, payload, sent=None): LOG.debug("Making async call to cast heartbeat for instance: %s" % instance_id) cctxt = self.client.prepare(version=self.version_cap) cctxt.cast(self.context, "heartbeat", instance_id=instance_id, sent=sent, payload=payload) def update_backup(self, instance_id, backup_id, sent=None, **backup_fields): LOG.debug("Making async call to cast update_backup for instance: %s" % instance_id) cctxt = self.client.prepare(version=self.version_cap) cctxt.cast(self.context, "update_backup", instance_id=instance_id, backup_id=backup_id, sent=sent, **backup_fields) def report_root(self, instance_id, user): LOG.debug("Making async call to cast report_root for instance: %s" % instance_id) cctxt = self.client.prepare(version=self.version_cap) cctxt.cast(self.context, "report_root", instance_id=instance_id, user=user) def notify_end(self, **notification_args): LOG.debug("Making async call to cast end notification") cctxt = self.client.prepare(version=self.version_cap) context = self.context serialized = SerializableNotification.serialize(context, context.notification) cctxt.cast(self.context, "notify_end", serialized_notification=serialized, notification_args=notification_args) def notify_exc_info(self, message, exception): LOG.debug("Making async call to cast error notification") cctxt = self.client.prepare(version=self.version_cap) context = self.context serialized = SerializableNotification.serialize(context, context.notification) cctxt.cast(self.context, "notify_exception", serialized_notification=serialized, message=message, exception=exception) trove-5.0.0/trove/cmd/0000775000567000056710000000000012701410521015707 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/cmd/conductor.py0000775000567000056710000000236312701410316020272 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
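
# Every method on the conductor API class above follows the same
# fire-and-forget shape: prepare() a client pinned at the negotiated
# version cap, then cast(), which returns immediately and expects no reply,
# so guests never block on the conductor. A minimal sketch of that shape;
# ExampleAPI, 'example_topic', and the 'ping' method are hypothetical.

import oslo_messaging as messaging

from trove import rpc


class ExampleAPI(object):
    def __init__(self, context, version_cap='1.0'):
        self.context = context
        self.version_cap = version_cap
        target = messaging.Target(topic='example_topic',
                                  version=version_cap)
        self.client = rpc.get_client(target, version_cap=version_cap)

    def ping(self, payload):
        # cast() is asynchronous; any result must arrive via another cast.
        cctxt = self.client.prepare(version=self.version_cap)
        cctxt.cast(self.context, "ping", payload=payload)
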
from oslo_concurrency import processutils
from oslo_service import service as openstack_service

from trove.cmd.common import with_initialize


@with_initialize
def main(conf):
    from trove.common.rpc import service as rpc_service
    from trove.common.rpc import version as rpc_version

    topic = conf.conductor_queue
    server = rpc_service.RpcService(
        manager=conf.conductor_manager, topic=topic,
        rpc_api_version=rpc_version.RPC_API_VERSION)
    workers = conf.trove_conductor_workers or processutils.get_worker_count()
    launcher = openstack_service.launch(conf, server, workers=workers)
    launcher.wait()
trove-5.0.0/trove/cmd/fakemode.py0000775000567000056710000000425212701410316020044 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE: os was missing from the original imports even though main() calls
# os.fork() below.
import os

from oslo_concurrency import processutils
from oslo_config import cfg as openstack_cfg

from trove.cmd.common import with_initialize

opts = [
    openstack_cfg.BoolOpt('fork', short='f', default=False, dest='fork'),
    openstack_cfg.StrOpt('pid-file', default='.pid'),
    openstack_cfg.StrOpt('override-logfile', default=None),
]


def setup_logging(conf):
    if conf.override_logfile:
        conf.use_stderr = False
        conf.log_file = conf.override_logfile


@with_initialize(extra_opts=opts, pre_logging=setup_logging)
def main(conf):
    if conf.fork:
        pid = os.fork()
        if pid == 0:
            start_server(conf)
        else:
            print("Starting server:%s" % pid)
            # The original read the undefined name CONF here; this module
            # only has the conf object passed in by with_initialize.
            pid_file = conf.pid_file
            with open(pid_file, 'w') as f:
                f.write(str(pid))
    else:
        start_server(conf)


def start_fake_taskmanager(conf):
    topic = conf.taskmanager_queue
    from trove.common.rpc import service as rpc_service
    from trove.common.rpc import version as rpc_version
    taskman_service = rpc_service.RpcService(
        topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION,
        manager='trove.taskmanager.manager.Manager')
    taskman_service.start()


def start_server(conf):
    from trove.common import wsgi
    conf_file = conf.find_file(conf.api_paste_config)
    workers = conf.trove_api_workers or processutils.get_worker_count()
    launcher = wsgi.launch('trove', conf.bind_port or 8779, conf_file,
                           workers=workers)
    start_fake_taskmanager(conf)
    launcher.wait()
trove-5.0.0/trove/cmd/taskmanager.py0000775000567000056710000000257212701410316020571 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
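
# conductor.py above sizes its worker pool with "configured value or CPU
# count": conf.trove_conductor_workers or processutils.get_worker_count().
# The same falsy-or idiom appears in fakemode.py and api.py for API
# workers. A tiny sketch of the idiom and its edge case: None (unset) and
# 0 both fall back to one worker per CPU. The effective_workers name is
# illustrative, not part of Trove.

from oslo_concurrency import processutils


def effective_workers(configured):
    # Both None and 0 are falsy, so either falls back to the CPU count
    # reported by oslo.concurrency.
    return configured or processutils.get_worker_count()


assert effective_workers(4) == 4
assert effective_workers(None) == processutils.get_worker_count()
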
from oslo_config import cfg as openstack_cfg from oslo_service import service as openstack_service from trove.cmd.common import with_initialize extra_opts = [openstack_cfg.StrOpt('taskmanager_manager')] def startup(conf, topic): from trove.common.rpc import service as rpc_service from trove.common.rpc import version as rpc_version server = rpc_service.RpcService( manager=conf.taskmanager_manager, topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION) launcher = openstack_service.launch(conf, server) launcher.wait() @with_initialize(extra_opts=extra_opts) def main(conf): startup(conf, conf.taskmanager_queue) @with_initialize(extra_opts=extra_opts) def mgmt_main(conf): startup(conf, "mgmt-taskmanager") trove-5.0.0/trove/cmd/manage.py0000775000567000056710000002243712701410316017526 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext import inspect import sys gettext.install('trove', unicode=1) from oslo_log import log as logging from oslo_log.versionutils import deprecated from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.configuration import models as config_models from trove.datastore import models as datastore_models from trove.db import get_db_api CONF = cfg.CONF class Commands(object): def __init__(self): self.db_api = get_db_api() def db_sync(self, repo_path=None): self.db_api.db_sync(CONF, repo_path=repo_path) def db_upgrade(self, version=None, repo_path=None): self.db_api.db_upgrade(CONF, version, repo_path=repo_path) @deprecated(as_of=deprecated.MITAKA) def db_downgrade(self, version, repo_path=None): self.db_api.db_downgrade(CONF, version, repo_path=repo_path) def execute(self): exec_method = getattr(self, CONF.action.name) args = inspect.getargspec(exec_method) args.args.remove('self') kwargs = {} for arg in args.args: kwargs[arg] = getattr(CONF.action, arg) exec_method(**kwargs) def datastore_update(self, datastore_name, default_version): try: datastore_models.update_datastore(datastore_name, default_version) print("Datastore '%s' updated." % datastore_name) except exception.DatastoreVersionNotFound as e: print(e) def datastore_version_update(self, datastore, version_name, manager, image_id, packages, active): try: datastore_models.update_datastore_version(datastore, version_name, manager, image_id, packages, active) print("Datastore version '%s' updated." 
% version_name) except exception.DatastoreNotFound as e: print(e) def db_recreate(self, repo_path): """Drops the database and recreates it.""" self.db_api.drop_db(CONF) self.db_sync(repo_path) def db_load_datastore_config_parameters(self, datastore, datastore_version, config_file_location): print("Loading config parameters for datastore (%s) version (%s)" % (datastore, datastore_version)) config_models.load_datastore_configuration_parameters( datastore, datastore_version, config_file_location) def datastore_version_flavor_add(self, datastore_name, datastore_version_name, flavor_ids): """Adds flavors for a given datastore version id.""" try: dsmetadata = datastore_models.DatastoreVersionMetadata dsmetadata.add_datastore_version_flavor_association( datastore_name, datastore_version_name, flavor_ids.split(",")) print("Added flavors '%s' to the '%s' '%s'." % (flavor_ids, datastore_name, datastore_version_name)) except exception.DatastoreVersionNotFound as e: print(e) def datastore_version_flavor_delete(self, datastore_name, datastore_version_name, flavor_id): """Deletes a flavor's association with a given datastore.""" try: dsmetadata = datastore_models.DatastoreVersionMetadata dsmetadata.delete_datastore_version_flavor_association( datastore_name, datastore_version_name, flavor_id) print("Deleted flavor '%s' from '%s' '%s'." % (flavor_id, datastore_name, datastore_version_name)) except exception.DatastoreVersionNotFound as e: print(e) def params_of(self, command_name): if Commands.has(command_name): return utils.MethodInspector(getattr(self, command_name)) def main(): def actions(subparser): repo_path_help = 'SQLAlchemy Migrate repository path.' parser = subparser.add_parser( 'db_sync', description='Populate the database structure') parser.add_argument('--repo_path', help=repo_path_help) parser = subparser.add_parser( 'db_upgrade', description='Upgrade the database to the ' 'specified version.') parser.add_argument( '--version', help='Target version. Defaults to the ' 'latest version.') parser.add_argument('--repo_path', help=repo_path_help) parser = subparser.add_parser( 'db_downgrade', description='Downgrade the database to the ' 'specified version.') parser.add_argument('version', help='Target version.') parser.add_argument('--repo_path', help=repo_path_help) parser = subparser.add_parser( 'datastore_update', description='Add or update a datastore. ' 'If the datastore already exists, the default version will be ' 'updated.') parser.add_argument( 'datastore_name', help='Name of the datastore.') parser.add_argument( 'default_version', help='Name or ID of an existing datastore ' 'version to set as the default. When adding a new datastore, use ' 'an empty string.') parser = subparser.add_parser( 'datastore_version_update', description='Add or update a ' 'datastore version. If the datastore version already exists, all ' 'values except the datastore name and version will be updated.') parser.add_argument('datastore', help='Name of the datastore.') parser.add_argument( 'version_name', help='Name of the datastore version.') parser.add_argument( 'manager', help='Name of the manager that will administer the ' 'datastore version.') parser.add_argument( 'image_id', help='ID of the image used to create an instance of ' 'the datastore version.') parser.add_argument( 'packages', help='Packages required by the datastore version that ' 'are installed on the guest image.') parser.add_argument( 'active', help='Whether the datastore version is active or not. 
' 'Accepted values are 0 and 1.') parser = subparser.add_parser( 'db_recreate', description='Drop the database and recreate it.') parser.add_argument('--repo_path', help=repo_path_help) parser = subparser.add_parser( 'db_load_datastore_config_parameters', description='Loads configuration group parameter validation rules ' 'for a datastore version into the database.') parser.add_argument( 'datastore', help='Name of the datastore.') parser.add_argument( 'datastore_version', help='Name of the datastore version.') parser.add_argument( 'config_file_location', help='Fully qualified file path to the configuration group ' 'parameter validation rules.') parser = subparser.add_parser( 'datastore_version_flavor_add', help='Adds flavor association to ' 'a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') parser.add_argument('flavor_ids', help='Comma separated list of ' 'flavor ids.') parser = subparser.add_parser( 'datastore_version_flavor_delete', help='Deletes a flavor ' 'associated with a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') parser.add_argument('flavor_id', help='The flavor to be deleted for ' 'a given datastore and datastore version.') cfg.custom_parser('action', actions) cfg.parse_args(sys.argv) try: logging.setup(CONF, None) Commands().execute() sys.exit(0) except TypeError as e: print(_("Possible wrong number of arguments supplied %s.") % e) sys.exit(2) except Exception: print(_("Command failed, please check log for more info.")) raise trove-5.0.0/trove/cmd/__init__.py0000664000567000056710000000202312701410316020017 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file implements eventlet monkey patching according to the OpenStack # guidelines and best practices found at (note the multi-line URL) # http://specs.openstack.org/openstack/ # openstack-specs/specs/eventlet-best-practices.html # # It is not safe to leave monkey patching till later. import os if not os.environ.get('NO_EVENTLET_MONKEYPATCH'): import eventlet eventlet.monkey_patch(all=True) trove-5.0.0/trove/cmd/common.py0000664000567000056710000000404312701410316017554 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. def initialize(extra_opts=None, pre_logging=None): # Initialize localization support (the underscore character). import gettext gettext.install('trove', unicode=1) # Import only the modules necessary to initialize logging and determine if # debug_utils are enabled. import sys from oslo_log import log as logging from trove.common import cfg from trove.common import debug_utils conf = cfg.CONF if extra_opts: conf.register_cli_opts(extra_opts) cfg.parse_args(sys.argv) if pre_logging: pre_logging(conf) logging.setup(conf, None) debug_utils.setup() # rpc module must be loaded after decision about thread monkeypatching # because if thread module is not monkeypatched we can't use eventlet # executor from oslo_messaging library. from trove import rpc rpc.init(conf) # Initialize Trove database. from trove.db import get_db_api get_db_api().configure_db(conf) return conf # May be used by other scripts def with_initialize(main_function=None, **kwargs): """ Decorates a script main function to make sure that dependency imports and initialization happens correctly. """ def apply(main_function): def run(): conf = initialize(**kwargs) return main_function(conf) return run if main_function: return apply(main_function) else: return apply trove-5.0.0/trove/cmd/guest.py0000775000567000056710000000401612701410316017416 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext gettext.install('trove', unicode=1) import sys from oslo_config import cfg as openstack_cfg from oslo_log import log as logging from oslo_service import service as openstack_service from trove.common import cfg from trove.common import debug_utils CONF = cfg.CONF # The guest_id opt definition must match the one in common/cfg.py CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None, help="ID of the Guest Instance.")]) def main(): cfg.parse_args(sys.argv) logging.setup(CONF, None) debug_utils.setup() from trove.guestagent import dbaas manager = dbaas.datastore_registry().get(CONF.datastore_manager) if not manager: msg = ("Manager class not registered for datastore manager %s" % CONF.datastore_manager) raise RuntimeError(msg) # rpc module must be loaded after decision about thread monkeypatching # because if thread module is not monkeypatched we can't use eventlet # executor from oslo_messaging library. from trove import rpc rpc.init(CONF) from trove.common.rpc import service as rpc_service from trove.common.rpc import version as rpc_version server = rpc_service.RpcService( manager=manager, host=CONF.guest_id, rpc_api_version=rpc_version.RPC_API_VERSION) launcher = openstack_service.launch(CONF, server) launcher.wait() trove-5.0.0/trove/cmd/api.py0000775000567000056710000000227312701410316017043 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from trove.cmd.common import with_initialize from trove.common import profile @with_initialize def main(CONF): from trove.common import cfg from trove.common import wsgi cfg.set_api_config_defaults() profile.setup_profiler('api', CONF.host) conf_file = CONF.find_file(CONF.api_paste_config) workers = CONF.trove_api_workers or processutils.get_worker_count() launcher = wsgi.launch('trove', CONF.bind_port, conf_file, host=CONF.bind_host, workers=workers) launcher.wait() trove-5.0.0/trove/templates/0000775000567000056710000000000012701410521017142 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/percona/0000775000567000056710000000000012701410521020571 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/percona/replica.config.template0000664000567000056710000000041312701410316025211 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log relay_log_info_repository = TABLE relay_log_recovery = 1 relay_log_purge = 1 enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON read_only = true trove-5.0.0/trove/templates/percona/override.config.template0000664000567000056710000000033712701410316025416 0ustar jenkinsjenkins00000000000000[mysqld] {% for key, value in overrides.iteritems() -%} {%- if value == True -%} {{key}} = 1 {%- elif value == False -%} {{key}} = 0 {%- elif value == "" -%} {{key}} {%- else -%} {{key}}={{value}} {%- endif %} {% endfor %} trove-5.0.0/trove/templates/percona/config.template0000664000567000056710000000271212701410316023577 0ustar jenkinsjenkins00000000000000[client] port = 3306 [mysqld_safe] nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data ####tmpdir = /tmp tmpdir = /var/tmp pid_file = /var/run/mysqld/mysqld.pid skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover = BACKUP query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = 
{{server_id}} [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ trove-5.0.0/trove/templates/percona/5.5/0000775000567000056710000000000012701410521021100 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/percona/5.5/replica.config.template0000664000567000056710000000017212701410316025522 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log read_only = true trove-5.0.0/trove/templates/percona/5.5/replica_source.config.template0000664000567000056710000000006512701410316027103 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log trove-5.0.0/trove/templates/percona/validation-rules.json0000664000567000056710000001333012701410316024750 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "name": "innodb_file_per_table", "restart_required": true, "max": 1, "min": 0, "type": "integer" }, { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "key_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "connect_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "join_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 32768, "type": "integer" }, { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 68719476736, "min": 0, "type": "integer" }, { "name": "innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967296, "min": 1048576, "type": "integer" }, { "name": "innodb_open_files", "restart_required": true, "max": 4294967296, "min": 10, "type": "integer" }, { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" }, { "name": "sync_binlog", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "bulk_insert_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "expire_logs_days", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" }, { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709547520, "min": 1, "type": "integer" }, { "name": "max_connections", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 4, "type": "integer" }, { "name": "max_user_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" }, { "name": "server_id", "restart_required": true, "max": 100000, "min": 1, "type": "integer" }, { "name": "wait_timeout", 
"restart_required": false, "max": 31536000, "min": 1, "type": "integer" }, { "name": "character_set_client", "restart_required": false, "type": "string" }, { "name": "character_set_connection", "restart_required": false, "type": "string" }, { "name": "character_set_database", "restart_required": false, "type": "string" }, { "name": "character_set_filesystem", "restart_required": false, "type": "string" }, { "name": "character_set_results", "restart_required": false, "type": "string" }, { "name": "character_set_server", "restart_required": false, "type": "string" }, { "name": "collation_connection", "restart_required": false, "type": "string" }, { "name": "collation_database", "restart_required": false, "type": "string" }, { "name": "collation_server", "restart_required": false, "type": "string" } ] }trove-5.0.0/trove/templates/percona/replica_source.config.template0000664000567000056710000000025712701410316026577 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON enforce_storage_engine = InnoDB trove-5.0.0/trove/templates/mariadb/0000775000567000056710000000000012701410521020541 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/mariadb/replica.config.template0000664000567000056710000000017312701410316025164 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysqld-relay-bin.log read_only = true trove-5.0.0/trove/templates/mariadb/override.config.template0000664000567000056710000000033712701410316025366 0ustar jenkinsjenkins00000000000000[mysqld] {% for key, value in overrides.iteritems() -%} {%- if value == True -%} {{key}} = 1 {%- elif value == False -%} {{key}} = 0 {%- elif value == "" -%} {{key}} {%- else -%} {{key}}={{value}} {%- endif %} {% endfor %} trove-5.0.0/trove/templates/mariadb/cluster.config.template0000664000567000056710000000111612701410316025224 0ustar jenkinsjenkins00000000000000[mysqld] bind-address=0.0.0.0 default-storage-engine=innodb [galera] binlog_format=ROW innodb_autoinc_lock_mode=2 innodb_flush_log_at_trx_commit=0 innodb_doublewrite=1 query_cache_size=0 wsrep_on=ON wsrep_slave_threads=8 wsrep_provider=/usr/lib/libgalera_smm.so wsrep_provider_options="gcache.size={{ (128 * flavor['ram']/512)|int }}M; gcache.page_size=1G" wsrep_sst_method=rsync wsrep_sst_auth="{{ replication_user_pass }}" wsrep_cluster_address="gcomm://{{ cluster_ips }}" wsrep_cluster_name={{ cluster_name }} wsrep_node_name={{ instance_name }} wsrep_node_address={{ instance_ip }} trove-5.0.0/trove/templates/mariadb/config.template0000664000567000056710000000300012701410316023536 0ustar jenkinsjenkins00000000000000[client] port = 3306 [mysqld_safe] nice = 0 [mysqld] ignore_builtin_innodb plugin_load=innodb=ha_innodb.so user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data ####tmpdir = /tmp tmpdir = /var/tmp pid_file = /var/run/mysqld/mysqld.pid skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover = BACKUP query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M 
innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ trove-5.0.0/trove/templates/mariadb/replica_source.config.template0000664000567000056710000000006712701410316026546 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mariadb-bin.log trove-5.0.0/trove/templates/default.heat.template0000664000567000056710000000470712701410316023255 0ustar jenkinsjenkins00000000000000HeatTemplateFormatVersion: '2012-12-12' Description: Instance creation template for {{datastore_manager}} Parameters: Flavor: Type: String VolumeSize: Type: Number Default : '1' InstanceId: Type: String ImageId: Type: String DatastoreManager: Type: String AvailabilityZone: Type: String Default: nova TenantId: Type: String Resources: {% for port in ports %} {{ port.name }}: Type: OS::Neutron::Port Properties: network_id: "{{ port.net_id }}" security_groups: [{Ref: DatastoreSG}] {% if port.fixed_ip %} fixed_ips: [{"ip_address": "{{ port.fixed_ip }}"}] {% endif %} {% endfor %} BaseInstance: Type: AWS::EC2::Instance Metadata: AWS::CloudFormation::Init: config: files: {% for file, content in files.iteritems() %} {{ file }}: content: | {{ content | indent(16) }} mode: '000644' owner: root group: root {% endfor %} Properties: ImageId: {Ref: ImageId} InstanceType: {Ref: Flavor} AvailabilityZone: {Ref: AvailabilityZone} SecurityGroups : [{Ref: DatastoreSG}] UserData: Fn::Base64: Fn::Join: - '' - ["#!/bin/bash -v\n", "/opt/aws/bin/cfn-init\n", "sudo service trove-guest start\n"] {% if volume_support %} DataVolume: Type: AWS::EC2::Volume Properties: Size: {Ref: VolumeSize} AvailabilityZone: {Ref: AvailabilityZone} Tags: - {Key: Usage, Value: Test} MountPoint: Type: AWS::EC2::VolumeAttachment Properties: InstanceId: {Ref: BaseInstance} VolumeId: {Ref: DataVolume} Device: /dev/vdb {% endif %} DatastoreSG: Type: AWS::EC2::SecurityGroup Properties: GroupDescription: Default Security group for {{datastore_manager}} {% if tcp_rules or udp_rules %} SecurityGroupIngress: {% for rule in tcp_rules %} - IpProtocol: "tcp" FromPort: "{{rule.from_}}" ToPort: "{{rule.to_}}" CidrIp: "{{rule.cidr}}" {% endfor %} {% for rule in udp_rules %} - IpProtocol: "udp" FromPort: "{{rule.from_}}" ToPort: "{{rule.to_}}" CidrIp: "{{rule.cidr}}" {% endfor %} {% endif %} DatabaseIPAddress: Type: AWS::EC2::EIP DatabaseIPAssoc : Type: AWS::EC2::EIPAssociation Properties: InstanceId: {Ref: BaseInstance} EIP: {Ref: DatabaseIPAddress} trove-5.0.0/trove/templates/mongodb/0000775000567000056710000000000012701410521020567 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/mongodb/override.config.template0000664000567000056710000000011612701410316025407 0ustar jenkinsjenkins00000000000000{% for key, value in overrides.iteritems() -%} {{key}}: {{value}} {% endfor 
%}trove-5.0.0/trove/templates/mongodb/config.template0000664000567000056710000000011712701410316023572 0ustar jenkinsjenkins00000000000000# mongodb.conf storage.mmapv1.smallFiles: false storage.journal.enabled: true trove-5.0.0/trove/templates/mongodb/validation-rules.json0000664000567000056710000002155612701410316024757 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "name": "systemLog.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.accessControl.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.command.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.control.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.geo.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.index.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.network.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.query.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.replication.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.sharding.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.storage.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.storage.journal.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.write.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.quiet", "restart_required": true, "type": "boolean" }, { "name": "systemLog.traceAllExceptions", "restart_required": true, "type": "boolean" }, { "name": "systemLog.logAppend", "restart_required": true, "type": "boolean" }, { "name": "systemLog.logRotate", "restart_required": true, "type": "string" }, { "name": "systemLog.timeStampFormat", "restart_required": true, "type": "string" }, { "name": "net.maxIncomingConnections", "restart_required": true, "min": 0, "type": "integer" }, { "name": "net.wireObjectCheck", "restart_required": true, "type": "boolean" }, { "name": "net.ipv6", "restart_required": true, "type": "boolean" }, { "name": "net.http.enabled", "restart_required": true, "type": "boolean" }, { "name": "net.http.JSONPEnabled", "restart_required": true, "type": "boolean" }, { "name": "net.http.RESTInterfaceEnabled", "restart_required": true, "type": "boolean" }, { "name": "security.authorization", "restart_required": true, "type": "string" }, { "name": "security.sasl.hostName", "restart_required": true, "type": "string" }, { "name": "security.sasl.serviceName", "restart_required": true, "type": "string" }, { "name": "security.sasl.saslauthdSocketPath", "restart_required": true, "type": "string" }, { "name": "security.javascriptEnabled", "restart_required": true, "type": "boolean" }, { "name": "operationProfiling.slowOpThresholdMs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "operationProfiling.mode", "restart_required": true, "type": "string" }, { "name": "storage.indexBuildRetry", "restart_required": true, "type": "boolean" }, { "name": 
"storage.journal.enabled", "restart_required": true, "type": "boolean" }, { "name": "storage.directoryPerDB", "restart_required": true, "type": "boolean" }, { "name": "storage.syncPeriodSecs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.engine", "restart_required": true, "type": "string" }, { "name": "storage.mmapv1.nsSize", "restart_required": true, "min": 0, "max": 2047, "type": "integer" }, { "name": "storage.mmapv1.quota.enforced", "restart_required": true, "type": "boolean" }, { "name": "storage.mmapv1.quota.maxFilesPerDB", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.mmapv1.smallFiles", "restart_required": true, "type": "boolean" }, { "name": "storage.mmapv1.journal.debugFlags", "restart_required": true, "type": "integer" }, { "name": "storage.mmapv1.journal.commitIntervalMs", "restart_required": true, "min": 2, "max": 300, "type": "integer" }, { "name": "storage.wiredTiger.engineConfig.cacheSizeGB", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.wiredTiger.engineConfig.statisticsLogDelaySecs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.wiredTiger.engineConfig.journalCompressor", "restart_required": true, "type": "string" }, { "name": "storage.wiredTiger.collectionConfig.blockCompressor", "restart_required": true, "type": "string" }, { "name": "storage.wiredTiger.indexConfig.prefixCompression", "restart_required": true, "type": "boolean" }, { "name": "replication.oplogSizeMB", "restart_required": true, "min": 0, "type": "integer" }, { "name": "replication.secondaryIndexPrefetch", "restart_required": true, "type": "string" }, { "name": "sharding.clusterRole", "restart_required": true, "type": "string" }, { "name": "auditLog.format", "restart_required": true, "type": "string" }, { "name": "auditLog.filter", "restart_required": true, "type": "string" }, { "name": "snmp.subagent", "restart_required": true, "type": "boolean" }, { "name": "snmp.master", "restart_required": true, "type": "boolean" }, { "name": "replication.localPingThresholdMs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "sharding.autoSplit", "restart_required": true, "type": "boolean" }, { "name": "sharding.chunkSize", "restart_required": true, "min": 0, "type": "integer" }, { "name": "setParameter", "restart_required": true, "type": "string" } ] } trove-5.0.0/trove/templates/cassandra/0000775000567000056710000000000012701410521021101 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/cassandra/override.config.template0000664000567000056710000000011712701410316025722 0ustar jenkinsjenkins00000000000000{% for key, value in overrides.iteritems() -%} {{key}}: {{value}} {% endfor %} trove-5.0.0/trove/templates/cassandra/config.template0000664000567000056710000011415412701410316024113 0ustar jenkinsjenkins00000000000000# Cassandra storage config YAML # NOTE: # See http://wiki.apache.org/cassandra/StorageConfiguration for # full explanations of configuration directives # /NOTE # The name of the cluster. This is mainly used to prevent machines in # one logical cluster from joining another. cluster_name: 'Test Cluster' # This defines the number of tokens randomly assigned to this node on the ring # The more tokens, relative to other nodes, the larger the proportion of data # that this node will store. You probably want all nodes to have the same number # of tokens assuming they have equal hardware capability. 
# # If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, # and will use the initial_token as described below. # # Specifying initial_token will override this setting on the node's initial start, # on subsequent starts, this setting will apply even if initial token is set. # # If you already have a cluster with 1 token per node, and wish to migrate to # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations num_tokens: 256 # initial_token allows you to specify tokens manually. While you can use # it with # vnodes (num_tokens > 1, above) -- in which case you should provide a # comma-separated list -- it's primarily used when adding nodes # to legacy clusters # that do not have vnodes enabled. # initial_token: # See http://wiki.apache.org/cassandra/HintedHandoff # May either be "true" or "false" to enable globally, or contain a list # of data centers to enable per-datacenter. # hinted_handoff_enabled: DC1,DC2 hinted_handoff_enabled: true # this defines the maximum amount of time a dead host will have hints # generated. After it has been dead this long, new hints for it will not be # created until it has been seen alive and gone down again. max_hint_window_in_ms: 10800000 # 3 hours # Maximum throttle in KBs per second, per delivery thread. This will be # reduced proportionally to the number of nodes in the cluster. (If there # are two nodes in the cluster, each delivery thread will use the maximum # rate; if there are three, each will throttle to half of the maximum, # since we expect two nodes to be delivering hints simultaneously.) hinted_handoff_throttle_in_kb: 1024 # Number of threads with which to deliver hints; # Consider increasing this number when you have multi-dc deployments, since # cross-dc handoff tends to be slower max_hints_delivery_threads: 2 # Maximum throttle in KBs per second, total. This will be # reduced proportionally to the number of nodes in the cluster. batchlog_replay_throttle_in_kb: 1024 # Authentication backend, implementing IAuthenticator; used to identify users # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, # PasswordAuthenticator}. # # - AllowAllAuthenticator performs no checks - set it to disable authentication. # - PasswordAuthenticator relies on username/password pairs to authenticate # users. It keeps usernames and hashed passwords in system_auth.credentials table. # Please increase system_auth keyspace replication factor if you use this authenticator. # # Authenticator is required to support Trove user functions. authenticator: org.apache.cassandra.auth.PasswordAuthenticator # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, # CassandraAuthorizer}. # # - AllowAllAuthorizer allows any action to any user - set it to disable authorization. # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please # increase system_auth keyspace replication factor if you use this authorizer. # # Authorizer is required to support Trove user functions. authorizer: org.apache.cassandra.auth.CassandraAuthorizer # Validity period for permissions cache (fetching permissions can be an # expensive operation depending on the authorizer, CassandraAuthorizer is # one example). Defaults to 2000, set to 0 to disable. # Will be disabled automatically for AllowAllAuthorizer. 
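# (Editor's note: with the 2000 ms default below, a revoked or newly granted permission may continue to be served from the cache for up to ~2 seconds before the change takes effect.)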
permissions_validity_in_ms: 2000 # Refresh interval for permissions cache (if enabled). # After this interval, cache entries become eligible for refresh. Upon next # access, an async reload is scheduled and the old value returned until it # completes. If permissions_validity_in_ms is non-zero, then this must be # non-zero as well. # Defaults to the same value as permissions_validity_in_ms. # permissions_update_interval_in_ms: 1000 # The partitioner is responsible for distributing groups of rows (by # partition key) across nodes in the cluster. You should leave this # alone for new clusters. The partitioner can NOT be changed without # reloading all data, so when upgrading you should set this to the # same partitioner you were already using. # # Besides Murmur3Partitioner, partitioners included for backwards # compatibility include RandomPartitioner, ByteOrderedPartitioner, and # OrderPreservingPartitioner. # partitioner: org.apache.cassandra.dht.Murmur3Partitioner # Directories where Cassandra should store data on disk. Cassandra # will spread data evenly across them, subject to the granularity of # the configured compaction strategy. # If not set, the default directory is $CASSANDRA_HOME/data/data. data_file_directories: - /var/lib/cassandra/data # commit log. When running on magnetic HDD, this should be a # separate spindle from the data directories. # If not set, the default directory is $CASSANDRA_HOME/data/commitlog. commitlog_directory: /var/lib/cassandra/commitlog # policy for data disk failures: # die: shut down gossip and client transports and kill the JVM for any fs errors or # single-sstable errors, so the node can be replaced. # stop_paranoid: shut down gossip and client transports even for single-sstable errors, # kill the JVM for errors during startup. # stop: shut down gossip and client transports, leaving the node effectively dead, but # can still be inspected via JMX, kill the JVM for errors during startup. # best_effort: stop using the failed disk and respond to requests based on # remaining available sstables. This means you WILL see obsolete # data at CL.ONE! # ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra disk_failure_policy: stop # policy for commit disk failures: # die: shut down gossip and Thrift and kill the JVM, so the node can be replaced. # stop: shut down gossip and Thrift, leaving the node effectively dead, but # can still be inspected via JMX. # stop_commit: shutdown the commit log, letting writes collect but # continuing to service reads, as in pre-2.0.5 Cassandra # ignore: ignore fatal errors and let the batches fail commit_failure_policy: stop # Maximum size of the key cache in memory. # # Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the # minimum, sometimes more. The key cache is fairly tiny for the amount of # time it saves, so it's worthwhile to use it at large numbers. # The row cache saves even more time, but must contain the entire row, # so it is extremely space-intensive. It's best to only use the # row cache if you have hot rows or static rows. # # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. # # Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. key_cache_size_in_mb: # Duration in seconds after which Cassandra should # save the key cache. Caches are saved to saved_caches_directory as # specified in this configuration file.
# # Saved caches greatly improve cold-start speeds, and are relatively cheap in # terms of I/O for the key cache. Row cache saving is much more expensive and # has limited use. # # Default is 14400 or 4 hours. key_cache_save_period: 14400 # Number of keys from the key cache to save # Disabled by default, meaning all keys are going to be saved # key_cache_keys_to_save: 100 # Maximum size of the row cache in memory. # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. # # Default value is 0, to disable row caching. row_cache_size_in_mb: 0 # Duration in seconds after which Cassandra should # save the row cache. Caches are saved to saved_caches_directory as specified # in this configuration file. # # Saved caches greatly improve cold-start speeds, and are relatively cheap in # terms of I/O for the key cache. Row cache saving is much more expensive and # has limited use. # # Default is 0 to disable saving the row cache. row_cache_save_period: 0 # Number of keys from the row cache to save # Disabled by default, meaning all keys are going to be saved # row_cache_keys_to_save: 100 # Maximum size of the counter cache in memory. # # Counter cache helps to reduce counter locks' contention for hot counter cells. # In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before # write entirely. With RF > 1 a counter cache hit will still help to reduce the duration # of the lock hold, helping with hot counter cell updates, but will not allow skipping # the read entirely. Only the local (clock, count) tuple of a counter cell is kept # in memory, not the whole counter, so it's relatively cheap. # # NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. # # Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. # NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. counter_cache_size_in_mb: # Duration in seconds after which Cassandra should # save the counter cache (keys only). Caches are saved to saved_caches_directory as # specified in this configuration file. # # Default is 7200 or 2 hours. counter_cache_save_period: 7200 # Number of keys from the counter cache to save # Disabled by default, meaning all keys are going to be saved # counter_cache_keys_to_save: 100 # The off-heap memory allocator. Affects storage engine metadata as # well as caches. Experiments show that JEMalloc saves some memory # compared to the native GCC allocator (i.e., JEMalloc is more # fragmentation-resistant). # # Supported values are: NativeAllocator, JEMallocAllocator # # If you intend to use JEMallocAllocator you have to install JEMalloc as a library and # modify cassandra-env.sh as directed in the file. # # Defaults to NativeAllocator # memory_allocator: NativeAllocator # saved caches # If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. saved_caches_directory: /var/lib/cassandra/saved_caches # commitlog_sync may be either "periodic" or "batch." # # When in batch mode, Cassandra won't ack writes until the commit log # has been fsynced to disk. It will wait # commitlog_sync_batch_window_in_ms milliseconds between fsyncs. # This window should be kept short because the writer threads will # be unable to do extra work while waiting. (You may need to increase # concurrent_writes for the same reason.)
# # commitlog_sync: batch # commitlog_sync_batch_window_in_ms: 2 # # the other option is "periodic" where writes may be acked immediately # and the CommitLog is simply synced every commitlog_sync_period_in_ms # milliseconds. commitlog_sync: periodic commitlog_sync_period_in_ms: 10000 # The size of the individual commitlog file segments. A commitlog # segment may be archived, deleted, or recycled once all the data # in it (potentially from each columnfamily in the system) has been # flushed to sstables. # # The default size is 32, which is almost always fine, but if you are # archiving commitlog segments (see commitlog_archiving.properties), # then you probably want a finer granularity of archiving; 8 or 16 MB # is reasonable. commitlog_segment_size_in_mb: 32 # Reuse commit log files when possible. The default is false, and this # feature will be removed entirely in future versions of Cassandra. #commitlog_segment_recycling: false # any class that implements the SeedProvider interface and has a # constructor that takes a Map of parameters will do. seed_provider: # Addresses of hosts that are deemed contact points. # Cassandra nodes use this list of hosts to find each other and learn # the topology of the ring. You must change this if you are running # multiple nodes! - class_name: org.apache.cassandra.locator.SimpleSeedProvider parameters: # seeds is actually a comma-delimited list of addresses. # Ex: "<ip1>,<ip2>,<ip3>" - seeds: "127.0.0.1" # For workloads with more data than can fit in memory, Cassandra's # bottleneck will be reads that need to fetch data from # disk. "concurrent_reads" should be set to (16 * number_of_drives) in # order to allow the operations to enqueue low enough in the stack # that the OS and drives can reorder them. Same applies to # "concurrent_counter_writes", since counter writes read the current # values before incrementing and writing them back. # # On the other hand, since writes are almost never IO bound, the ideal # number of "concurrent_writes" is dependent on the number of cores in # your system; (8 * number_of_cores) is a good rule of thumb. concurrent_reads: 32 concurrent_writes: 32 concurrent_counter_writes: 32 # Total memory to use for sstable-reading buffers. Defaults to # the smaller of 1/4 of heap or 512MB. # file_cache_size_in_mb: 512 # Total permitted memory to use for memtables. Cassandra will stop # accepting writes when the limit is exceeded until a flush completes, # and will trigger a flush based on memtable_cleanup_threshold # If omitted, Cassandra will set both to 1/4 the size of the heap. # memtable_heap_space_in_mb: 2048 # memtable_offheap_space_in_mb: 2048 # Ratio of occupied non-flushing memtable size to total permitted size # that will trigger a flush of the largest memtable. A larger mct will # mean larger flushes and hence less compaction, but also less concurrent # flush activity which can make it difficult to keep your disks fed # under heavy write load. # # memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) # memtable_cleanup_threshold: 0.11 # Specify the way Cassandra allocates and manages memtable memory. # Options are: # heap_buffers: on heap nio buffers # offheap_buffers: off heap (direct) nio buffers # offheap_objects: native memory, eliminating nio buffer heap overhead memtable_allocation_type: heap_buffers # Total space to use for commitlogs. Since commitlog segments are # mmapped, and hence use up address space, the default size is 32 # on 32-bit JVMs, and 8192 on 64-bit JVMs.
# # If space gets above this value (it will round up to the next nearest # segment multiple), Cassandra will flush every dirty CF in the oldest # segment and remove it. So a small total commitlog space will tend # to cause more flush activity on less-active columnfamilies. # # commitlog_total_space_in_mb: 8192 # This sets the number of memtable flush writer threads. These will # be blocked by disk io, and each one will hold a memtable in memory # while blocked. # # memtable_flush_writers defaults to the smaller of (number of disks, # number of cores), with a minimum of 2 and a maximum of 8. # # If your data directories are backed by SSD, you should increase this # to the number of cores. #memtable_flush_writers: 8 # A fixed memory pool size in MB for SSTable index summaries. If left # empty, this will default to 5% of the heap size. If the memory usage of # all index summaries exceeds this limit, SSTables with low read rates will # shrink their index summaries in order to meet this limit. However, this # is a best-effort process. In extreme conditions Cassandra may need to use # more than this amount of memory. index_summary_capacity_in_mb: # How frequently index summaries should be resampled. This is done # periodically to redistribute memory from the fixed-size pool to sstables # proportional to their recent read rates. Setting to -1 will disable this # process, leaving existing index summaries at their current sampling level. index_summary_resize_interval_in_minutes: 60 # Whether to, when doing sequential writing, fsync() at intervals in # order to force the operating system to flush the dirty # buffers. Enable this to avoid sudden dirty buffer flushing from # impacting read latencies. Almost always a good idea on SSDs; not # necessarily on platters. trickle_fsync: false trickle_fsync_interval_in_kb: 10240 # TCP port, for commands and data # For security reasons, you should not expose this port to the internet. Firewall it if needed. storage_port: 7000 # SSL port, for encrypted communication. Unused unless enabled in # encryption_options # For security reasons, you should not expose this port to the internet. Firewall it if needed. ssl_storage_port: 7001 # Address or interface to bind to and tell other Cassandra nodes to connect to. # You _must_ change this if you want multiple nodes to be able to communicate! # # Set listen_address OR listen_interface, not both. Interfaces must correspond # to a single address, IP aliasing is not supported. # # Leaving it blank leaves it up to InetAddress.getLocalHost(). This # will always do the Right Thing _if_ the node is properly configured # (hostname, name resolution, etc), and the Right Thing is to use the # address associated with the hostname (it might not be). # # Setting listen_address to 0.0.0.0 is always wrong. # # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address # you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 # address will be used. If true the first ipv6 address will be used. Defaults to false preferring # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
listen_address: 127.0.0.1 # listen_interface: eth0 # listen_interface_prefer_ipv6: false # Address to broadcast to other Cassandra nodes # Leaving this blank will set it to the same value as listen_address # broadcast_address: 1.2.3.4 # Internode authentication backend, implementing IInternodeAuthenticator; # used to allow/disallow connections from peer nodes. # internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator # Whether to start the native transport server. # Please note that the address on which the native transport is bound is the # same as the rpc_address. The port however is different and specified below. # # The Python driver uses the binary protocol and requires this to be enabled. start_native_transport: true # port for the CQL native transport to listen for clients on # For security reasons, you should not expose this port to the internet. Firewall it if needed. native_transport_port: 9042 # The maximum threads for handling requests when the native transport is used. # This is similar to rpc_max_threads though the default differs slightly (and # there is no native_transport_min_threads, idle threads will always be stopped # after 30 seconds). # native_transport_max_threads: 128 # # The maximum size of allowed frame. Frame (requests) larger than this will # be rejected as invalid. The default is 256MB. # native_transport_max_frame_size_in_mb: 256 # The maximum number of concurrent client connections. # The default is -1, which means unlimited. # native_transport_max_concurrent_connections: -1 # The maximum number of concurrent client connections per source ip. # The default is -1, which means unlimited. # native_transport_max_concurrent_connections_per_ip: -1 # Whether to start the thrift rpc server. start_rpc: true # The address or interface to bind the Thrift RPC service and native transport # server to. # # Set rpc_address OR rpc_interface, not both. Interfaces must correspond # to a single address, IP aliasing is not supported. # # Leaving rpc_address blank has the same effect as on listen_address # (i.e. it will be based on the configured hostname of the node). # # Note that unlike listen_address, you can specify 0.0.0.0, but you must also # set broadcast_rpc_address to a value other than 0.0.0.0. # # For security reasons, you should not expose this port to the internet. Firewall it if needed. # # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address # you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 # address will be used. If true the first ipv6 address will be used. Defaults to false preferring # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. rpc_address: 0.0.0.0 # rpc_interface: eth1 # rpc_interface_prefer_ipv6: false # port for Thrift to listen for clients on rpc_port: 9160 # RPC address to broadcast to drivers and other Cassandra nodes. This cannot # be set to 0.0.0.0. If left blank, this will be set to the value of # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must # be set. broadcast_rpc_address: 127.0.0.1 # enable or disable keepalive on rpc/native connections rpc_keepalive: true # Cassandra provides two out-of-the-box options for the RPC Server: # # sync -> One thread per thrift connection. For a very large number of clients, memory # will be your limiting factor. 
On a 64 bit JVM, 180KB is the minimum stack size # per thread, and that will correspond to your use of virtual memory (but physical memory # may be limited depending on use of stack space). # # hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled # asynchronously using a small number of threads that does not vary with the number # of thrift clients (and thus scales well to many clients). The rpc requests are still # synchronous (one thread per active request). If hsha is selected then it is essential # that rpc_max_threads is changed from the default value of unlimited. # # The default is sync because on Windows hsha is about 30% slower. On Linux, # sync/hsha performance is about the same, with hsha of course using less memory. # # Alternatively, you can provide your own RPC server by providing the fully-qualified class name # of an o.a.c.t.TServerFactory that can create an instance of it. rpc_server_type: sync # Uncomment rpc_min|max_thread to set request pool size limits. # # Regardless of your choice of RPC server (see above), the number of maximum requests in the # RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync # RPC server, it also dictates the number of clients that can be connected at all). # # The default is unlimited and thus provides no protection against clients overwhelming the server. You are # encouraged to set a maximum that makes sense for you in production, but do keep in mind that # rpc_max_threads represents the maximum number of client requests this server may execute concurrently. # # rpc_min_threads: 16 # rpc_max_threads: 2048 # uncomment to set socket buffer sizes on rpc connections # rpc_send_buff_size_in_bytes: # rpc_recv_buff_size_in_bytes: # Uncomment to set socket buffer size for internode communication # Note that when setting this, the buffer size is limited by net.core.wmem_max # and when not setting it, it is defined by net.ipv4.tcp_wmem # See: # /proc/sys/net/core/wmem_max # /proc/sys/net/core/rmem_max # /proc/sys/net/ipv4/tcp_wmem # /proc/sys/net/ipv4/tcp_rmem # and: man tcp # internode_send_buff_size_in_bytes: # internode_recv_buff_size_in_bytes: # Frame size for thrift (maximum message length). thrift_framed_transport_size_in_mb: 15 # Set to true to have Cassandra create a hard link to each sstable # flushed or streamed locally in a backups/ subdirectory of the # keyspace data. Removing these links is the operator's # responsibility. incremental_backups: false # Whether or not to take a snapshot before each compaction. Be # careful using this option, since Cassandra won't clean up the # snapshots for you. Mostly useful if you're paranoid when there # is a data format change. # # Trove currently does not provide any support for guestagent-local snapshots. # see comment on 'auto_snapshot' below. snapshot_before_compaction: false # Whether or not a snapshot is taken of the data before keyspace truncation # or dropping of column families. The STRONGLY advised default of true # should be used to provide data safety. If you set this flag to false, you will # lose data on truncation or drop. # # Trove currently does not provide any support for guestagent-local snapshots. # They may be used internally and/or removed (!) by backup implementations. # The operator would also be required to have remote access to the filesystem.
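# (Editor's illustrative aside, not from the stock template: an operator with shell access to the guest could still take a manual snapshot, e.g. 'nodetool snapshot <keyspace>', but Trove will neither manage nor clean up such snapshots.)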
auto_snapshot: false # When executing a scan, within or across a partition, we need to keep the # tombstones seen in memory so we can return them to the coordinator, which # will use them to make sure other replicas also know about the deleted rows. # With workloads that generate a lot of tombstones, this can cause performance # problems and even exhaust the server heap. # (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) # Adjust the thresholds here if you understand the dangers and want to # scan more tombstones anyway. These thresholds may also be adjusted at runtime # using the StorageService mbean. tombstone_warn_threshold: 1000 tombstone_failure_threshold: 100000 # Granularity of the collation index of rows within a partition. # Increase if your rows are large, or if you have a very large # number of rows per partition. The competing goals are these: # 1) a smaller granularity means more index entries are generated # and looking up rows within the partition by collation column # is faster # 2) but, Cassandra will keep the collation index in memory for hot # rows (as part of the key cache), so a larger granularity means # you can cache more hot rows column_index_size_in_kb: 64 # Log WARN on any batch size exceeding this value. 5kb per batch by default. # Caution should be taken on increasing the size of this threshold as it can lead to node instability. batch_size_warn_threshold_in_kb: 5 # Number of simultaneous compactions to allow, NOT including # validation "compactions" for anti-entropy repair. Simultaneous # compactions can help preserve read performance in a mixed read/write # workload, by mitigating the tendency of small sstables to accumulate # during a single long-running compaction. The default is usually # fine and if you experience problems with compaction running too # slowly or too fast, you should look at # compaction_throughput_mb_per_sec first. # # concurrent_compactors defaults to the smaller of (number of disks, # number of cores), with a minimum of 2 and a maximum of 8. # # If your data directories are backed by SSD, you should increase this # to the number of cores. #concurrent_compactors: 1 # Throttles compaction to the given total throughput across the entire # system. The faster you insert data, the faster you need to compact in # order to keep the sstable count down, but in general, setting this to # 16 to 32 times the rate you are inserting data is more than sufficient. # Setting this to 0 disables throttling. Note that this accounts for all types # of compaction, including validation compaction. compaction_throughput_mb_per_sec: 16 # Log a warning when compacting partitions larger than this value compaction_large_partition_warning_threshold_mb: 100 # When compacting, the replacement sstable(s) can be opened before they # are completely written, and used in place of the prior sstables for # any range that has been written. This helps to smoothly transfer reads # between the sstables, reducing page cache churn and keeping hot rows hot sstable_preemptive_open_interval_in_mb: 50 # Throttles all outbound streaming file transfers on this node to the # given total throughput in Mbps. This is necessary because Cassandra does # mostly sequential IO when streaming data during bootstrap or repair, which # can lead to saturating the network connection and degrading rpc performance. # When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200 # Throttles all streaming file transfer between the datacenters, # this setting allows users to throttle inter dc stream throughput in addition # to throttling all network stream traffic as configured with # stream_throughput_outbound_megabits_per_sec # inter_dc_stream_throughput_outbound_megabits_per_sec: # How long the coordinator should wait for read operations to complete read_request_timeout_in_ms: 5000 # How long the coordinator should wait for seq or index scans to complete range_request_timeout_in_ms: 10000 # How long the coordinator should wait for writes to complete write_request_timeout_in_ms: 2000 # How long the coordinator should wait for counter writes to complete counter_write_request_timeout_in_ms: 5000 # How long a coordinator should continue to retry a CAS operation # that contends with other proposals for the same row cas_contention_timeout_in_ms: 1000 # How long the coordinator should wait for truncates to complete # (This can be much longer, because unless auto_snapshot is disabled # we need to flush first so we can snapshot before removing the data.) truncate_request_timeout_in_ms: 60000 # The default timeout for other, miscellaneous operations request_timeout_in_ms: 10000 # Enable operation timeout information exchange between nodes to accurately # measure request timeouts. If disabled, replicas will assume that requests # were forwarded to them instantly by the coordinator, which means that # under overload conditions we will waste that much extra time processing # already-timed-out requests. # # Warning: before enabling this property make sure ntp is installed # and the times are synchronized between the nodes. cross_node_timeout: false # Enable socket timeout for streaming operation. # When a timeout occurs during streaming, streaming is retried from the start # of the current file. This _can_ involve re-streaming a significant amount of # data, so you should avoid setting the value too low. # Default value is 3600000, which means streams timeout after an hour. # streaming_socket_timeout_in_ms: 3600000 # phi value that must be reached for a host to be marked down. # most users should never need to adjust this. # phi_convict_threshold: 8 # endpoint_snitch -- Set this to a class that implements # IEndpointSnitch. The snitch has two functions: # - it teaches Cassandra enough about your network topology to route # requests efficiently # - it allows Cassandra to spread replicas around your cluster to avoid # correlated failures. It does this by grouping machines into # "datacenters" and "racks." Cassandra will do its best not to have # more than one replica on the same "rack" (which may not actually # be a physical location) # # IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, # YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS # ARE PLACED. # # IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN # ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED. # # Out of the box, Cassandra provides # - SimpleSnitch: # Treats Strategy order as proximity. This can improve cache # locality when disabling read repair. Only appropriate for # single-datacenter deployments. # - GossipingPropertyFileSnitch # This should be your go-to snitch for production use. The rack # and datacenter for the local node are defined in # cassandra-rackdc.properties and propagated to other nodes via # gossip.
If cassandra-topology.properties exists, it is used as a # fallback, allowing migration from the PropertyFileSnitch. # - PropertyFileSnitch: # Proximity is determined by rack and data center, which are # explicitly configured in cassandra-topology.properties. # - Ec2Snitch: # Appropriate for EC2 deployments in a single Region. Loads Region # and Availability Zone information from the EC2 API. The Region is # treated as the datacenter, and the Availability Zone as the rack. # Only private IPs are used, so this will not work across multiple # Regions. # - Ec2MultiRegionSnitch: # Uses public IPs as broadcast_address to allow cross-region # connectivity. (Thus, you should set seed addresses to the public # IP as well.) You will need to open the storage_port or # ssl_storage_port on the public IP firewall. (For intra-Region # traffic, Cassandra will switch to the private IP after # establishing a connection.) # - RackInferringSnitch: # Proximity is determined by rack and data center, which are # assumed to correspond to the 3rd and 2nd octet of each node's IP # address, respectively. Unless this happens to match your # deployment conventions, this is best used as an example of # writing a custom Snitch class and is provided in that spirit. # # You can use a custom Snitch by setting this to the full class name # of the snitch, which will be assumed to be on your classpath. endpoint_snitch: SimpleSnitch # controls how often to perform the more expensive part of host score # calculation dynamic_snitch_update_interval_in_ms: 100 # controls how often to reset all host scores, allowing a bad host to # possibly recover dynamic_snitch_reset_interval_in_ms: 600000 # if set greater than zero and read_repair_chance is < 1.0, this will allow # 'pinning' of replicas to hosts in order to increase cache capacity. # The badness threshold will control how much worse the pinned host has to be # before the dynamic snitch will prefer other replicas over it. This is # expressed as a double which represents a percentage. Thus, a value of # 0.2 means Cassandra would continue to prefer the static snitch values # until the pinned host was 20% worse than the fastest. dynamic_snitch_badness_threshold: 0.1 # request_scheduler -- Set this to a class that implements # RequestScheduler, which will schedule incoming client requests # according to the specific policy. This is useful for multi-tenancy # with a single Cassandra cluster. # NOTE: This is specifically for requests from the client and does # not affect inter node communication. # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of # client requests to a node with a separate queue for each # request_scheduler_id. The scheduler is further customized by # request_scheduler_options as described below. request_scheduler: org.apache.cassandra.scheduler.NoScheduler # Scheduler Options vary based on the type of scheduler # NoScheduler - Has no options # RoundRobin # - throttle_limit -- The throttle_limit is the number of in-flight # requests per client. Requests beyond # that limit are queued up until # running requests can complete. # The value of 80 here is twice the number of # concurrent_reads + concurrent_writes. # - default_weight -- default_weight is optional and allows for # overriding the default which is 1. # - weights -- Weights are optional and will default to 1 or the # overridden default_weight. 
The weight translates into how # many requests are handled during each turn of the # RoundRobin, based on the scheduler id. # # request_scheduler_options: # throttle_limit: 80 # default_weight: 5 # weights: # Keyspace1: 1 # Keyspace2: 5 # request_scheduler_id -- An identifier based on which to perform # the request scheduling. Currently the only valid option is keyspace. # request_scheduler_id: keyspace # Enable or disable inter-node encryption # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that # users generate their own keys), with TLS_RSA_WITH_AES_128_CBC_SHA as the cipher # suite for authentication, key exchange and encryption of the actual data transfers. # Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode. # NOTE: No custom encryption options are enabled at the moment # The available internode options are : all, none, dc, rack # # If set to dc cassandra will encrypt the traffic between the DCs # If set to rack cassandra will encrypt the traffic between the racks # # The passwords used in these options must match the passwords used when generating # the keystore and truststore. For instructions on generating these files, see: # http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore # server_encryption_options: internode_encryption: none keystore: conf/.keystore keystore_password: cassandra truststore: conf/.truststore truststore_password: cassandra # More advanced defaults below: # protocol: TLS # algorithm: SunX509 # store_type: JKS # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] # require_client_auth: false # enable or disable client/server encryption. client_encryption_options: enabled: false # If enabled and optional is set to true encrypted and unencrypted connections are handled. #optional: false # Only supported by versions >= 2.1.12 keystore: conf/.keystore keystore_password: cassandra # require_client_auth: false # Set truststore and truststore_password if require_client_auth is true # truststore: conf/.truststore # truststore_password: cassandra # More advanced defaults below: # protocol: TLS # algorithm: SunX509 # store_type: JKS # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] # internode_compression controls whether traffic between nodes is # compressed. # can be: all - all traffic is compressed # dc - traffic between different datacenters is compressed # none - nothing is compressed. internode_compression: all # Enable or disable tcp_nodelay for inter-dc communication. # Disabling it will result in larger (but fewer) network packets being sent, # reducing overhead from the TCP protocol itself, at the cost of increasing # latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false # GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level # Adjust the threshold based on your application throughput requirement # By default, Cassandra logs GC Pauses greater than 200 ms at INFO level # gc_warn_threshold_in_ms: 1000 trove-5.0.0/trove/templates/cassandra/validation-rules.json0000664000567000056710000003471612701410316025273 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "name": "cluster_name", "restart_required": true, "type": "string" }, { "name": "listen_address", "restart_required": true, "type": "string" }, { "name": "commit_failure_policy", "restart_required": true, "type": "string" }, { "name": "disk_failure_policy", "restart_required": true, "type": "string" }, { "name": "endpoint_snitch", "restart_required": true, "type": "string" }, { "name": "seed_provider", "restart_required": true, "type": "list" }, { "name": "compaction_throughput_mb_per_sec", "restart_required": true, "min": 0, "type": "integer" }, { "name": "compaction_large_partition_warning_threshold_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_total_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "concurrent_reads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "concurrent_writes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "phi_convict_threshold", "restart_required": true, "type": "integer" }, { "name": "commitlog_sync", "restart_required": true, "type": "string" }, { "name": "commitlog_segment_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "commitlog_total_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "compaction_preheat_key_cache", "restart_required": true, "type": "boolean" }, { "name": "concurrent_compactors", "restart_required": true, "min": 0, "type": "integer" }, { "name": "in_memory_compaction_limit_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "preheat_kernel_page_cache", "restart_required": true, "type": "boolean" }, { "name": "sstable_preemptive_open_interval_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_allocation_type", "restart_required": true, "type": "string" }, { "name": "memtable_cleanup_threshold", "restart_required": true, "min": 0, "type": "float" }, { "name": "file_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_flush_queue_size", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_flush_writers", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_heap_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_offheap_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "column_index_size_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "index_summary_capacity_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "index_summary_resize_interval_in_minutes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "reduce_cache_capacity_to", "restart_required": true, "min": 0, "max": 1.0, "type": "float" }, { "name": "reduce_cache_sizes_at", "restart_required": true, "min": 0, "max": 1.0, "type": "float" }, { "name": "stream_throughput_outbound_megabits_per_sec", "restart_required": true, "min": 0, "type": "integer" }, { "name": 
"inter_dc_stream_throughput_outbound_megabits_per_sec", "restart_required": true, "min": 0, "type": "integer" }, { "name": "trickle_fsync", "restart_required": true, "type": "boolean" }, { "name": "trickle_fsync_interval_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "auto_bootstrap", "restart_required": true, "type": "boolean" }, { "name": "batch_size_warn_threshold_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "broadcast_address", "restart_required": true, "type": "string" }, { "name": "initial_token", "restart_required": true, "type": "string" }, { "name": "initial_token", "restart_required": true, "type": "string" }, { "name": "num_tokens", "restart_required": true, "min": 0, "type": "integer" }, { "name": "partitioner", "restart_required": true, "type": "string" }, { "name": "key_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "key_cache_save_period", "restart_required": true, "min": 0, "type": "integer" }, { "name": "key_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "row_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "row_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "row_cache_save_period", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memory_allocator", "restart_required": true, "type": "string" }, { "name": "counter_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_cache_save_period", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "tombstone_warn_threshold", "restart_required": true, "min": 0, "type": "integer" }, { "name": "tombstone_failure_threshold", "restart_required": true, "min": 0, "type": "integer" }, { "name": "range_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "read_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_write_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "cas_contention_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "truncate_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "write_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "cross_node_timeout", "restart_required": true, "type": "boolean" }, { "name": "internode_send_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "internode_recv_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "internode_compression", "restart_required": true, "type": "string" }, { "name": "inter_dc_tcp_nodelay", "restart_required": true, "type": "boolean" }, { "name": "streaming_socket_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "native_transport_max_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "native_transport_max_frame_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "native_transport_max_concurrent_connections", 
"restart_required": true, "min": -1, "type": "integer" }, { "name": "native_transport_max_concurrent_connections_per_ip", "restart_required": true, "min": -1, "type": "integer" }, { "name": "broadcast_rpc_address", "restart_required": true, "type": "string" }, { "name": "rpc_keepalive", "restart_required": true, "type": "boolean" }, { "name": "rpc_max_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_min_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_recv_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_send_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_server_type", "restart_required": true, "type": "string" }, { "name": "dynamic_snitch_badness_threshold", "restart_required": true, "min": 0, "type": "float" }, { "name": "dynamic_snitch_reset_interval_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "dynamic_snitch_update_interval_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "hinted_handoff_enabled", "restart_required": true, "type": "boolean" }, { "name": "hinted_handoff_throttle_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "max_hint_window_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "max_hints_delivery_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "batchlog_replay_throttle_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "request_scheduler", "restart_required": true, "type": "string" }, { "name": "request_scheduler_id", "restart_required": true, "type": "string" }, { "name": "request_scheduler_options", "restart_required": true, "type": "list" }, { "name": "thrift_framed_transport_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "thrift_max_message_length_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "permissions_validity_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "permissions_update_interval_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "gc_warn_threshold_in_ms", "restart_required": true, "min": 0, "type": "integer" } ] } trove-5.0.0/trove/templates/pxc/0000775000567000056710000000000012701410521017734 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/pxc/replica.config.template0000664000567000056710000000041312701410316024354 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log relay_log_info_repository = TABLE relay_log_recovery = 1 relay_log_purge = 1 enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON read_only = true trove-5.0.0/trove/templates/pxc/override.config.template0000664000567000056710000000033712701410316024561 0ustar jenkinsjenkins00000000000000[mysqld] {% for key, value in overrides.iteritems() -%} {%- if value == True -%} {{key}} = 1 {%- elif value == False -%} {{key}} = 0 {%- elif value == "" -%} {{key}} {%- else -%} {{key}}={{value}} {%- endif %} {% endfor %} trove-5.0.0/trove/templates/pxc/cluster.config.template0000664000567000056710000000103112701410316024413 0ustar jenkinsjenkins00000000000000[mysqld] binlog_format=ROW bind-address=0.0.0.0 default-storage-engine=innodb innodb_autoinc_lock_mode=2 innodb_flush_log_at_trx_commit=0 wsrep_slave_threads=8 wsrep_provider=/usr/lib/libgalera_smm.so 
wsrep_provider_options="gcache.size={{ (128 * flavor['ram']/512)|int }}M; gcache.page_size=1G" wsrep_sst_method=xtrabackup-v2 wsrep_sst_auth="{{ replication_user_pass }}" wsrep_cluster_address="gcomm://{{ cluster_ips }}" wsrep_cluster_name={{ cluster_name }} wsrep_node_name={{ instance_name }} wsrep_node_address={{ instance_ip }} trove-5.0.0/trove/templates/pxc/config.template0000664000567000056710000000271212701410316022742 0ustar jenkinsjenkins00000000000000[client] port = 3306 [mysqld_safe] nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data ####tmpdir = /tmp tmpdir = /var/tmp pid_file = /var/run/mysqld/mysqld.pid skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover = BACKUP query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ trove-5.0.0/trove/templates/pxc/5.5/0000775000567000056710000000000012701410521020243 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/pxc/5.5/replica.config.template0000664000567000056710000000017212701410316024665 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log read_only = true trove-5.0.0/trove/templates/pxc/5.5/replica_source.config.template0000664000567000056710000000006512701410316026246 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log trove-5.0.0/trove/templates/pxc/validation-rules.json0000664000567000056710000001333112701410316024114 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "name": "innodb_file_per_table", "restart_required": true, "max": 1, "min": 0, "type": "integer" }, { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "key_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "connect_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "join_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 32768, "type": "integer" }, { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 68719476736, "min": 0, "type": "integer" }, { "name": 
"innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967296, "min": 1048576, "type": "integer" }, { "name": "innodb_open_files", "restart_required": true, "max": 4294967296, "min": 10, "type": "integer" }, { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" }, { "name": "sync_binlog", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "bulk_insert_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "expire_logs_days", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" }, { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709547520, "min": 1, "type": "integer" }, { "name": "max_connections", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 4, "type": "integer" }, { "name": "max_user_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" }, { "name": "server_id", "restart_required": true, "max": 100000, "min": 1, "type": "integer" }, { "name": "wait_timeout", "restart_required": false, "max": 31536000, "min": 1, "type": "integer" }, { "name": "character_set_client", "restart_required": false, "type": "string" }, { "name": "character_set_connection", "restart_required": false, "type": "string" }, { "name": "character_set_database", "restart_required": false, "type": "string" }, { "name": "character_set_filesystem", "restart_required": false, "type": "string" }, { "name": "character_set_results", "restart_required": false, "type": "string" }, { "name": "character_set_server", "restart_required": false, "type": "string" }, { "name": "collation_connection", "restart_required": false, "type": "string" }, { "name": "collation_database", "restart_required": false, "type": "string" }, { "name": "collation_server", "restart_required": false, "type": "string" } ] } trove-5.0.0/trove/templates/pxc/replica_source.config.template0000664000567000056710000000025712701410316025742 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON enforce_storage_engine = InnoDB trove-5.0.0/trove/templates/redis/0000775000567000056710000000000012701410521020250 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/redis/replica.config.template0000664000567000056710000000002412701410316024666 0ustar jenkinsjenkins00000000000000slave-read-only yes trove-5.0.0/trove/templates/redis/override.config.template0000664000567000056710000000011512701410316025067 0ustar jenkinsjenkins00000000000000{% for key, value in overrides.iteritems() -%} {{key}} {{value}} {% endfor %}trove-5.0.0/trove/templates/redis/config.template0000664000567000056710000012205212701410316023256 0ustar jenkinsjenkins00000000000000# 
Redis configuration file example # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: # # 1k => 1000 bytes # 1kb => 1024 bytes # 1m => 1000000 bytes # 1mb => 1024*1024 bytes # 1g => 1000000000 bytes # 1gb => 1024*1024*1024 bytes # # units are case insensitive so 1GB 1Gb 1gB are all the same. ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you # have a standard template that goes to all Redis servers but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # Note that the "include" option won't be rewritten by the "CONFIG REWRITE" # command from an admin or Redis Sentinel. Since Redis always uses the last # processed line as the value of a configuration directive, it is best to put # includes at the beginning of this file to avoid overwriting config changes # at runtime. # # If instead you are interested in using includes to override configuration # options, it is better to use include as the last line. # # include /path/to/local.conf # include /path/to/other.conf ################################ GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. # # Trove currently requires the database to run as a service. daemonize yes # When running daemonized, Redis writes a pid file in /var/run/redis.pid by # default. You can specify a custom pid file location here. # # This has to be a writable path. # Trove will override this property based on the underlying OS. pidfile /var/run/redis/redis-server.pid # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. port 6379 # TCP listen() backlog. # # In high requests-per-second environments you need a high backlog in order # to avoid slow client connection issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. tcp-backlog 511 # By default Redis listens for connections from all the network interfaces # available on the server. It is possible to listen to just one or multiple # interfaces using the "bind" configuration directive, followed by one or # more IP addresses. # # Examples: # # bind 192.168.1.100 10.0.0.1 # bind 127.0.0.1 # Specify the path for the Unix socket that will be used to listen for # incoming connections. There is no default, so Redis will not listen # on a unix socket when not specified. # # Trove uses Unix sockets internally to connect to the database. # Trove will override this property based on the underlying OS. # unixsocket /tmp/redis.sock # unixsocketperm 700 # Close the connection after a client is idle for N seconds (0 to disable) timeout 0 # TCP keepalive. # # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in the absence # of communication. This is useful for two reasons: # # 1) Detect dead peers. # 2) Keep the connection alive from the point of view of network # equipment in the middle. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection, double that time is needed. # On other kernels the period depends on the kernel configuration.
# # A reasonable value for this option is 60 seconds. tcp-keepalive 0 # Specify the server verbosity level. # This can be one of: # debug (a lot of information, useful for development/testing) # verbose (many rarely useful info, but not a mess like the debug level) # notice (moderately verbose, what you want in production probably) # warning (only very important / critical messages are logged) loglevel notice # Specify the log file name. Also the empty string can be used to force # Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null # # Trove will override this property based on the underlying OS. logfile "" # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. # syslog-enabled no # Specify the syslog identity. # syslog-ident redis # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. # syslog-facility local0 # Set the number of databases. The default database is DB 0; you can select # a different one on a per-connection basis using SELECT <dbid> where # dbid is a number between 0 and 'databases'-1 databases 16 ################################ SNAPSHOTTING ################################ # # Save the DB on disk: # # save <seconds> <changes> # # Will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. # # In the example below the behaviour will be to save: # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed # # Note: you can disable saving completely by commenting out all "save" lines. # # It is also possible to remove all the previously configured save # points by adding a save directive with a single empty string argument # like in the following example: # # save "" save 900 1 save 300 10 save 60 10000 # By default Redis will stop accepting writes if RDB snapshots are enabled # (at least one save point) and the latest background save failed. # This will make the user aware (in a hard way) that data is not persisting # on disk properly, otherwise chances are that no one will notice and some # disaster will happen. # # If the background saving process starts working again, Redis will # automatically allow writes again. # # However if you have set up proper monitoring of the Redis server # and persistence, you may want to disable this feature so that Redis will # continue to work as usual even if there are problems with disk, # permissions, and so forth. stop-writes-on-bgsave-error yes # Compress string objects using LZF when dumping .rdb databases? # By default that's set to 'yes' as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. rdbcompression yes # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. # This makes the format more resistant to corruption but there is a performance # hit to pay (around 10%) when saving and loading RDB files, so you can disable it # for maximum performance. # # RDB files created with checksum disabled have a checksum of zero that will # tell the loading code to skip the check. rdbchecksum yes # The filename where the DB will be dumped dbfilename dump.rdb # The working directory.
# # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. # # The Append Only File will also be created inside this directory. # # Note that you must specify a directory here, not a file name. # # This has to be an existing path to a writable directory. # Trove will override this property based on the underlying OS. dir /tmp ################################# REPLICATION ################################# # Master-Slave replication. Use slaveof to make a Redis instance a copy of # another Redis server. A few things to understand ASAP about Redis replication. # # 1) Redis replication is asynchronous, but you can configure a master to # stop accepting writes if it appears not to be connected to at least # a given number of slaves. # 2) Redis slaves are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a # network partition slaves automatically try to reconnect to masters # and resynchronize with them. # # slaveof <masterip> <masterport> # If the master is password protected (using the "requirepass" configuration # directive below) it is possible to tell the slave to authenticate before # starting the replication synchronization process, otherwise the master will # refuse the slave request. # # masterauth <master-password> # When a slave loses its connection with the master, or when the replication # is still in progress, the slave can act in two different ways: # # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # # 2) if slave-serve-stale-data is set to 'no' the slave will reply with # an error "SYNC with master in progress" to all kinds of commands # except INFO and SLAVEOF. # slave-serve-stale-data yes # You can configure a slave instance to accept writes or not. Writing against # a slave instance may be useful to store some ephemeral data (because data # written on a slave will be easily deleted after resync with the master) but # may also cause problems if clients are writing to it because of a # misconfiguration. # # Since Redis 2.6 by default slaves are read-only. # # Note: read only slaves are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. # Still, a read only slave exports by default all the administrative commands # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve # security of read only slaves using 'rename-command' to shadow all the # administrative / dangerous commands. slave-read-only yes # Replication SYNC strategy: disk or socket. # # ------------------------------------------------------- # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY # ------------------------------------------------------- # # New slaves, and reconnecting slaves that are not able to continue the # replication process by just receiving differences, need to do what is called # a "full synchronization". An RDB file is transmitted from the master to the # slaves. The transmission can happen in two different ways: # # 1) Disk-backed: The Redis master creates a new process that writes the RDB # file on disk.
Later the file is transferred by the parent # process to the slaves incrementally. # 2) Diskless: The Redis master creates a new process that directly writes the # RDB file to slave sockets, without touching the disk at all. # # With disk-backed replication, while the RDB file is generated, more slaves # can be queued and served with the RDB file as soon as the current child producing # the RDB file finishes its work. With diskless replication instead once # the transfer starts, new slaves arriving will be queued and a new transfer # will start when the current one terminates. # # When diskless replication is used, the master waits a configurable amount of # time (in seconds) before starting the transfer in the hope that multiple slaves # will arrive and the transfer can be parallelized. # # With slow disks and fast (large bandwidth) networks, diskless replication # works better. repl-diskless-sync no # When diskless replication is enabled, it is possible to configure the delay # the server waits in order to spawn the child that transfers the RDB via socket # to the slaves. # # This is important since once the transfer starts, it is not possible to serve # new slaves arriving, that will be queued for the next RDB transfer, so the server # waits a delay in order to let more slaves arrive. # # The delay is specified in seconds, and by default is 5 seconds. To disable # it entirely just set it to 0 seconds and the transfer will start ASAP. repl-diskless-sync-delay 5 # Slaves send PINGs to server in a predefined interval. It's possible to change # this interval with the repl_ping_slave_period option. The default value is 10 # seconds. # # repl-ping-slave-period 10 # The following option sets the replication timeout for: # # 1) Bulk transfer I/O during SYNC, from the point of view of slave. # 2) Master timeout from the point of view of slaves (data, pings). # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value # specified for repl-ping-slave-period otherwise a timeout will be detected # every time there is low traffic between the master and the slave. # # repl-timeout 60 # Disable TCP_NODELAY on the slave socket after SYNC? # # If you select "yes" Redis will use a smaller number of TCP packets and # less bandwidth to send data to slaves. But this can add a delay for # the data to appear on the slave side, up to 40 milliseconds with # Linux kernels using a default configuration. # # If you select "no" the delay for data to appear on the slave side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions # or when the master and slaves are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no # Set the replication backlog size. The backlog is a buffer that accumulates # slave data when slaves are disconnected for some time, so that when a slave # wants to reconnect again, often a full resync is not needed, but a partial # resync is enough, just passing the portion of data the slave missed while # disconnected. # # The bigger the replication backlog, the longer the time the slave can be # disconnected and later be able to perform a partial resynchronization. # # The backlog is only allocated once there is at least a slave connected. # # repl-backlog-size 1mb # After a master has no longer connected slaves for some time, the backlog # will be freed. 
The following option configures the number of seconds that # need to elapse, starting from the time the last slave disconnected, for # the backlog buffer to be freed. # # A value of 0 means to never release the backlog. # # repl-backlog-ttl 3600 # The slave priority is an integer number published by Redis in the INFO output. # It is used by Redis Sentinel in order to select a slave to promote into a # master if the master is no longer working correctly. # # A slave with a low priority number is considered better for promotion, so # for instance if there are three slaves with priority 10, 100, 25 Sentinel will # pick the one with priority 10, which is the lowest. # # However a special priority of 0 marks the slave as not able to perform the # role of master, so a slave with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. slave-priority 100 # It is possible for a master to stop accepting writes if there are fewer than # N slaves connected, with a lag less than or equal to M seconds. # # The N slaves need to be in "online" state. # # The lag in seconds, which must be <= the specified value, is calculated from # the last ping received from the slave, which is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but # will limit the window of exposure for lost writes in case not enough slaves # are available, to the specified number of seconds. # # For example to require at least 3 slaves with a lag <= 10 seconds use: # # min-slaves-to-write 3 # min-slaves-max-lag 10 # # Setting one or the other to 0 disables the feature. # # By default min-slaves-to-write is set to 0 (feature disabled) and # min-slaves-max-lag is set to 10. ################################## SECURITY ################################### # Require clients to issue AUTH <PASSWORD> before processing any other # commands. This might be useful in environments in which you do not trust # others with access to the host running redis-server. # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). # # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password, otherwise it will be very easy to break. # # requirepass foobared # Command renaming. # # It is possible to change the name of dangerous commands in a shared # environment. For instance the CONFIG command may be renamed into something # hard to guess so that it will still be available for internal-use tools # but not available for general clients. # # Example: # # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 # # It is also possible to completely kill a command by renaming it into # an empty string: # # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the # AOF file or transmitted to slaves may cause problems. # # Trove uses 'rename-command' internally to hide certain commands. ################################### LIMITS #################################### # Set the max number of connected clients at the same time. By default # this limit is set to 10000 clients, however if the Redis server is not # able to configure the process file limit to allow for the specified limit # the max number of allowed clients is set to the current file limit # minus 32 (as Redis reserves a few file descriptors for internal uses).
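# As an illustrative calculation (the 4096 figure is an assumption, not a # Redis default): with a process file limit of 4096, the effective cap would # be 4096 - 32 = 4064 clients.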
# # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # # maxclients 10000 # Don't use more memory than the specified amount of bytes. # When the memory limit is reached Redis will try to remove keys # according to the eviction policy selected (see maxmemory-policy). # # If Redis can't remove keys according to the policy, or if the policy is # set to 'noeviction', Redis will start to reply with errors to commands # that would use more memory, like SET, LPUSH, and so on, and will continue # to reply to read-only commands like GET. # # This option is usually useful when using Redis as an LRU cache, or to set # a hard memory limit for an instance (using the 'noeviction' policy). # # WARNING: If you have slaves attached to an instance with maxmemory on, # the size of the output buffers needed to feed the slaves is subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output # buffer of slaves is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # # In short... if you have slaves attached it is suggested that you set a lower # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # # maxmemory <bytes> # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among the following behaviors: # # volatile-lru -> remove the key with an expire set using an LRU algorithm # allkeys-lru -> remove any key according to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations # # Note: with any of the above policies, Redis will return an error on write # operations, when there are no suitable keys for eviction. # # At the date of writing these commands are: set setnx setex append # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby # getset mset msetnx exec sort # # The default is: # # maxmemory-policy noeviction # LRU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune them for speed or # accuracy. By default Redis will check five keys and pick the one that was # used least recently; you can change the sample size using the following # configuration directive. # # The default of 5 produces good enough results. 10 approximates true LRU # very closely but costs a bit more CPU. 3 is very fast but not very accurate. # # maxmemory-samples 5 ############################## APPEND ONLY MODE ############################### # By default Redis asynchronously dumps the dataset on disk. This mode is # good enough in many applications, but an issue with the Redis process or # a power outage may result in a few minutes of lost writes (depending on # the configured save points). # # The Append Only File is an alternative persistence mode that provides # much better durability.
For instance using the default data fsync policy # (see later in the config file) Redis can lose just one second of writes in a # dramatic event like a server power outage, or a single write if something # wrong with the Redis process itself happens, but the operating system is # still running correctly. # # AOF and RDB persistence can be enabled at the same time without problems. # If the AOF is enabled on startup Redis will load the AOF, that is the file # with the better durability guarantees. # # Please check http://redis.io/topics/persistence for more information. appendonly no # The name of the append only file (default: "appendonly.aof") appendfilename "appendonly.aof" # The fsync() call tells the Operating System to actually write data on disk # instead of waiting for more data in the output buffer. Some OS will really flush # data on disk, some other OS will just try to do it ASAP. # # Redis supports three different modes: # # no: don't fsync, just let the OS flush the data when it wants. Faster. # always: fsync after every write to the append only log. Slow, Safest. # everysec: fsync only one time every second. Compromise. # # The default is "everysec", as that's usually the right compromise between # speed and data safety. It's up to you to understand if you can relax this to # "no" that will let the operating system flush the output buffer when # it wants, for better performances (but if you can live with the idea of # some data loss consider the default persistence mode that's snapshotting), # or on the contrary, use "always" that's very slow but a bit safer than # everysec. # # More details please check the following article: # http://antirez.com/post/redis-persistence-demystified.html # # If unsure, use "everysec". # appendfsync always appendfsync everysec # appendfsync no # When the AOF fsync policy is set to always or everysec, and a background # saving process (a background save or AOF log background rewriting) is # performing a lot of I/O against the disk, in some Linux configurations # Redis may block too long on the fsync() call. Note that there is no fix for # this currently, as even performing fsync in a different thread will block # our synchronous write(2) call. # # In order to mitigate this problem it's possible to use the following option # that will prevent fsync() from being called in the main process while a # BGSAVE or BGREWRITEAOF is in progress. # # This means that while another child is saving, the durability of Redis is # the same as "appendfsync none". In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). # # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. # Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. # # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). # # This base size is compared to the current size. If the current size is # bigger than the specified percentage, the rewrite is triggered. 
You also need # to specify a minimal size for the AOF file to be rewritten; this # is useful to avoid rewriting the AOF file even if the percentage increase # is reached but it is still pretty small. # # Specify a percentage of zero in order to disable the automatic AOF # rewrite feature. auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb # An AOF file may be found to be truncated at the end during the Redis # startup process, when the AOF data gets loaded back into memory. # This may happen when the system where Redis is running # crashes, especially when an ext4 filesystem is mounted without the # data=ordered option (however this can't happen when Redis itself # crashes or aborts but the operating system still works correctly). # # Redis can either exit with an error when this happens, or load as much # data as possible (the default now) and start if the AOF file is found # to be truncated at the end. The following option controls this behavior. # # If aof-load-truncated is set to yes, a truncated AOF file is loaded and # the Redis server starts emitting a log to inform the user of the event. # Otherwise if the option is set to no, the server aborts with an error # and refuses to start. When the option is set to no, the user is required # to fix the AOF file using the "redis-check-aof" utility before restarting # the server. # # Note that if the AOF file is found to be corrupted in the middle, # the server will still exit with an error. This option only applies when # Redis tries to read more data from the AOF file but not enough bytes # are found. aof-load-truncated yes ################################ LUA SCRIPTING ############################### # Max execution time of a Lua script in milliseconds. # # If the maximum execution time is reached Redis will log that a script is # still in execution after the maximum allowed time and will start to # reply to queries with an error. # # When a long running script exceeds the maximum execution time only the # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be # used to stop a script that has not yet called write commands. The second # is the only way to shut down the server in the case a write command was # already issued by the script but the user doesn't want to wait for the natural # termination of the script. # # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 ################################ REDIS CLUSTER ############################### # # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however # in order to mark it as "mature" we need to wait for a non trivial percentage # of users to deploy it in production. # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # # Normal Redis instances can't be part of a Redis Cluster; only nodes that are # started as cluster nodes can. In order to start a Redis instance as a # cluster node, enable the cluster support by uncommenting the following: # # cluster-enabled yes # Every cluster node has a cluster configuration file. This file is not # intended to be edited by hand. It is created and updated by Redis nodes. # Every Redis Cluster node requires a different cluster configuration file. # Make sure that instances running in the same system do not have # overlapping cluster configuration file names.
# # cluster-config-file nodes-6379.conf # Cluster node timeout is the number of milliseconds a node must be unreachable # for it to be considered in failure state. # Most other internal time limits are multiples of the node timeout. # # cluster-node-timeout 15000 # A slave of a failing master will avoid starting a failover if its data # looks too old. # # There is no simple way for a slave to actually have an exact measure of # its "data age", so the following two checks are performed: # # 1) If there are multiple slaves able to fail over, they exchange messages # in order to try to give an advantage to the slave with the best # replication offset (more data from the master processed). # Slaves will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # # 2) Every single slave computes the time of the last interaction with # its master. This can be the last ping or command received (if the master # is still in the "connected" state), or the time that elapsed since the # disconnection with the master (if the replication link is currently down). # If the last interaction is too old, the slave will not try to fail over # at all. # # The point "2" can be tuned by the user. Specifically a slave will not perform # the failover if, since the last interaction with the master, the time # elapsed is greater than: # # (node-timeout * slave-validity-factor) + repl-ping-slave-period # # So for example if node-timeout is 30 seconds, and the slave-validity-factor # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the # slave will not try to fail over if it was not able to talk with the master # for longer than 310 seconds. # # A large slave-validity-factor may allow slaves with too old data to fail over # a master, while a too-small value may prevent the cluster from being able to # elect a slave at all. # # For maximum availability, it is possible to set the slave-validity-factor # to a value of 0, which means that slaves will always try to fail over the # master regardless of the last time they interacted with the master. # (However they'll always try to apply a delay proportional to their # offset rank). # # Zero is the only value able to guarantee that when all the partitions heal # the cluster will always be able to continue. # # cluster-slave-validity-factor 10 # Cluster slaves are able to migrate to orphaned masters, which are masters # that are left without working slaves. This improves the cluster's ability # to resist failures, as otherwise an orphaned master can't be failed over # in case of failure if it has no working slaves. # # Slaves migrate to orphaned masters only if there are still at least a # given number of other working slaves for their old master. This number # is the "migration barrier". A migration barrier of 1 means that a slave # will migrate only if there is at least 1 other working slave for its master # and so forth. It usually reflects the number of slaves you want for every # master in your cluster. # # Default is 1 (slaves migrate only if their masters remain with at least # one slave). To disable migration just set it to a very large value. # A value of 0 can be set but is useful only for debugging and dangerous # in production. # # cluster-migration-barrier 1 # By default Redis Cluster nodes stop accepting queries if they detect there # is at least one hash slot uncovered (no available node is serving it).
# This way, if the cluster is partially down (for example, a range of hash slots # is no longer covered), the whole cluster eventually becomes unavailable. # It automatically becomes available again as soon as all the slots are covered. # # However sometimes you want the subset of the cluster which is working # to continue to accept queries for the part of the key space that is still # covered. In order to do so, just set the cluster-require-full-coverage # option to no. # # cluster-require-full-coverage yes # To set up your cluster, make sure to read the documentation # available at the http://redis.io web site. ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified # execution time. The execution time does not include the I/O operations # like talking with the client, sending the reply and so forth, # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and cannot serve # other requests in the meantime). # # You can configure the slow log with two parameters: one tells Redis # the execution time, in microseconds, that must be exceeded for the # command to get logged, and the other parameter is the length of the # slow log. When a new command is logged the oldest one is removed from the # queue of logged commands. # The following time is expressed in microseconds, so 1000000 is equivalent # to one second. Note that a negative number disables the slow log, while # a value of zero forces the logging of every command. slowlog-log-slower-than 10000 # There is no limit to this length. Just be aware that it will consume memory. # You can reclaim memory used by the slow log with SLOWLOG RESET. slowlog-max-len 128 ################################ LATENCY MONITOR ############################## # The Redis latency monitoring subsystem samples different operations # at runtime in order to collect data related to possible sources of # latency of a Redis instance. # # Via the LATENCY command this information is available to the user, who can # print graphs and obtain reports. # # The system only logs operations that were performed in a time equal to or # greater than the number of milliseconds specified via the # latency-monitor-threshold configuration directive. When its value is set # to zero, the latency monitor is turned off. # # By default latency monitoring is disabled since it is mostly not needed # if you don't have latency issues, and collecting data has a performance # impact that, while very small, can be measured under big load. Latency # monitoring can easily be enabled at runtime using the command # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. latency-monitor-threshold 0 ############################# EVENT NOTIFICATION ############################## # Redis can notify Pub/Sub clients about events happening in the key space. # This feature is documented at http://redis.io/topics/notifications # # For instance if keyspace events notification is enabled, and a client # performs a DEL operation on key "foo" stored in the Database 0, two # messages will be published via Pub/Sub: # # PUBLISH __keyspace@0__:foo del # PUBLISH __keyevent@0__:del foo # # It is possible to select the events that Redis will notify among a set # of classes. Every class is identified by a single character: # # K Keyspace events, published with __keyspace@<db>__ prefix. # E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... # $ String commands # l List commands # s Set commands # h Hash commands # z Sorted set commands # x Expired events (events generated every time a key expires) # e Evicted events (events generated when a key is evicted for maxmemory) # A Alias for g$lshzxe, so that the "AKE" string means all the events. # # The "notify-keyspace-events" directive takes as its argument a string that is # composed of zero or more characters. The empty string means that notifications # are disabled. # # Example: to enable list and generic events, from the point of view of the # event name, use: # # notify-keyspace-events Elg # # Example 2: to get the stream of the expired keys by subscribing to channel # name __keyevent@0__:expired, use: # # notify-keyspace-events Ex # # By default all notifications are disabled because most users don't need # this feature and the feature has some overhead. Note that if you don't # specify at least one of K or E, no events will be delivered. notify-keyspace-events "" ############################### ADVANCED CONFIG ############################### # Hashes are encoded using a memory efficient data structure when they have a # small number of entries, and the biggest entry does not exceed a given # threshold. These thresholds can be configured using the following directives. hash-max-ziplist-entries 512 hash-max-ziplist-value 64 # Similarly to hashes, small lists are also encoded in a special way in order # to save a lot of space. The special representation is only used when # you are under the following limits: list-max-ziplist-entries 512 list-max-ziplist-value 64 # Sets have a special encoding in just one case: when a set is composed # of just strings that happen to be integers in radix 10 in the range # of 64 bit signed integers. # The following configuration setting sets the limit on the size of the # set in order to use this special memory saving encoding. set-max-intset-entries 512 # Similarly to hashes and lists, sorted sets are also specially encoded in # order to save a lot of space. This encoding is only used when the length and # elements of a sorted set are below the following limits: zset-max-ziplist-entries 128 zset-max-ziplist-value 64 # HyperLogLog sparse representation bytes limit. The limit includes the # 16 bytes header. When a HyperLogLog using the sparse representation crosses # this limit, it is converted into the dense representation. # # A value greater than 16000 is totally useless, since at that point the # dense representation is more memory efficient. # # The suggested value is ~ 3000 in order to have the benefits of # the space efficient encoding without slowing down too much PFADD, # which is O(N) with the sparse encoding. The value can be raised to # ~ 10000 when CPU is not a concern, but space is, and the data set is # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. hll-sparse-max-bytes 3000 # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in # order to help rehashing the main Redis hash table (the one mapping top-level # keys to values). The hash table implementation Redis uses (see dict.c) # performs a lazy rehashing: the more operations you run on a hash table # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table.
# # The default is to use this millisecond 10 times every second in order to # actively rehash the main dictionaries, freeing memory when possible. # # If unsure: # use "activerehashing no" if you have hard latency requirements and it is # not a good thing in your environment that Redis can reply from time to time # to queries with a 2 millisecond delay. # # use "activerehashing yes" if you don't have such hard requirements but # want to free memory asap when possible. activerehashing yes # The client output buffer limits can be used to force disconnection of clients # that are not reading data from the server fast enough for some reason (a # common reason is that a Pub/Sub client can't consume messages as fast as the # publisher can produce them). # # The limit can be set differently for the three different classes of clients: # # normal -> normal clients including MONITOR clients # slave -> slave clients # pubsub -> clients subscribed to at least one pubsub channel or pattern # # The syntax of every client-output-buffer-limit directive is the following: # # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> # # A client is immediately disconnected once the hard limit is reached, or if # the soft limit is reached and remains reached for the specified number of # seconds (continuously). # So for instance if the hard limit is 32 megabytes and the soft limit is # 16 megabytes / 10 seconds, the client will get disconnected immediately # if the size of the output buffers reaches 32 megabytes, but will also get # disconnected if the client reaches 16 megabytes and continuously exceeds # the limit for 10 seconds. # # By default normal clients are not limited because they don't receive data # without asking (in a push way), but just after a request, so only # asynchronous clients may create a scenario where data is requested faster # than it can be read. # # Instead there is a default limit for pubsub and slave clients, since # subscribers and slaves receive data in a push fashion. # # Both the hard and the soft limit can be disabled by setting them to zero. client-output-buffer-limit normal 0 0 0 client-output-buffer-limit slave 256mb 64mb 60 client-output-buffer-limit pubsub 32mb 8mb 60 unixsocket /var/run/redis/redis.sock unixsocketperm 777 # Redis calls an internal function to perform many background tasks, like # closing connections of clients in timeout, purging expired keys that are # never requested, and so forth. # # Not all tasks are performed with the same frequency, but Redis checks for # tasks to perform according to the specified "hz" value. # # By default "hz" is set to 10. Raising the value will use more CPU when # Redis is idle, but at the same time will make Redis more responsive when # there are many keys expiring at the same time, and timeouts may be # handled with more precision. # # The range is between 1 and 500, however a value over 100 is usually not # a good idea. Most users should use the default of 10 and raise this up to # 100 only in environments where very low latency is required. hz 10 # When a child rewrites the AOF file, if the following option is enabled # the file will be fsync-ed every 32 MB of data generated. This is useful # in order to commit the file to the disk more incrementally and avoid # big latency spikes.
aof-rewrite-incremental-fsync yestrove-5.0.0/trove/templates/redis/validation-rules.json0000664000567000056710000001720112701410316024430 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "name": "tcp-backlog", "restart_required": true, "min": 0, "type": "integer" }, { "name": "timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "tcp-keepalive", "restart_required": false, "min": 0, "type": "integer" }, { "name": "loglevel", "restart_required": false, "type": "string" }, { "name": "databases", "restart_required": true, "min": 0, "type": "integer" }, { "name": "save", "restart_required": false, "type": "string" }, { "name": "stop-writes-on-bgsave-error", "restart_required": false, "type": "boolean" }, { "name": "rdbcompression", "restart_required": false, "type": "boolean" }, { "name": "rdbchecksum", "restart_required": true, "type": "boolean" }, { "name": "slave-serve-stale-data", "restart_required": false, "type": "boolean" }, { "name": "slave-read-only", "restart_required": false, "type": "boolean" }, { "name": "repl-diskless-sync", "restart_required": false, "type": "boolean" }, { "name": "repl-diskless-sync-delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-ping-slave-period", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-disable-tcp-nodelay", "restart_required": false, "type": "boolean" }, { "name": "repl-backlog-size", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-backlog-ttl", "restart_required": false, "min": 0, "type": "integer" }, { "name": "slave-priority", "restart_required": false, "min": 0, "type": "integer" }, { "name": "min-slaves-to-write", "restart_required": false, "min": 0, "type": "integer" }, { "name": "min-slaves-max-lag", "restart_required": false, "min": 0, "type": "integer" }, { "name": "requirepass", "restart_required": false, "type": "string" }, { "name": "maxclients", "restart_required": false, "min": 0, "type": "integer" }, { "name": "maxmemory", "restart_required": false, "min": 0, "type": "integer" }, { "name": "maxmemory-policy", "restart_required": false, "type": "string" }, { "name": "maxmemory-samples", "restart_required": false, "min": 0, "type": "integer" }, { "name": "appendonly", "restart_required": false, "type": "boolean" }, { "name": "appendfsync", "restart_required": false, "type": "string" }, { "name": "no-appendfsync-on-rewrite", "restart_required": false, "type": "boolean" }, { "name": "auto-aof-rewrite-percentage", "restart_required": false, "min": 0, "type": "integer" }, { "name": "auto-aof-rewrite-min-size", "restart_required": false, "min": 0, "type": "integer" }, { "name": "aof-load-truncated", "restart_required": false, "type": "boolean" }, { "name": "lua-time-limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-node-timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-slave-validity-factor", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-migration-barrier", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-require-full-coverage", "restart_required": false, "type": "boolean" }, { "name": "slowlog-log-slower-than", "restart_required": false, "min": 0, "type": "integer" }, { "name": "slowlog-max-len", "restart_required": false, "min": 0, "type": "integer" }, { "name": "latency-monitor-threshold", 
"restart_required": false, "min": 0, "type": "integer" }, { "name": "notify-keyspace-events", "restart_required": false, "type": "string" }, { "name": "hash-max-ziplist-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "hash-max-ziplist-value", "restart_required": false, "min": 0, "type": "integer" }, { "name": "list-max-ziplist-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "list-max-ziplist-value", "restart_required": false, "min": 0, "type": "integer" }, { "name": "set-max-intset-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "zset-max-ziplist-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "zset-max-ziplist-value", "restart_required": false, "min": 0, "type": "integer" }, { "name": "hll-sparse-max-bytes", "restart_required": false, "min": 0, "max": 16000, "type": "integer" }, { "name": "activerehashing", "restart_required": false, "type": "boolean" }, { "name": "client-output-buffer-limit", "restart_required": false, "type": "string" }, { "name": "hz", "restart_required": false, "min": 1, "max": 500, "type": "integer" }, { "name": "aof-rewrite-incremental-fsync", "restart_required": false, "type": "boolean" } ] } trove-5.0.0/trove/templates/redis/replica_source.config.template0000664000567000056710000000006312701410316026251 0ustar jenkinsjenkins00000000000000repl-diskless-sync yes repl-diskless-sync-delay 10 trove-5.0.0/trove/templates/vertica/0000775000567000056710000000000012701410521020577 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/vertica/override.config.template0000664000567000056710000000000012701410316025407 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/vertica/config.template0000664000567000056710000000000012701410316023571 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/vertica/validation-rules.json0000664000567000056710000016042212701410316024763 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "description": "No of active partitions", "type": "integer", "name": "ActivePartitionCount", "restart_required": false }, { "description": "Interval between collection of nodes' addresses (seconds)", "type": "integer", "name": "AddressCollectorInterval", "restart_required": false }, { "description": "Interval between advancing the AHM (seconds)", "type": "integer", "name": "AdvanceAHMInterval", "restart_required": false }, { "description": "Consider backup epochs when setting new AHM", "type": "integer", "name": "AHMBackupManagement", "restart_required": false }, { "description": "Allow names containing non-ASCII UTF-8 characters", "type": "integer", "name": "AllowNonAsciiNames", "restart_required": false }, { "description": "Interval between Tuple Mover row count statistics updates (seconds)", "type": "integer", "name": "AnalyzeRowCountInterval", "restart_required": false }, { "description": "Maximum number of columns to analyze with each analyze stats plan", "type": "integer", "name": "AnalyzeStatsPlanMaxColumns", "restart_required": false }, { "description": "Number of sampling bands to use when not using entire data set.", "type": "integer", "name": "AnalyzeStatsSampleBands", "restart_required": false }, { "description": "ARC will commit only if the change is more than the percentage specified", "type": "integer", "name": "ARCCommitPercentage", "restart_required": false }, { "description": "The confidence level at which to run audits of license size utilization. 
Represent 99.5% as 99.5.", "type": "integer", "name": "AuditConfidenceLevel", "restart_required": false }, { "description": "The error tolerance for audits of license size utilization. Represent 4.5% as 4.5.", "type": "integer", "name": "AuditErrorTolerance", "restart_required": false }, { "description": "Use as recommended by Technical Support", "type": "integer", "name": "BasicVerticaOptions", "restart_required": false }, { "description": "size of memory managed by memory manager (in MB)", "type": "integer", "name": "BlockCacheSize", "restart_required": true }, { "description": "Buffer query output to allow possible retry of the query. Values allowed: 0(never buffer), 1(always buffer), 2(default: vertica decides before query begins executing to buffer based on certain criteria)", "type": "integer", "name": "BufferQueryOutputForPossibleRetry", "restart_required": false }, { "description": "If set to 1, position index will be cached", "type": "integer", "name": "CachePositionIndex", "restart_required": true }, { "description": "If true, cascade to a target resource pool will always lead to replanning the query on the target pool", "type": "integer", "name": "CascadeResourcePoolAlwaysReplan", "restart_required": false }, { "description": "Split catalog checkpoint into chunks of approximately this size", "type": "integer", "name": "CatalogCheckpointChunkSizeKB", "restart_required": false }, { "description": "Minimum transaction log size before a new catalog checkpoint is created", "type": "integer", "name": "CatalogCheckpointMinLogSizeKB", "restart_required": false }, { "description": "Transaction log size must be at least this fraction of the checkpoint size before a new catalog checkpoint is created", "type": "integer", "name": "CatalogCheckpointPercent", "restart_required": false }, { "description": "Rename storage files during final cleanup upon removal from the catalog", "type": "integer", "name": "CatalogDeindexRename", "restart_required": false }, { "description": "Check data integrity using CRCs (should be enabled unless performance is adversely impacted", "type": "integer", "name": "CheckCRCs", "restart_required": false }, { "description": "Check data sortedness before writing to ROS", "type": "integer", "name": "CheckDataTargetSortOrder", "restart_required": false }, { "description": "Seconds to wait for 'late' nodes to finish recovery actions and proceed with the cluster", "type": "integer", "name": "ClusterRecoveryWait", "restart_required": false }, { "description": "The collation function column width is 4 plus its data column width times CollationExpansion octets", "type": "integer", "name": "CollationExpansion", "restart_required": false }, { "description": "Catalog compression (0: off, 1: chkpt+systxnlogs, 2: chkpt+txnlogs)", "type": "integer", "name": "CompressCatalogOnDisk", "restart_required": true }, { "description": "When enabled, control traffic will be compressed", "type": "integer", "name": "CompressDistCalls", "restart_required": false }, { "description": "When enabled, data traffic will be compressed; this reduces data bandwidth at a cost of CPU time", "type": "integer", "name": "CompressNetworkData", "restart_required": false }, { "description": "Compute APPROXCOUNTDISTINCTs when analyzing statistics. 
Default is false", "type": "integer", "name": "ComputeApproxNDVsDuringAnalyzeStats", "restart_required": false }, { "description": "Number of ROS containers that are allowed before new ROSs are prevented (ROS pushback)", "type": "integer", "name": "ContainersPerProjectionLimit", "restart_required": false }, { "description": "When doing a COPY FROM VERTICA without an explicit columns list, include IDENTITY columns in the implicit list", "type": "integer", "name": "CopyFromVerticaWithIdentity", "restart_required": false }, { "description": "Time interval in seconds after which a node sends a heartbeat (Set to 0 to disable.)", "type": "integer", "name": "DatabaseHeartbeatInterval", "restart_required": false }, { "description": "Number of rows to be sampled for correlation analysis during DBD design. Default: 4500", "type": "integer", "name": "DBDCorrelationSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for correlation analysis during DBD design. Default: 0 Use DBDCorrelationSampleRowCount", "type": "integer", "name": "DBDCorrelationSampleRowPct", "restart_required": false }, { "description": "Number of rows to be sampled for count distinct analysis during DBD design. Default: 0 Use DBDCountDistinctSampleRowPct", "type": "integer", "name": "DBDCountDistinctSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for count distinct analysis during DBD design. Default: 100", "type": "integer", "name": "DBDCountDistinctSampleRowPct", "restart_required": false }, { "description": "Concurrency setting for the deployment/rebalance process in the DBD. Default: 0", "type": "integer", "name": "DBDDeploymentParallelism", "restart_required": false }, { "description": "Dynamic sampling scheme to be used for encoding analysis during DBD design. Default: true", "type": "integer", "name": "DBDDynamicSampling", "restart_required": false }, { "description": "Number of rows to be sampled for encoding analysis during DBD design. Default: 1000000", "type": "integer", "name": "DBDEncodingSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for encoding analysis during DBD design. Default: 0 Use DBDEncodingSampleRowCount", "type": "integer", "name": "DBDEncodingSampleRowPct", "restart_required": false }, { "description": "Number of minimum rows expected in Fact Table, default 1M rows", "type": "integer", "name": "DBDLargestTableRowCountBoundary", "restart_required": false }, { "description": "Log internal DBD design process in DC tables. Default: false DC logging of design process is turned OFF by default", "type": "integer", "name": "DBDLogInternalDesignProcess", "restart_required": false }, { "description": "Concurrency setting for the parallelism in the Storage Optimization phase of Database Designer. 
Default: 0", "type": "integer", "name": "DBDMaxConcurrencyForEncodingExperiment", "restart_required": false }, { "description": "If the largest fact table has more than rows, then use this percentage off the largest fact table to define the number of rows below which a table should be replicated Default: 1%", "type": "integer", "name": "DBDRepLargeRowCountPct", "restart_required": false }, { "description": "If the largest fact table has less than rows, then use this percentage off the largest fact table to define the number of rows below which a table should be replicated Default: 10%", "type": "integer", "name": "DBDRepSmallRowCountPct", "restart_required": false }, { "description": "Number of bands sampled using Dynamic Sampling Algorithm. Default: 100", "type": "integer", "name": "DBDSampleStorageBandCount", "restart_required": false }, { "description": "Number of rows to be sampled for segmentation skew analysis during DBD design. Default: 8000", "type": "integer", "name": "DBDSkewDetectionSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for segmentation skew analysis during DBD design. Default: 0 Use DBDSkewDetectionSampleRowCount", "type": "integer", "name": "DBDSkewDetectionSampleRowPct", "restart_required": false }, { "description": "Determines source for resource allocation during a designer invocation. Default: false Uses user's resource pool", "type": "integer", "name": "DBDUseOnlyDesignerResourcePool", "restart_required": false }, { "description": "Default setting for intervalstyle; 1 is UNITS; 0 is PLAIN and conforms to standard SQL", "type": "integer", "name": "DefaultIntervalStyle", "restart_required": false }, { "description": "Defines the default session startup Locale for the database", "type": "string", "name": "DefaultSessionLocale", "restart_required": false }, { "description": "Disable schema-level privileges on tables.", "type": "integer", "name": "DisableInheritedPrivileges", "restart_required": false }, { "description": "Set to disable local resegmentation", "type": "integer", "name": "DisableLocalResegmentation", "restart_required": false }, { "description": "If \u2018false\u2019, the optimizer randomly chooses nodes to do the work of any down nodes", "type": "integer", "name": "DisableNodeDownOptimization", "restart_required": false }, { "description": "Do not allow creating prejoin projections", "type": "integer", "name": "DisablePrejoinProjections", "restart_required": false }, { "description": "Disallow the MultipleActiveResultSets (MARS) feature to be enabled", "type": "integer", "name": "DisallowMars", "restart_required": false }, { "description": "Interval between disk space polls (for disk resource management) (seconds)", "type": "integer", "name": "DiskSpacePollingInterval", "restart_required": true }, { "description": "Enable DMLs to cancel conflicting TM tasks to acquire lock", "type": "integer", "name": "DMLCancelTM", "restart_required": false }, { "description": "Use as recommended by Technical Support", "type": "integer", "name": "EEVerticaOptions", "restart_required": false }, { "description": "Enables Access Policy feature", "type": "integer", "name": "EnableAccessPolicy", "restart_required": false }, { "description": "Enable all granted roles on login", "type": "integer", "name": "EnableAllRolesOnLogin", "restart_required": false }, { "description": "If true and apportionable source/parser are defined for the load, input may get split into multiple parts (portions) and loaded by multiple 
threads/servers in parallel", "type": "integer", "name": "EnableApportionLoad", "restart_required": false }, { "description": "Turn on/off the automatic update row count, min and max when DML queries are run", "type": "integer", "name": "EnableAutoDMLStats", "restart_required": false }, { "description": "A value of 1 enables block memory manager or 0 disables memory manager", "type": "integer", "name": "EnableBlockMemoryManager", "restart_required": true }, { "description": "If true and a chunker is defined for the corresponding parser multiple parse threads can cooperate to parse the output of a single source", "type": "integer", "name": "EnableCooperativeParse", "restart_required": false }, { "description": "Enable the usage data collector", "type": "integer", "name": "EnableDataCollector", "restart_required": false }, { "description": "Enabled cipher suites for TLS", "type": "string", "name": "EnabledCipherSuites", "restart_required": true }, { "description": "Enable EE Thread Pool to reduce threads used", "type": "integer", "name": "EnableEEThreadPool", "restart_required": false }, { "description": "Enable SIPS for early materialized merge join, on multi-block inners", "type": "integer", "name": "EnableEMMJMultiblockSIPS", "restart_required": false }, { "description": "Allow expression results to be materialized as projection columns", "type": "integer", "name": "EnableExprsInProjections", "restart_required": false }, { "description": "allow user-specified force outer rule", "type": "integer", "name": "EnableForceOuter", "restart_required": false }, { "description": "Allow aggregate projections using GROUP BY", "type": "integer", "name": "EnableGroupByProjections", "restart_required": false }, { "description": "Enable JIT compilation optimizations", "type": "integer", "name": "EnableJIT", "restart_required": false }, { "description": "Determines whether new primary key constraints will be enabled by default", "type": "integer", "name": "EnableNewPrimaryKeysByDefault", "restart_required": false }, { "description": "Determines whether new unique key constraints will be enabled by default", "type": "integer", "name": "EnableNewUniqueKeysByDefault", "restart_required": false }, { "description": "Enable Parallel Hash build to improve join performance", "type": "integer", "name": "EnableParallelHashBuild", "restart_required": false }, { "description": "Enable Parallel Sort to improve sort performance", "type": "integer", "name": "EnableParallelSort", "restart_required": false }, { "description": "0 if the special ANY_ROW event for pattern matching is not enabled. 
Otherwise it is enabled.", "type": "integer", "name": "EnablePatternMatchingAnyRow", "restart_required": false }, { "description": "Enable Plan Stability Feature", "type": "integer", "name": "EnablePlanStability", "restart_required": false }, { "description": "Execute active directed queries in Plan Stability Store", "type": "integer", "name": "EnablePlanStabilityLookup", "restart_required": false }, { "description": "Query threads can be restricted to executing on specific CPUs via session resource pool attributes", "type": "integer", "name": "EnableResourcePoolCPUAffinity", "restart_required": false }, { "description": "Enable runtime task priority scheduler to allow high priority queries to use more CPU time and IO bandwidth", "type": "integer", "name": "EnableRuntimePriorityScheduler", "restart_required": false }, { "description": "Enable SSL for the server", "type": "integer", "name": "EnableSSL", "restart_required": true }, { "description": "Enable bundling data files along with their index files. Also MaxBundleableROSSizeKB is effective when this is enabled.", "type": "integer", "name": "EnableStorageBundling", "restart_required": false }, { "description": "Force casts from varchar to Time,TimeTz,Timestamp,TimestampTz,Interval to error instead of returning null", "type": "integer", "name": "EnableStrictTimeCasts", "restart_required": false }, { "description": "Allow aggregate projections using Top K / LIMIT", "type": "integer", "name": "EnableTopKProjections", "restart_required": false }, { "description": "Allow aggregate projections using UDTransforms", "type": "integer", "name": "EnableUDTProjections", "restart_required": false }, { "description": "Enable optimizations based on guarantee of uniqueness", "type": "integer", "name": "EnableUniquenessOptimization", "restart_required": false }, { "description": "If set to 1, the total thread count will equal the number of virtual processors; otherwise it will equal the number of real cores", "type": "integer", "name": "EnableVirtualCoreCount", "restart_required": false }, { "description": "Granularity of time to epoch mapping (seconds)", "type": "integer", "name": "EpochMapInterval", "restart_required": false }, { "description": "Warn of strings which use backslash quoting; using an E'...' escape string will avoid this warning", "type": "integer", "name": "EscapeStringWarning", "restart_required": false }, { "description": "The number of bands or clusters to form when taking samples while executing evaluate_delete_performance().", "type": "integer", "name": "EvaluateDeletePerformanceSampleStorageBandCount", "restart_required": false }, { "description": "The number of samples to take while executing evaluate_delete_performance().", "type": "integer", "name": "EvaluateDeletePerformanceSampleStorageCount", "restart_required": false }, { "description": "Exclude ephemeral nodes in SELECT queries. Default is false", "type": "integer", "name": "ExcludeEphemeralNodesInQueries", "restart_required": false }, { "description": "Maximum number of rejected/exceptions records that will be written while querying an external table. 
Default: 100, Unlimited: -1", "type": "integer", "name": "ExternalTablesExceptionsLimit", "restart_required": false }, { "description": "Time interval to wait before replacing a DOWN node with a STANDBY node", "type": "integer", "name": "FailoverToStandbyAfter", "restart_required": false }, { "description": "Maximum memory (in MB) for each UDx side process", "type": "string", "name": "FencedUDxMemoryLimitMB", "restart_required": false }, { "description": "Number of files (per projection) that are allowed before new ROSs are prevented (ROS pushback)", "type": "integer", "name": "FilesPerProjectionLimit", "restart_required": false }, { "description": "Multiplier to pad the observed length of fields in flex tables when casting them to regular vs long types", "type": "integer", "name": "FlexTableDataTypeGuessMultiplier", "restart_required": false }, { "description": "Default size of __raw__ column in flex tables", "type": "integer", "name": "FlexTableRawSize", "restart_required": false }, { "description": "Force all UDx's to run in fenced mode", "type": "integer", "name": "ForceUDxFencedMode", "restart_required": false }, { "description": "Ensure catalog durable on disk after each commit", "type": "integer", "name": "FsyncCatalogForLuck", "restart_required": false }, { "description": "Call fsync after each data file is written", "type": "integer", "name": "FsyncDataForLuck", "restart_required": false }, { "description": "Maximum amount of memory (in megabytes) that can be used by a single GROUP BY", "type": "string", "name": "GBHashMemCapMB", "restart_required": false }, { "description": "Enables profiling for all statements at the EE operator level", "type": "integer", "name": "GlobalEEProfiling", "restart_required": false }, { "description": "A user who'll inherit objects of dropped users. 
Should be unset (blank) by default (opt-in)", "type": "string", "name": "GlobalHeirUsername", "restart_required": false }, { "description": "Enables profiling for all statements", "type": "integer", "name": "GlobalQueryProfiling", "restart_required": false }, { "description": "Enables profiling for all sessions", "type": "integer", "name": "GlobalSessionProfiling", "restart_required": false }, { "description": "When enabled, the hash prepass mode of the EE GroupGenerator operator will be used", "type": "integer", "name": "GroupGeneratorHashingEnabled", "restart_required": false }, { "description": "Amount of time in seconds waiting for connection to WebHCat before abort", "type": "integer", "name": "HCatConnectionTimeout", "restart_required": false }, { "description": "Name of the HCatalog User Defined Parser", "type": "string", "name": "HCatParserName", "restart_required": false }, { "description": "Slow transfer in bytes/sec limit lower than which transfer will abort after 'HCatSlowTransferTime' amount of time", "type": "integer", "name": "HCatSlowTransferLimit", "restart_required": false }, { "description": "Amount of time allowed for transfer below slow transfer limit before abort", "type": "integer", "name": "HCatSlowTransferTime", "restart_required": false }, { "description": "Name of the HCatalog User Defined Source", "type": "string", "name": "HCatSourceName", "restart_required": false }, { "description": "Name of the HCatalog webservice, used in constructing a url to query this service", "type": "string", "name": "HCatWebserviceName", "restart_required": false }, { "description": "Version of the HCatalog webservice, used in constructing a url to query this service", "type": "string", "name": "HCatWebserviceVersion", "restart_required": false }, { "description": "Upper bound on the number of epochs kept in the epoch map", "type": "integer", "name": "HistoryRetentionEpochs", "restart_required": false }, { "description": "Number of seconds of epochs kept in the epoch map (seconds)", "type": "integer", "name": "HistoryRetentionTime", "restart_required": false }, { "description": "Path to the java binary for executing UDx written in Java", "type": "string", "name": "JavaBinaryForUDx", "restart_required": false }, { "description": "Minimum heap size (in MB) for Java UDx side process", "type": "integer", "name": "JavaSideProcessMinHeapSizeMB", "restart_required": false }, { "description": "Keep ROS Min/Max values on all columns. Enables some optimizations, at a cost of catalog space.", "type": "integer", "name": "KeepMinMaxOnAllColumns", "restart_required": false }, { "description": "SNMP event when the LGE for the node lags more than LGELagThreshold seconds behind the last epoch close time", "type": "integer", "name": "LGELagThreshold", "restart_required": false }, { "description": "Controls the maximum ROS output in data load; Negative and zero will be considered as 1.", "type": "integer", "name": "LoadMaxFinalROSCount", "restart_required": false }, { "description": "Time to wait for a table lock before giving up (seconds)", "type": "integer", "name": "LockTimeout", "restart_required": false }, { "description": "Interval (in seconds) at which heartbeat messages are sent to vertica.log. 
(Set to 0 to disable.)", "type": "integer", "name": "LogHeartbeatInterval", "restart_required": false }, { "description": "SNMP event LowDiskSpace is raised when disk utilization exceeds this percentage", "type": "integer", "name": "LowDiskSpaceWarningPct", "restart_required": false }, { "description": "Max number of columns used in auto projection segmentation expression (0 is to use all columns)", "type": "integer", "name": "MaxAutoSegColumns", "restart_required": false }, { "description": "ROS files which are smaller than this size (KB) are selected for bundling. '0' means bundling of separate ROS's is disabled, though pidx and fdb files of an individual ROS will be bundled if EnableStorageBundling is set. Maximum allowed size is 1024 (1048576 bytes)", "type": "integer", "name": "MaxBundleableROSSizeKB", "restart_required": false }, { "description": "Maximum number of client sessions; in addition five dbadmin sessions are allowed", "type": "integer", "name": "MaxClientSessions", "restart_required": false }, { "description": "Defines the no. of constraint violation checks per internal query in analyze_constraints(). Default is -1", "type": "integer", "name": "MaxConstraintChecksPerQuery", "restart_required": false }, { "description": "Maximum file size for Data Collector logs in KB", "type": "integer", "name": "MaxDataCollectorFileSize", "restart_required": false }, { "description": "Maximum desired size of an EE block (used to move tuples between operators), actual block size may be larger (must have capacity for at least 2 rows)", "type": "integer", "name": "MaxDesiredEEBlockSize", "restart_required": false }, { "description": "Maximum number of DVROSes attached to a single ROS container; once reached, dv merge out happens", "type": "integer", "name": "MaxDVROSPerContainer", "restart_required": false }, { "description": "Maximum length of individual lines (entries) in the vertica.log log file. Longer lines are truncated. 
'0' means no limit.", "type": "integer", "name": "MaxLogLineLength", "restart_required": false }, { "description": "The max ROS size in MB a merge out job can produce", "type": "integer", "name": "MaxMrgOutROSSizeMB", "restart_required": false }, { "description": "Maximum amount of memory used by the Optimizer; Increasing this value may help with 'Optimizer memory use exceeds allowed limit' errors (MB)", "type": "integer", "name": "MaxOptMemMB", "restart_required": false }, { "description": "Maximum amount of memory used by the Optimizer in the context of DBD; Increasing this value may help with 'Run Database Designer with more memory or increase Database Designer memory usage limit' errors (MB)", "type": "integer", "name": "MaxOptMemMBInDBD", "restart_required": false }, { "description": "Maximum amount of memory allowed for parsing a single request; Increasing this value may help with 'Request size too big' errors (MB)", "type": "integer", "name": "MaxParsedQuerySizeMB", "restart_required": false }, { "description": "Max no of partitions per projection", "type": "integer", "name": "MaxPartitionCount", "restart_required": false }, { "description": "The number of times the system might try to re-run a query if the first run does not succeed.", "type": "integer", "name": "MaxQueryRetries", "restart_required": false }, { "description": "# of failed attempts before recovery gives up", "type": "integer", "name": "MaxRecoverErrors", "restart_required": true }, { "description": "Maximum number of historic passes made by Recovery before moving to the current pass where locks are taken", "type": "integer", "name": "MaxRecoverHistoricPasses", "restart_required": true }, { "description": "# of failed attempts before refresh gives up", "type": "integer", "name": "MaxRefreshErrors", "restart_required": false }, { "description": "Maximum number of historic passes made by Refresh before moving to the current pass where locks are taken", "type": "integer", "name": "MaxRefreshHistoricPasses", "restart_required": false }, { "description": "Maximum number of ROSes in a stratum; once reached, merge out happens", "type": "integer", "name": "MaxROSPerStratum", "restart_required": false }, { "description": "Memory to allocate to per-node Merge Join", "type": "integer", "name": "MergeJoinInnerInitialMB", "restart_required": false }, { "description": "If set to true, the cache will be used to pick the projection for mergeout", "type": "integer", "name": "MergeOutCache", "restart_required": false }, { "description": "Interval between Tuple Mover checks for mergeouts to perform (seconds)", "type": "integer", "name": "MergeOutInterval", "restart_required": false }, { "description": "Minimum free before catalog writes are refused (MB) (Default(-1) is 1GB or 2% free)", "type": "integer", "name": "MinimumCatalogDiskMegabytes", "restart_required": false }, { "description": "Minimum free before column data writes are refused (MB) (Default(-1) is 4GB or 5% free)", "type": "integer", "name": "MinimumDataDiskMegabytes", "restart_required": false }, { "description": "Minimum free before temp data writes are refused (MB) (Default(-1) is 8GB or 10% free)", "type": "integer", "name": "MinimumDataDiskTempMegabytes", "restart_required": false }, { "description": "Minimum size of the inner input to a join that will trigger the Optimizer to attempt a sort merge join (MB)", "type": "integer", "name": "MinSortMergeJoinMB", "restart_required": false }, { "description": "Interval between Tuple Mover checks for moveouts to perform (seconds)", 
"type": "integer", "name": "MoveOutInterval", "restart_required": false }, { "description": "The number of epochs data resides in WOS before the Tuple Mover triggers a moveout based on age", "type": "integer", "name": "MoveOutMaxAgeEpochs", "restart_required": false }, { "description": "The amount of time data resides in WOS before the Tuple Mover triggers a moveout based on age (seconds)", "type": "integer", "name": "MoveOutMaxAgeTime", "restart_required": false }, { "description": "The amount of WOS used before the Tuple Mover triggers a moveout based on utilization (percent)", "type": "integer", "name": "MoveOutSizePct", "restart_required": false }, { "description": "Memory to allocate to per-node Prepass GroupBy Operator", "type": "integer", "name": "NewEEGroupBySmallMemMB", "restart_required": false }, { "description": "Number of rows the EE will assign per thread when multiple threads are processing the same large ROS", "type": "integer", "name": "NewEEROSSubdivisionRows", "restart_required": false }, { "description": "Number of threads the EE will attempt to use for processing (per plan)", "type": "integer", "name": "NewEEThreads", "restart_required": false }, { "description": "Time to wait during shutdown, when auto recovery is not possible", "type": "integer", "name": "NoRecoverShutdownWait", "restart_required": true }, { "description": "Use as recommended by Technical Support", "type": "string", "name": "OptVerticaOptions", "restart_required": false }, { "description": "If true use a DT per local segment, even when sorting", "type": "integer", "name": "ParallelizeLocalSegmentLoad", "restart_required": false }, { "description": "Override the heap memory allocator for PCRE pattern matching library", "type": "integer", "name": "PatternMatchAllocator", "restart_required": true }, { "description": "Sets the recursion limit for PCRE used by pattern matching.", "type": "integer", "name": "PatternMatchingMatchLimitRecursion", "restart_required": false }, { "description": "The max number of rows per partition for pattern matching", "type": "integer", "name": "PatternMatchingMaxPartition", "restart_required": false }, { "description": "The max number of matches per partition for pattern matching", "type": "integer", "name": "PatternMatchingMaxPartitionMatches", "restart_required": false }, { "description": "The size of ovector for pcre_exec. 
Increase if using ANY_ROW and there are many subpattern groupings", "type": "integer", "name": "PatternMatchingPerMatchWorkspaceSize", "restart_required": false }, { "description": "Use JIT for PCRE regex matching in queries", "type": "integer", "name": "PatternMatchingUseJit", "restart_required": false }, { "description": "Override the stack memory allocator for PCRE pattern matching library", "type": "integer", "name": "PatternMatchStackAllocator", "restart_required": true }, { "description": "Pin Vertica to given number of CPUs (-1 = no pin)", "type": "integer", "name": "PinProcessors", "restart_required": true }, { "description": "Processor index to start with when pinning processors", "type": "integer", "name": "PinProcessorsOffset", "restart_required": true }, { "description": "If set to true, allows creating unsegmented projection using pre-excavator style", "type": "integer", "name": "PreExcavatorReplicatedProjection", "restart_required": false }, { "description": "Only load DataCollector records which satisfy time based predicates", "type": "integer", "name": "PruneDataCollectorByTime", "restart_required": false }, { "description": "Only load system table columns which participate in the query into SysWOS", "type": "integer", "name": "PruneSystemTableColumns", "restart_required": false }, { "description": "Maximum % of rows that may be deleted before Tuple Mover purges the ROS through mergeout", "type": "integer", "name": "PurgeMergeoutPercent", "restart_required": false }, { "description": "The max amount of memory MIN/MAX for RANGE moving window can use in MB", "type": "integer", "name": "RangeWindowMaxMem", "restart_required": false }, { "description": "Before starting recovery, perform cleanup of deleted files", "type": "integer", "name": "ReapBeforeRecover", "restart_required": true }, { "description": "When recovering a projection with an identical buddy from scratch recovery may be able to directly copy storage containers if this feature is enabled", "type": "integer", "name": "RecoverByContainer", "restart_required": false }, { "description": "Seconds to wait for dirty transactions before cancelling the session", "type": "integer", "name": "RecoveryDirtyTxnWait", "restart_required": false }, { "description": "Trigger moveout automatically on commit of data to WOS", "type": "integer", "name": "ReflexiveMoveout", "restart_required": false }, { "description": "When refreshing a projection from an identical buddy refresh may be able to directly copy storage containers if this feature is enabled", "type": "integer", "name": "RefreshByContainer", "restart_required": false }, { "description": "Configure the buffer size (in Byte) for remote initiator to cache query result. 
Value zero will turn off the feature of routing one-executor query plan to remote initiator", "type": "integer", "name": "RemoteInitiatorBufSize", "restart_required": false }, { "description": "Interval between checking and removing unused database snapshots (seconds)", "type": "integer", "name": "RemoveSnapshotInterval", "restart_required": false }, { "description": "Switch Replay Delete to use the new algorithm once runtime scan statistics exceeds the threshold", "type": "integer", "name": "ReplayDeleteAlgorithmSwitchThreshold", "restart_required": false }, { "description": "Fraction of total resources that can be assigned to locally initiated requests; the remainder is only used by remote requests", "type": "integer", "name": "ResLowLimPctOfHighLim", "restart_required": true }, { "description": "Restrict non-dbadmin users from viewing system-tables", "type": "integer", "name": "RestrictSystemTables", "restart_required": true }, { "description": "Number of 64K ROS blocks that may be kept in the decompression cache (default is 4096 blocks; 256MB)", "type": "integer", "name": "ROSCacheBlocks", "restart_required": true }, { "description": "Number of large (>64K) ROS blocks that may be kept in the decompression cache (for Grouped ROSs)", "type": "integer", "name": "ROSCacheLargeBlocks", "restart_required": true }, { "description": "Maximum number of ROSes in a stratum; once reached, merge out happens", "type": "integer", "name": "ROSPerStratum", "restart_required": false }, { "description": "Set the query duration threshold in microseconds to save profiling information to the dc_execution_engine_profiles table.", "type": "integer", "name": "SaveDCEEProfileThresholdUS", "restart_required": false }, { "description": "NONE (Default) MD5 and SHA512", "type": "string", "name": "SecurityAlgorithm", "restart_required": false }, { "description": "Auto projections will always be segmented. Default: true", "type": "integer", "name": "SegmentAutoProjection", "restart_required": false }, { "description": "Create range segmentation on node_name for DataCollector tables", "type": "integer", "name": "SegmentDataCollector", "restart_required": false }, { "description": "# Days. Delete closed session profiling data when data is SessionProfilingAgeOut days old.", "type": "integer", "name": "SessionProfilingAgeOut", "restart_required": false }, { "description": "If the average of the rows scanned squared during a delete is higher than this limit, a warning message about the projection is printed to the console. Disable by setting to -1.", "type": "integer", "name": "SlowDeleteConsoleWarningLimit", "restart_required": false }, { "description": "If the average of the rows scanned squared during a delete is higher than this limit, a warning message about the projection is printed to the vertica.log and saved in vs_comments. This limit is also used by evaluate_delete_performance to determine if a projection is likely to have slow delete performance. Disable by setting to -1. 
Console delete performance warnings will still be printed to log.", "type": "integer", "name": "SlowDeleteSystemWarningLimit", "restart_required": false }, { "description": "Size per column below which a ROS is automatically stored grouped (bytes)", "type": "integer", "name": "SmallROSSize", "restart_required": false }, { "description": "Least length of time (in seconds) a snapshot has existed for before systemTask tries to remove it", "type": "integer", "name": "SnapshotRetentionTime", "restart_required": false }, { "description": "Where Vertica sends SNMP traps - 'host_name port CommunityString'. This is a comma-separated list.", "type": "string", "name": "SnmpTrapDestinationsList", "restart_required": false }, { "description": "List of events that Vertica traps: Low Disk Space, Read Only File System, Loss of K Safety, Current Fault Tolerance at Critical Level, Too Many ROS Containers, WOS Over Flow, Node State Change, Recovery Failure, Recovery Error, Recovery Lock Error, Recovery Projection Retrieval Error, Refresh Error, Tuple Mover Error, Stale Checkpoint", "type": "string", "name": "SnmpTrapEvents", "restart_required": false }, { "description": "Enable sending of SNMP traps", "type": "integer", "name": "SnmpTrapsEnabled", "restart_required": false }, { "description": "SortCheck to generate SortCheck step in plan", "type": "integer", "name": "SortCheckOption", "restart_required": false }, { "description": "Report Level for Sort Order violation. default for error. 0 for logging, 1 for error, 2 for panic", "type": "integer", "name": "SortOrderReportLevel", "restart_required": false }, { "description": "Controls the number of Sort worker threads; 0 disables background threads", "type": "integer", "name": "SortWorkerThreads", "restart_required": false }, { "description": "The server's SSL CA certificate", "type": "string", "name": "SSLCA", "restart_required": true }, { "description": "The server's SSL certificate", "type": "string", "name": "SSLCertificate", "restart_required": true }, { "description": "The server's SSL private key", "type": "string", "name": "SSLPrivateKey", "restart_required": true }, { "description": "Disable backslash quoting in string constants; required to conform to standard SQL", "type": "integer", "name": "StandardConformingStrings", "restart_required": false }, { "description": "Sets the behavior to deal with undeclared UDx function parameters", "type": "integer", "name": "StrictUDxParameterChecking", "restart_required": false }, { "description": "Enable event trapping for Syslog", "type": "integer", "name": "SyslogEnabled", "restart_required": false }, { "description": "Low Disk Space, Read Only File System, Loss of K Safety, Current Fault Tolerance at Critical Level, Too Many ROS Containers, WOS Over Flow, Node State Change, Recovery Failure, Recovery Error, Recovery Lock Error, Recovery Projection Retrieval Error, Refresh Error, Tuple Mover Error, Stale Checkpoint", "type": "string", "name": "SyslogEvents", "restart_required": false }, { "description": "auth, uucp, authpriv (Linux only) local0, cron, local1, daemon, local2, ftp (Linux only) local3, lpr, local4, mail, local5, news, local6, user (default system) local7", "type": "string", "name": "SyslogFacility", "restart_required": false }, { "description": "Interval between system resource utilization monitoring checks (seconds)", "type": "integer", "name": "SystemMonitorInterval", "restart_required": false }, { "description": "A message is logged if a monitored system resource changes by at least 
SystemMonitorThreshold (percent)", "type": "integer", "name": "SystemMonitorThreshold", "restart_required": false }, { "description": "Turn on terraced (multi-level) data network routing if it would reduce stream count by this factor", "type": "integer", "name": "TerraceRoutingFactor", "restart_required": false }, { "description": "When updating the index to account for deleted records in the source table, pre-compute the tokens for the deleted records. This takes time, but speeds up the actual deletion.", "type": "integer", "name": "TextIndexComputeDeletedTokens", "restart_required": false }, { "description": "Default maximum size of tokens stored for text-indexing. Only affects new indices. Larger values will decrease the performance of the index.", "type": "integer", "name": "TextIndexMaxTokenLength", "restart_required": false }, { "description": "The max amount of memory TopK(Heap) can use in MB", "type": "integer", "name": "TopKHeapMaxMem", "restart_required": false }, { "description": "READ COMMITTED (Default) - Last epoch for reads and current epoch for writes. SERIALIZABLE - Current epoch for reads and writes", "type": "string", "name": "TransactionIsolationLevel", "restart_required": false }, { "description": "Determines whether the transaction is read/write or read-only. Read/write is the default", "type": "string", "name": "TransactionMode", "restart_required": false }, { "description": "Trust pk and unique constraint as guarantee of uniqueness", "type": "integer", "name": "TrustConstraintsAsUnique", "restart_required": false }, { "description": "Number of seconds to wait for UDx to finish a block of data before giving up", "type": "integer", "name": "UDxFencedBlockTimeout", "restart_required": false }, { "description": "Number of seconds to wait for UDx to finish cancel-related cleanup work", "type": "integer", "name": "UDxFencedCancelTimeout", "restart_required": false }, { "description": "Number of seconds to wait for external procedures in UDx fenced mode", "type": "integer", "name": "UDxFencedExternalProcedureTimeout", "restart_required": false }, { "description": "Optimizer uses modular hash for resegmenting intermediate results.", "type": "integer", "name": "UseModularHashForReseg", "restart_required": false }, { "description": "Optimizer only considers redistribution choices which are cost model resilient.", "type": "integer", "name": "UseOnlyResilientRedistribution", "restart_required": false }, { "description": "Include virtual table data from recovering nodes in monitoring query results", "type": "integer", "name": "UseRecoveringNodesInVirtualTableQueries", "restart_required": false }, { "description": "Use safer decompression scheme to reduce the chance of crashes if disk data has been corrupted", "type": "integer", "name": "UseSafeDecompression", "restart_required": false }, { "description": "Use 5.0-style truncating integer division for the '/' operator", "type": "integer", "name": "UseV50IntegerDivision", "restart_required": false }, { "description": "Execute the external procedures in the zygote process", "type": "integer", "name": "UseZygoteForExternalProcedures", "restart_required": false }, { "description": "Create a log entry each time a client opens a connection and closes it immediately; load balancers often do this for a health check", "type": "integer", "name": "WarnOnIncompleteStartupPacket", "restart_required": false }, { "description": "Use materialization strategy to support the WITH clause", "type": "integer", "name": "WithClauseMaterialization", 
"restart_required": false } ] } trove-5.0.0/trove/templates/couchbase/0000775000567000056710000000000012701410521021076 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/couchbase/override.config.template0000664000567000056710000000000012701410316025706 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/couchbase/config.template0000664000567000056710000000000012701410316024070 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/postgresql/0000775000567000056710000000000012701410521021345 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/postgresql/override.config.template0000664000567000056710000000011712701410316026166 0ustar jenkinsjenkins00000000000000{% for key, value in overrides.iteritems() -%} {{key}} = {{value}} {% endfor %}trove-5.0.0/trove/templates/postgresql/config.template0000664000567000056710000005450112701410316024356 0ustar jenkinsjenkins00000000000000# Pre-compute values used by the template expressions. # Note: The variables have to be in lists due to how scoping works in JINJA templates. # # The recommended amount for 'shared_buffers' on a dedicated database server is 25% of RAM. # Servers with less than 3GB of RAM require a more conservative value to save memory for other processes. {% set shared_buffers_mb = [(0.25 if flavor['ram'] >= 3072 else 0.10) * flavor['ram']] %} # # ----------------------------- # PostgreSQL configuration file # ----------------------------- # # This file consists of lines of the form: # # name = value # # (The "=" is optional.) Whitespace may be used. Comments are introduced with # "#" anywhere on a line. The complete list of parameter names and allowed # values can be found in the PostgreSQL documentation. # # The commented-out settings shown in this file represent the default values. # Re-commenting a setting is NOT sufficient to revert it to the default value; # you need to reload the server. # # This file is read on server startup and when the server receives a SIGHUP # signal. If you edit the file on a running system, you have to SIGHUP the # server for the changes to take effect, or use "pg_ctl reload". Some # parameters, which are marked below, require a server shutdown and restart to # take effect. # # Any parameter can also be given as a command-line option to the server, e.g., # "postgres -c log_connections=on". Some parameters can be changed at run time # with the "SET" SQL command. # # Memory units: kB = kilobytes Time units: ms = milliseconds # MB = megabytes s = seconds # GB = gigabytes min = minutes # TB = terabytes h = hours # d = days # # The properties marked as controlled by Trove are managed by the Trove # guest-agent. Any changes to them will be overwritten. #------------------------------------------------------------------------------ # FILE LOCATIONS #------------------------------------------------------------------------------ # The default values of these variables are driven from the -D command-line # option or PGDATA environment variable, represented here as ConfigDir. #data_directory = 'ConfigDir' # use data in another directory # (change requires restart) # (controlled by Trove) #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file # (change requires restart) # (controlled by Trove) #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file # (change requires restart) # (controlled by Trove) # If external_pid_file is not explicitly set, no extra PID file is written. 
#external_pid_file = '' # write an extra PID file # (change requires restart) # (controlled by Trove) #------------------------------------------------------------------------------ # CONNECTIONS AND AUTHENTICATION #------------------------------------------------------------------------------ # - Connection Settings - #listen_addresses = 'localhost' # what IP address(es) to listen on; # comma-separated list of addresses; # defaults to 'localhost'; use '*' for all # (change requires restart) # (controlled by Trove) #port = 5432 # (change requires restart) # (controlled by Trove) #max_connections = 100 # (change requires restart) # Note: Increasing max_connections costs ~400 bytes of shared memory per # connection slot, plus lock space (see max_locks_per_transaction). #superuser_reserved_connections = 3 # (change requires restart) #unix_socket_directories = '/tmp' # comma-separated list of directories # (change requires restart) # (controlled by Trove) #unix_socket_group = '' # (change requires restart) # (controlled by Trove) #unix_socket_permissions = 0777 # begin with 0 to use octal notation # (change requires restart) # (controlled by Trove) #bonjour = off # advertise server via Bonjour # (change requires restart) #bonjour_name = '' # defaults to the computer name # (change requires restart) # - Security and Authentication - #authentication_timeout = 1min # 1s-600s #ssl = off # (change requires restart) #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers # (change requires restart) #ssl_prefer_server_ciphers = on # (change requires restart) #ssl_ecdh_curve = 'prime256v1' # (change requires restart) #ssl_renegotiation_limit = 0 # amount of data between renegotiations #ssl_cert_file = 'server.crt' # (change requires restart) #ssl_key_file = 'server.key' # (change requires restart) #ssl_ca_file = '' # (change requires restart) #ssl_crl_file = '' # (change requires restart) #password_encryption = on #db_user_namespace = off # GSSAPI using Kerberos #krb_server_keyfile = '' #krb_caseins_users = off # - TCP Keepalives - # see "man 7 tcp" for details #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; # 0 selects the system default #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; # 0 selects the system default #tcp_keepalives_count = 0 # TCP_KEEPCNT; # 0 selects the system default #------------------------------------------------------------------------------ # RESOURCE USAGE (except WAL) #------------------------------------------------------------------------------ # - Memory - shared_buffers = {{ shared_buffers_mb[0]|int }}MB # min 128kB # (change requires restart) #huge_pages = try # on, off, or try # (change requires restart) #temp_buffers = 8MB # min 800kB #max_prepared_transactions = 0 # zero disables the feature # (change requires restart) # Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory # per transaction slot, plus lock space (see max_locks_per_transaction). # It is not advisable to set max_prepared_transactions nonzero unless you # actively intend to use prepared transactions. #work_mem = 4MB # min 64kB #maintenance_work_mem = 64MB # min 1MB #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem max_stack_depth = 7MB # min 100kB # The ideal value is the actual limit enforced # by the OS (8MB on 64-bit flavors) less a safety # margin of 1MB or so. 
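# Worked example of the Jinja sizing logic above (hypothetical flavor values,
# for illustration only): a flavor with ram = 4096 meets the 3072MB threshold,
# so shared_buffers_mb = 0.25 * 4096 = 1024 and shared_buffers renders as
# 1024MB; a flavor with ram = 2048 falls below it, giving 0.10 * 2048 = 204MB.
# The same value later drives checkpoint_segments (shared_buffers / 16 + 1,
# i.e. 65 for the 4096MB flavor), and effective_cache_size is computed as
# max(ram - 512, 512), i.e. 3584MB for the 4096MB flavor.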
#dynamic_shared_memory_type = posix # the default is the first option # supported by the operating system: # posix # sysv # windows # mmap # use none to disable dynamic shared memory # - Disk - #temp_file_limit = -1 # limits per-session temp file space # in kB, or -1 for no limit # - Kernel Resource Usage - #max_files_per_process = 1000 # min 25 # (change requires restart) #shared_preload_libraries = '' # (change requires restart) # - Cost-Based Vacuum Delay - #vacuum_cost_delay = 0 # 0-100 milliseconds #vacuum_cost_page_hit = 1 # 0-10000 credits #vacuum_cost_page_miss = 10 # 0-10000 credits #vacuum_cost_page_dirty = 20 # 0-10000 credits #vacuum_cost_limit = 200 # 1-10000 credits # - Background Writer - #bgwriter_delay = 200ms # 10-10000ms between rounds #bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round # - Asynchronous Behavior - #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching #max_worker_processes = 8 #------------------------------------------------------------------------------ # WRITE AHEAD LOG #------------------------------------------------------------------------------ # - Settings - wal_level = minimal # minimal, archive, hot_standby, or logical # (change requires restart) # (controlled by Trove) #fsync = on # turns forced synchronization on or off #synchronous_commit = on # synchronization level; # off, local, remote_write, or on #wal_sync_method = fsync # the default is the first option # supported by the operating system: # open_datasync # fdatasync (default on Linux) # fsync # fsync_writethrough # open_sync #full_page_writes = on # recover from partial page writes #wal_log_hints = off # also do full page writes of non-critical updates # (change requires restart) #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers # (change requires restart) #wal_writer_delay = 200ms # 1-10000 milliseconds #commit_delay = 0 # range 0-100000, in microseconds #commit_siblings = 5 # range 1-1000 # - Checkpoints - checkpoint_segments = {{ (shared_buffers_mb[0] / 16 + 1)|int }} # in logfile segments, min 1, 16MB each # Each segment is normally 16MB long. # The number of segments should be enough to # span the 'shared_buffers' size. # We set the default to (shared_buffers / 16 + 1). #checkpoint_timeout = 5min # range 30s-1h #checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 #checkpoint_warning = 30s # 0 disables # - Archiving - archive_mode = off # allows archiving to be done # (change requires restart) # (controlled by Trove) #archive_command = '' # command to use to archive a logfile segment # placeholders: %p = path of file to archive # %f = file name only # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' # (controlled by Trove) #archive_timeout = 0 # force a logfile segment switch after this # number of seconds; 0 disables # (controlled by Trove) #------------------------------------------------------------------------------ # REPLICATION #------------------------------------------------------------------------------ # - Sending Server(s) - # Set these on the master and on any standby that will send replication data. 
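# Illustrative example only (not a Trove default): a master streaming WAL to
# two standbys might use, e.g.:
#   max_wal_senders = 3     # one sender per standby plus headroom for pg_basebackup
#   wal_keep_segments = 32  # retain 32 x 16MB = 512MB of WAL for lagging standbys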
#max_wal_senders = 0 # max number of walsender processes # (change requires restart) #wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables #wal_sender_timeout = 60s # in milliseconds; 0 disables #max_replication_slots = 0 # max number of replication slots # (change requires restart) # - Master Server - # These settings are ignored on a standby server. #synchronous_standby_names = '' # standby servers that provide sync rep # comma-separated list of application_name # from standby(s); '*' = all #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed # - Standby Servers - # These settings are ignored on a master server. #hot_standby = off # "on" allows queries during recovery # (change requires restart) #max_standby_archive_delay = 30s # max delay before canceling queries # when reading WAL from archive; # -1 allows indefinite delay #max_standby_streaming_delay = 30s # max delay before canceling queries # when reading streaming WAL; # -1 allows indefinite delay #wal_receiver_status_interval = 10s # send replies at least this often # 0 disables #hot_standby_feedback = off # send info from standby to prevent # query conflicts #wal_receiver_timeout = 60s # time that receiver waits for # communication from master # in milliseconds; 0 disables #------------------------------------------------------------------------------ # QUERY TUNING #------------------------------------------------------------------------------ # - Planner Method Configuration - #enable_bitmapscan = on #enable_hashagg = on #enable_hashjoin = on #enable_indexscan = on #enable_indexonlyscan = on #enable_material = on #enable_mergejoin = on #enable_nestloop = on #enable_seqscan = on #enable_sort = on #enable_tidscan = on # - Planner Cost Constants - #seq_page_cost = 1.0 # measured on an arbitrary scale #random_page_cost = 4.0 # same scale as above #cpu_tuple_cost = 0.01 # same scale as above #cpu_index_tuple_cost = 0.005 # same scale as above #cpu_operator_cost = 0.0025 # same scale as above effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the amount of available RAM # less the minimum required for other processes or 512MB. # - Genetic Query Optimizer - #geqo = on #geqo_threshold = 12 #geqo_effort = 5 # range 1-10 #geqo_pool_size = 0 # selects default based on effort #geqo_generations = 0 # selects default based on effort #geqo_selection_bias = 2.0 # range 1.5-2.0 #geqo_seed = 0.0 # range 0.0-1.0 # - Other Planner Options - #default_statistics_target = 100 # range 1-10000 #constraint_exclusion = partition # on, off, or partition #cursor_tuple_fraction = 0.1 # range 0.0-1.0 #from_collapse_limit = 8 #join_collapse_limit = 8 # 1 disables collapsing of explicit # JOIN clauses #------------------------------------------------------------------------------ # ERROR REPORTING AND LOGGING #------------------------------------------------------------------------------ # - Where to Log - #log_destination = 'stderr' # Valid values are combinations of # stderr, csvlog, syslog, and eventlog, # depending on platform. csvlog # requires logging_collector to be on. # (controlled by Trove) # This is used when logging to stderr: #logging_collector = off # Enable capturing of stderr and csvlog # into log files. Required to be on for # csvlogs. 
# (change requires restart) # (controlled by Trove) # These are only used if logging_collector is on: #log_directory = 'pg_log' # directory where log files are written, # can be absolute or relative to PGDATA # (controlled by Trove) #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, # can include strftime() escapes #log_file_mode = 0600 # creation mode for log files, # begin with 0 to use octal notation # (controlled by Trove) #log_truncate_on_rotation = off # If on, an existing log file with the # same name as the new log file will be # truncated rather than appended to. # But such truncation only occurs on # time-driven rotation, not on restarts # or size-driven rotation. Default is # off, meaning append to existing files # in all cases. #log_rotation_age = 1d # Automatic rotation of logfiles will # happen after that time. 0 disables. #log_rotation_size = 10MB # Automatic rotation of logfiles will # happen after that much log output. # 0 disables. # These are relevant when logging to syslog: #syslog_facility = 'LOCAL0' #syslog_ident = 'postgres' # This is only relevant when logging to eventlog (win32): #event_source = 'PostgreSQL' # - When to Log - #client_min_messages = notice # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # log # notice # warning # error #log_min_messages = warning # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # info # notice # warning # error # log # fatal # panic #log_min_error_statement = error # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # info # notice # warning # error # log # fatal # panic (effectively off) #log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements # and their durations, > 0 logs only # statements running at least this number # of milliseconds # - What to Log - #debug_print_parse = off #debug_print_rewritten = off #debug_print_plan = off #debug_pretty_print = on #log_checkpoints = off #log_connections = off #log_disconnections = off #log_duration = off #log_error_verbosity = default # terse, default, or verbose messages #log_hostname = off #log_line_prefix = '' # special values: # %a = application name # %u = user name # %d = database name # %r = remote host and port # %h = remote host # %p = process ID # %t = timestamp without milliseconds # %m = timestamp with milliseconds # %i = command tag # %e = SQL state # %c = session ID # %l = session line number # %s = session start timestamp # %v = virtual transaction ID # %x = transaction ID (0 if none) # %q = stop here in non-session # processes # %% = '%' # e.g. 
'<%u%%%d> ' #log_lock_waits = off # log lock waits >= deadlock_timeout #log_statement = 'none' # none, ddl, mod, all #log_temp_files = -1 # log temporary files equal or larger # than the specified size in kilobytes; # -1 disables, 0 logs all temp files #log_timezone = 'GMT' #------------------------------------------------------------------------------ # RUNTIME STATISTICS #------------------------------------------------------------------------------ # - Query/Index Statistics Collector - #track_activities = on #track_counts = on #track_io_timing = off #track_functions = none # none, pl, all #track_activity_query_size = 1024 # (change requires restart) update_process_title = off # (controlled by Trove) #stats_temp_directory = 'pg_stat_tmp' # - Statistics Monitoring - #log_parser_stats = off #log_planner_stats = off #log_executor_stats = off #log_statement_stats = off #------------------------------------------------------------------------------ # AUTOVACUUM PARAMETERS #------------------------------------------------------------------------------ #autovacuum = on # Enable autovacuum subprocess? 'on' # requires track_counts to also be on. #log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and # their durations, > 0 logs only # actions running at least this number # of milliseconds. #autovacuum_max_workers = 3 # max number of autovacuum subprocesses # (change requires restart) #autovacuum_naptime = 1min # time between autovacuum runs #autovacuum_vacuum_threshold = 50 # min number of row updates before # vacuum #autovacuum_analyze_threshold = 50 # min number of row updates before # analyze #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum # (change requires restart) #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age # before forced vacuum # (change requires restart) #autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for # autovacuum, in milliseconds; # -1 means use vacuum_cost_delay #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for # autovacuum, -1 means use # vacuum_cost_limit #------------------------------------------------------------------------------ # CLIENT CONNECTION DEFAULTS #------------------------------------------------------------------------------ # - Statement Behavior - #search_path = '"$user",public' # schema names #default_tablespace = '' # a tablespace name, '' uses the default #temp_tablespaces = '' # a list of tablespace names, '' uses # only default tablespace #check_function_bodies = on #default_transaction_isolation = 'read committed' #default_transaction_read_only = off #default_transaction_deferrable = off #session_replication_role = 'origin' #statement_timeout = 0 # in milliseconds, 0 is disabled #lock_timeout = 0 # in milliseconds, 0 is disabled #vacuum_freeze_min_age = 50000000 #vacuum_freeze_table_age = 150000000 #vacuum_multixact_freeze_min_age = 5000000 #vacuum_multixact_freeze_table_age = 150000000 #bytea_output = 'hex' # hex, escape #xmlbinary = 'base64' #xmloption = 'content' # - Locale and Formatting - #datestyle = 'iso, mdy' #intervalstyle = 'postgres' #timezone = 'GMT' #timezone_abbreviations = 'Default' # Select the set of available time zone # abbreviations. Currently, there are # Default # Australia (historical usage) # India # You can create your own file in # share/timezonesets/. 
#extra_float_digits = 0 # min -15, max 3 #client_encoding = sql_ascii # actually, defaults to database # encoding # These settings are initialized by initdb, but they can be changed. #lc_messages = 'C' # locale for system error message # strings #lc_monetary = 'C' # locale for monetary formatting #lc_numeric = 'C' # locale for number formatting #lc_time = 'C' # locale for time formatting # default configuration for text search #default_text_search_config = 'pg_catalog.simple' # - Other Defaults - #dynamic_library_path = '$libdir' #local_preload_libraries = '' #session_preload_libraries = '' #------------------------------------------------------------------------------ # LOCK MANAGEMENT #------------------------------------------------------------------------------ #deadlock_timeout = 1s #max_locks_per_transaction = 64 # min 10 # (change requires restart) # Note: Each lock table slot uses ~270 bytes of shared memory, and there are # max_locks_per_transaction * (max_connections + max_prepared_transactions) # lock table slots. #max_pred_locks_per_transaction = 64 # min 10 # (change requires restart) #------------------------------------------------------------------------------ # VERSION/PLATFORM COMPATIBILITY #------------------------------------------------------------------------------ # - Previous PostgreSQL Versions - #array_nulls = on #backslash_quote = safe_encoding # on, off, or safe_encoding #default_with_oids = off #escape_string_warning = on #lo_compat_privileges = off #quote_all_identifiers = off #sql_inheritance = on #standard_conforming_strings = on #synchronize_seqscans = on # - Other Platforms and Clients - #transform_null_equals = off #------------------------------------------------------------------------------ # ERROR HANDLING #------------------------------------------------------------------------------ #exit_on_error = off # terminate session on any error? #restart_after_crash = on # reinitialize after backend crash? #------------------------------------------------------------------------------ # CONFIG FILE INCLUDES #------------------------------------------------------------------------------ # These options allow settings to be loaded from files other than the # default postgresql.conf. 
#include_dir = 'conf.d' # include files ending in '.conf' from # directory 'conf.d' #include_if_exists = 'exists.conf' # include file only if it exists #include = 'special.conf' # include file #------------------------------------------------------------------------------ # CUSTOMIZED OPTIONS #------------------------------------------------------------------------------ # Add settings for extensions heretrove-5.0.0/trove/templates/postgresql/validation-rules.json0000664000567000056710000005570312701410316025536 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "name": "max_connections", "restart_required": true, "min": 0, "type": "integer" }, { "name": "superuser_reserved_connections", "restart_required": true, "min": 1, "type": "integer" }, { "name": "bonjour", "restart_required": true, "type": "boolean" }, { "name": "bonjour_name", "restart_required": true, "type": "string" }, { "name": "authentication_timeout", "restart_required": false, "type": "string" }, { "name": "password_encryption", "restart_required": false, "type": "boolean" }, { "name": "db_user_namespace", "restart_required": false, "type": "boolean" }, { "name": "tcp_keepalives_idle", "restart_required": false, "min": 0, "type": "integer" }, { "name": "tcp_keepalives_interval", "restart_required": false, "min": 0, "type": "integer" }, { "name": "tcp_keepalives_count", "restart_required": false, "min": 0, "type": "integer" }, { "name": "shared_buffers", "restart_required": true, "type": "string" }, { "name": "huge_pages", "restart_required": true, "type": "string" }, { "name": "temp_buffers", "restart_required": false, "type": "string" }, { "name": "max_prepared_transactions", "restart_required": true, "min": 0, "type": "integer" }, { "name": "work_mem", "restart_required": false, "type": "string" }, { "name": "maintenance_work_mem", "restart_required": false, "type": "string" }, { "name": "autovacuum_work_mem", "restart_required": false, "min": -1, "type": "integer" }, { "name": "max_stack_depth", "restart_required": false, "type": "string" }, { "name": "dynamic_shared_memory_type", "restart_required": false, "type": "string" }, { "name": "temp_file_limit", "restart_required": false, "min": -1, "type": "integer" }, { "name": "max_files_per_process", "restart_required": true, "min": 0, "type": "integer" }, { "name": "vacuum_cost_delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_page_hit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_page_miss", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_page_dirty", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "bgwriter_delay", "restart_required": false, "type": "string" }, { "name": "bgwriter_lru_maxpages", "restart_required": false, "min": 0, "type": "integer" }, { "name": "bgwriter_lru_multiplier", "restart_required": false, "min": 0, "type": "integer" }, { "name": "effective_io_concurrency", "restart_required": false, "min": 0, "type": "integer" }, { "name": "max_worker_processes", "restart_required": false, "min": 0, "type": "integer" }, { "name": "fsync", "restart_required": false, "type": "boolean" }, { "name": "synchronous_commit", "restart_required": false, "type": "boolean" }, { "name": "wal_sync_method", "restart_required": false, "type": "string" }, { "name": "full_page_writes", "restart_required": false, "type": "boolean" }, { "name": 
"wal_log_hints", "restart_required": true, "type": "boolean" }, { "name": "wal_buffers", "restart_required": true, "min": -1, "type": "integer" }, { "name": "wal_writer_delay", "restart_required": false, "type": "string" }, { "name": "commit_delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "commit_siblings", "restart_required": false, "min": 0, "type": "integer" }, { "name": "checkpoint_segments", "restart_required": false, "min": 0, "type": "integer" }, { "name": "checkpoint_timeout", "restart_required": false, "type": "string" }, { "name": "checkpoint_completion_target", "restart_required": false, "type": "string" }, { "name": "checkpoint_warning", "restart_required": false, "type": "string" }, { "name": "wal_keep_segments", "restart_required": false, "min": 0, "type": "integer" }, { "name": "wal_sender_timeout", "restart_required": false, "type": "string" }, { "name": "synchronous_standby_names", "restart_required": false, "type": "string" }, { "name": "vacuum_defer_cleanup_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "hot_standby", "restart_required": true, "type": "boolean" }, { "name": "max_standby_archive_delay", "restart_required": false, "type": "string" }, { "name": "max_standby_streaming_delay", "restart_required": false, "type": "string" }, { "name": "wal_receiver_status_interval", "restart_required": false, "type": "string" }, { "name": "hot_standby_feedback", "restart_required": false, "type": "boolean" }, { "name": "wal_receiver_timeout", "restart_required": false, "type": "string" }, { "name": "enable_bitmapscan", "restart_required": false, "type": "boolean" }, { "name": "enable_hashagg", "restart_required": false, "type": "boolean" }, { "name": "enable_hashjoin", "restart_required": false, "type": "boolean" }, { "name": "enable_indexscan", "restart_required": false, "type": "boolean" }, { "name": "enable_indexonlyscan", "restart_required": false, "type": "boolean" }, { "name": "enable_material", "restart_required": false, "type": "boolean" }, { "name": "enable_mergejoin", "restart_required": false, "type": "boolean" }, { "name": "enable_nestloop", "restart_required": false, "type": "boolean" }, { "name": "enable_seqscan", "restart_required": false, "type": "boolean" }, { "name": "enable_sort", "restart_required": false, "type": "boolean" }, { "name": "enable_tidscan", "restart_required": false, "type": "boolean" }, { "name": "seq_page_cost", "restart_required": false, "min": 0, "type": "integer" }, { "name": "random_page_cost", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cpu_tuple_cost", "restart_required": false, "type": "string" }, { "name": "cpu_index_tuple_cost", "restart_required": false, "type": "string" }, { "name": "cpu_operator_cost", "restart_required": false, "type": "string" }, { "name": "effective_cache_size", "restart_required": false, "type": "string" }, { "name": "geqo", "restart_required": false, "type": "boolean" }, { "name": "geqo_threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_effort", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_pool_size", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_generations", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_selection_bias", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_seed", "restart_required": false, "min": 0, "type": "integer" }, { "name": "default_statistics_target", 
"restart_required": false, "min": 0, "type": "integer" }, { "name": "constraint_exclusion", "restart_required": false, "type": "string" }, { "name": "cursor_tuple_fraction", "restart_required": false, "type": "string" }, { "name": "from_collapse_limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "join_collapse_limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "log_truncate_on_rotation", "restart_required": false, "type": "boolean" }, { "name": "log_rotation_age", "restart_required": false, "type": "string" }, { "name": "log_rotation_size", "restart_required": false, "type": "string" }, { "name": "client_min_messages", "restart_required": false, "type": "string" }, { "name": "log_min_messages", "restart_required": false, "type": "string" }, { "name": "log_min_error_statement", "restart_required": false, "type": "string" }, { "name": "log_min_duration_statement", "restart_required": false, "min": -1, "type": "integer" }, { "name": "debug_print_parse", "restart_required": false, "type": "boolean" }, { "name": "debug_print_rewritten", "restart_required": false, "type": "boolean" }, { "name": "debug_print_plan", "restart_required": false, "type": "boolean" }, { "name": "debug_pretty_print", "restart_required": false, "type": "boolean" }, { "name": "log_checkpoints", "restart_required": false, "type": "boolean" }, { "name": "log_connections", "restart_required": false, "type": "boolean" }, { "name": "log_disconnections", "restart_required": false, "type": "boolean" }, { "name": "log_duration", "restart_required": false, "type": "boolean" }, { "name": "log_error_verbosity", "restart_required": false, "type": "string" }, { "name": "log_hostname", "restart_required": false, "type": "boolean" }, { "name": "log_line_prefix", "restart_required": false, "type": "string" }, { "name": "log_lock_waits", "restart_required": false, "type": "boolean" }, { "name": "log_statement", "restart_required": false, "type": "string" }, { "name": "log_temp_files", "restart_required": false, "min": -1, "type": "integer" }, { "name": "log_timezone", "restart_required": false, "type": "string" }, { "name": "track_activities", "restart_required": false, "type": "boolean" }, { "name": "track_counts", "restart_required": false, "type": "boolean" }, { "name": "track_io_timing", "restart_required": false, "type": "boolean" }, { "name": "track_functions", "restart_required": false, "type": "string" }, { "name": "track_activity_query_size", "restart_required": true, "min": 0, "type": "integer" }, { "name": "log_parser_stats", "restart_required": false, "type": "boolean" }, { "name": "log_planner_stats", "restart_required": false, "type": "boolean" }, { "name": "log_executor_stats", "restart_required": false, "type": "boolean" }, { "name": "log_statement_stats", "restart_required": false, "type": "boolean" }, { "name": "autovacuum", "restart_required": false, "type": "boolean" }, { "name": "log_autovacuum_min_duration", "restart_required": false, "min": -1, "type": "integer" }, { "name": "autovacuum_max_workers", "restart_required": true, "min": 0, "type": "integer" }, { "name": "autovacuum_naptime", "restart_required": false, "type": "string" }, { "name": "autovacuum_vacuum_threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "autovacuum_analyze_threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "autovacuum_vacuum_scale_factor", "restart_required": false, "type": "string" }, { "name": "autovacuum_analyze_scale_factor", 
"restart_required": false, "type": "string" }, { "name": "autovacuum_freeze_max_age", "restart_required": true, "min": 0, "type": "integer" }, { "name": "autovacuum_multixact_freeze_max_age", "restart_required": true, "min": 0, "type": "integer" }, { "name": "autovacuum_vacuum_cost_delay", "restart_required": false, "type": "string" }, { "name": "autovacuum_vacuum_cost_limit", "restart_required": false, "min": -1, "type": "integer" }, { "name": "search_path", "restart_required": false, "type": "string" }, { "name": "default_tablespace", "restart_required": false, "type": "string" }, { "name": "temp_tablespaces", "restart_required": false, "type": "string" }, { "name": "check_function_bodies", "restart_required": false, "type": "boolean" }, { "name": "default_transaction_isolation", "restart_required": false, "type": "string" }, { "name": "default_transaction_read_only", "restart_required": false, "type": "boolean" }, { "name": "default_transaction_deferrable", "restart_required": false, "type": "boolean" }, { "name": "session_replication_role", "restart_required": false, "type": "string" }, { "name": "statement_timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "lock_timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_freeze_min_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_freeze_table_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_multixact_freeze_min_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_multixact_freeze_table_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "bytea_output", "restart_required": false, "type": "string" }, { "name": "xmlbinary", "restart_required": false, "type": "string" }, { "name": "xmloption", "restart_required": false, "type": "string" }, { "name": "datestyle", "restart_required": false, "type": "string" }, { "name": "intervalstyle", "restart_required": false, "type": "string" }, { "name": "timezone", "restart_required": false, "type": "string" }, { "name": "timezone_abbreviations", "restart_required": false, "type": "string" }, { "name": "extra_float_digits", "restart_required": false, "min": 0, "type": "integer" }, { "name": "client_encoding", "restart_required": false, "type": "string" }, { "name": "lc_messages", "restart_required": false, "type": "string" }, { "name": "lc_monetary", "restart_required": false, "type": "string" }, { "name": "lc_numeric", "restart_required": false, "type": "string" }, { "name": "lc_time", "restart_required": false, "type": "string" }, { "name": "default_text_search_config", "restart_required": false, "type": "string" }, { "name": "deadlock_timeout", "restart_required": false, "type": "string" }, { "name": "max_locks_per_transaction", "restart_required": true, "min": 0, "type": "integer" }, { "name": "max_pred_locks_per_transaction", "restart_required": true, "min": 0, "type": "integer" }, { "name": "array_nulls", "restart_required": false, "type": "boolean" }, { "name": "backslash_quote", "restart_required": false, "type": "string" }, { "name": "default_with_oids", "restart_required": false, "type": "boolean" }, { "name": "escape_string_warning", "restart_required": false, "type": "boolean" }, { "name": "lo_compat_privileges", "restart_required": false, "type": "boolean" }, { "name": "quote_all_identifiers", "restart_required": false, "type": "boolean" }, { "name": "sql_inheritance", "restart_required": false, "type": "boolean" }, { 
"name": "standard_conforming_strings", "restart_required": false, "type": "boolean" }, { "name": "synchronize_seqscans", "restart_required": false, "type": "boolean" }, { "name": "transform_null_equals", "restart_required": false, "type": "boolean" }, { "name": "exit_on_error", "restart_required": false, "type": "boolean" }, { "name": "restart_after_crash", "restart_required": false, "type": "boolean" } ] } trove-5.0.0/trove/templates/couchdb/0000775000567000056710000000000012701410521020551 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/couchdb/override.config.template0000664000567000056710000000000012701410316025361 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/couchdb/config.template0000664000567000056710000000000012701410316023543 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/mysql/0000775000567000056710000000000012701410521020307 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/mysql/replica.config.template0000664000567000056710000000041312701410316024727 0ustar jenkinsjenkins00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log relay_log_info_repository = TABLE relay_log_recovery = 1 relay_log_purge = 1 log_slave_updates = ON enforce_gtid_consistency = ON gtid_mode = ON read_only = true trove-5.0.0/trove/templates/mysql/override.config.template0000664000567000056710000000033712701410316025134 0ustar jenkinsjenkins00000000000000[mysqld] {% for key, value in overrides.iteritems() -%} {%- if value == True -%} {{key}} = 1 {%- elif value == False -%} {{key}} = 0 {%- elif value == "" -%} {{key}} {%- else -%} {{key}}={{value}} {%- endif %} {% endfor %} trove-5.0.0/trove/templates/mysql/mysql-test/0000775000567000056710000000000012701410521022431 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/templates/mysql/mysql-test/config.template0000664000567000056710000000001312701410316025427 0ustar jenkinsjenkins00000000000000hyper = 0M trove-5.0.0/trove/templates/mysql/config.template0000664000567000056710000000274212701410316023320 0ustar jenkinsjenkins00000000000000[client] port = 3306 [mysqld_safe] nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data ####tmpdir = /tmp tmpdir = /var/tmp pid_file = /var/run/mysqld/mysqld.pid skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover = BACKUP query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} performance_schema = ON [mysqldump] quick = 1 quote-names = 1 
max_allowed_packet = 16M

[isamchk]
key_buffer = 16M

!includedir /etc/mysql/conf.d/
trove-5.0.0/trove/templates/mysql/5.5/0000775000567000056710000000000012701410521020616 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/templates/mysql/5.5/replica.config.template0000664000567000056710000000017212701410316025240 0ustar jenkinsjenkins00000000000000[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log
relay_log = /var/lib/mysql/data/mysql-relay-bin.log
read_only = true
trove-5.0.0/trove/templates/mysql/5.5/replica_source.config.template0000664000567000056710000000006512701410316026621 0ustar jenkinsjenkins00000000000000[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log
trove-5.0.0/trove/templates/mysql/validation-rules.json0000664000567000056710000001405512701410316024473 0ustar jenkinsjenkins00000000000000{
  "configuration-parameters": [
    { "name": "innodb_file_per_table", "restart_required": false, "max": 1, "min": 0, "type": "integer" },
    { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" },
    { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" },
    { "name": "lower_case_table_names", "restart_required": true, "max": 2, "min": 0, "type": "integer" },
    { "name": "key_buffer_size", "restart_required": false, "max": 4294967295, "min": 8, "type": "integer" },
    { "name": "connect_timeout", "restart_required": false, "max": 31536000, "min": 2, "type": "integer" },
    { "name": "join_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 128, "type": "integer" },
    { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 32768, "type": "integer" },
    { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 18446744073709551615, "min": 5242880, "type": "integer" },
    { "name": "innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" },
    { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967295, "min": 262144, "type": "integer" },
    { "name": "innodb_open_files", "restart_required": true, "max": 4294967295, "min": 10, "type": "integer" },
    { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" },
    { "name": "sync_binlog", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" },
    { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" },
    { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": "integer" },
    { "name": "bulk_insert_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 0, "type": "integer" },
    { "name": "expire_logs_days", "restart_required": false, "max": 99, "min": 0, "type": "integer" },
    { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" },
    { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" },
    { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709551615, "min": 1, "type": "integer" },
    { "name": "max_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" },
    { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 4096, "type": "integer" },
    { "name": "max_user_connections", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" },
    { "name": "server_id", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" },
    { "name": "wait_timeout", "restart_required": false, "max": 31536000, "min": 1, "type": "integer" },
    { "name": "character_set_client", "restart_required": false, "type": "string" },
    { "name": "character_set_connection", "restart_required": false, "type": "string" },
    { "name": "character_set_database", "restart_required": false, "type": "string" },
    { "name": "character_set_filesystem", "restart_required": false, "type": "string" },
    { "name": "character_set_results", "restart_required": false, "type": "string" },
    { "name": "character_set_server", "restart_required": false, "type": "string" },
    { "name": "collation_connection", "restart_required": false, "type": "string" },
    { "name": "collation_database", "restart_required": false, "type": "string" },
    { "name": "collation_server", "restart_required": false, "type": "string" },
    { "name": "performance_schema", "restart_required": true, "type": "boolean" }
  ]
}
trove-5.0.0/trove/templates/mysql/replica_source.config.template0000664000567000056710000000021712701410316026311 0ustar jenkinsjenkins00000000000000[mysqld]
log_bin = /var/lib/mysql/data/mysql-bin.log
binlog_format = MIXED
enforce_gtid_consistency = ON
gtid_mode = ON
log_slave_updates = ON
trove-5.0.0/trove/templates/db2/0000775000567000056710000000000012701410521017611 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/templates/db2/override.config.template0000664000567000056710000000000012701410316024421 0ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/templates/db2/config.template0000664000567000056710000000000012701410316022603 0ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/network/0000775000567000056710000000000012701410521016635 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/network/__init__.py0000664000567000056710000000000012701410316020736 0ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/network/base.py0000664000567000056710000000322112701410316020121 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class NetworkDriver(object):
    """Base Network Driver class to abstract the network driver used."""

    @abc.abstractmethod
    def get_sec_group_by_id(self, group_id):
        """
        Returns security group with given group_id
        """

    @abc.abstractmethod
    def create_security_group(self, name, description):
        """
        Creates the security group with given name and description
        """

    @abc.abstractmethod
    def delete_security_group(self, sec_group_id):
        """Deletes the security group by given ID."""

    @abc.abstractmethod
    def add_security_group_rule(self, sec_group_id, protocol,
                                from_port, to_port, cidr):
        """
        Adds the rule identified by the security group ID,
        transport protocol, port range: from -> to, CIDR.
        """

    @abc.abstractmethod
    def delete_security_group_rule(self, sec_group_rule_id):
        """Deletes the rule by given ID."""
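A hedged sketch of how rules files like the one above get used: trove-api vets each configuration-group value against the matching entry before applying it. The helper below is illustrative only; its name and signature are not from the Trove tree (the real validation lives in trove's configuration service code).

import json

def validate_param(rules_path, name, value):
    """Check 'value' against the matching entry in a validation-rules.json."""
    with open(rules_path) as f:
        rules = json.load(f)["configuration-parameters"]
    # Each entry names one settable parameter; anything else is rejected.
    rule = next((r for r in rules if r["name"] == name), None)
    if rule is None:
        raise ValueError("%s is not a supported configuration parameter" % name)
    if rule["type"] == "integer":
        value = int(value)
        if "min" in rule and value < rule["min"]:
            raise ValueError("%s is below the minimum %s" % (name, rule["min"]))
        if "max" in rule and value > rule["max"]:
            raise ValueError("%s is above the maximum %s" % (name, rule["max"]))
    # Callers use this flag to decide whether applying the value
    # requires a datastore restart.
    return rule.get("restart_required", False)

For example, validate_param("validation-rules.json", "max_connections", 500) passes and returns False, while a value above 100000 raises ValueError.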
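The abstract base above fixes the security-group surface every network backend must provide; the two concrete drivers (nova.py and neutron.py, below) fill it in. As a hedged illustration, not part of the 5.0.0 tree, the smallest conforming driver is just the five overrides:

# Illustrative only: a do-nothing driver showing the abstract surface a
# real implementation must cover.
from trove.network import base


class NoopNetwork(base.NetworkDriver):

    def get_sec_group_by_id(self, group_id):
        return None

    def create_security_group(self, name, description):
        # A real driver returns a group object with at least an 'id'.
        return {'name': name, 'description': description}

    def delete_security_group(self, sec_group_id):
        pass

    def add_security_group_rule(self, sec_group_id, protocol,
                                from_port, to_port, cidr):
        pass

    def delete_security_group_rule(self, sec_group_rule_id):
        pass

Omitting any of the five @abc.abstractmethod overrides would make instantiation fail with TypeError, which is the point of the six.add_metaclass(abc.ABCMeta) declaration.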
""" @abc.abstractmethod def delete_security_group_rule(self, sec_group_rule_id): """Deletes the rule by given ID.""" trove-5.0.0/trove/network/nova.py0000664000567000056710000000603012701410316020153 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common import exception from trove.common import remote from trove.network import base LOG = logging.getLogger(__name__) class NovaNetwork(base.NetworkDriver): def __init__(self, context): try: self.client = remote.create_nova_client( context) except nova_exceptions.ClientException as e: raise exception.TroveError(str(e)) def get_sec_group_by_id(self, group_id): try: return self.client.security_groups.get(group_id) except nova_exceptions.ClientException as e: LOG.exception('Failed to get remote security group') raise exception.TroveError(str(e)) def create_security_group(self, name, description): try: sec_group = self.client.security_groups.create( name=name, description=description) return sec_group except nova_exceptions.ClientException as e: LOG.exception('Failed to create remote security group') raise exception.SecurityGroupCreationError(str(e)) def delete_security_group(self, sec_group_id): try: self.client.security_groups.delete(sec_group_id) except nova_exceptions.ClientException as e: LOG.exception('Failed to delete remote security group') raise exception.SecurityGroupDeletionError(str(e)) def add_security_group_rule(self, sec_group_id, protocol, from_port, to_port, cidr): try: sec_group_rule = self.client.security_group_rules.create( parent_group_id=sec_group_id, ip_protocol=protocol, from_port=from_port, to_port=to_port, cidr=cidr) return sec_group_rule except nova_exceptions.ClientException as e: LOG.exception('Failed to add rule to remote security group') raise exception.SecurityGroupRuleCreationError(str(e)) def delete_security_group_rule(self, sec_group_rule_id): try: self.client.security_group_rules.delete(sec_group_rule_id) except nova_exceptions.ClientException as e: LOG.exception('Failed to delete rule to remote security group') raise exception.SecurityGroupRuleDeletionError(str(e)) trove-5.0.0/trove/network/neutron.py0000664000567000056710000001403312701410316020704 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
trove-5.0.0/trove/network/neutron.py0000664000567000056710000001403312701410316020704 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging

from trove.common import exception
from trove.common import remote
from trove.network import base


LOG = logging.getLogger(__name__)

CONST = {'IPv4': "IPv4",
         'IPv6': "IPv6",
         'INGRESS': "ingress",
         'EGRESS': "egress",
         'PROTO_NAME_TCP': 'tcp',
         'PROTO_NAME_ICMP': 'icmp',
         'PROTO_NAME_ICMP_V6': 'icmpv6',
         'PROTO_NAME_UDP': 'udp'}


class NovaNetworkStruct(object):
    def __init__(self, **properties):
        self.__dict__.update(properties)


class NeutronDriver(base.NetworkDriver):

    def __init__(self, context):
        try:
            self.client = remote.create_neutron_client(context)
        except neutron_exceptions.NeutronClientException as e:
            raise exception.TroveError(str(e))

    def get_sec_group_by_id(self, group_id):
        try:
            return self.client.show_security_group(security_group=group_id)
        except neutron_exceptions.NeutronClientException as e:
            LOG.exception('Failed to get remote security group')
            raise exception.TroveError(str(e))

    def create_security_group(self, name, description):
        try:
            sec_group_body = {"security_group": {"name": name,
                                                 "description": description}}
            sec_group = self.client.create_security_group(body=sec_group_body)
            return self._convert_to_nova_security_group_format(
                sec_group.get('security_group', sec_group))
        except neutron_exceptions.NeutronClientException as e:
            LOG.exception('Failed to create remote security group')
            raise exception.SecurityGroupCreationError(str(e))

    def delete_security_group(self, sec_group_id):
        try:
            self.client.delete_security_group(security_group=sec_group_id)
        except neutron_exceptions.NeutronClientException as e:
            LOG.exception('Failed to delete remote security group')
            raise exception.SecurityGroupDeletionError(str(e))

    def add_security_group_rule(self, sec_group_id, protocol,
                                from_port, to_port, cidr,
                                direction=CONST['INGRESS'],
                                ethertype=CONST['IPv4']):
        try:
            secgroup_rule_body = {"security_group_rule":
                                  {"security_group_id": sec_group_id,
                                   "protocol": protocol,
                                   "port_range_min": from_port,
                                   "port_range_max": to_port,
                                   "remote_ip_prefix": cidr,
                                   "direction": direction,  # ingress | egress
                                   "ethertype": ethertype,  # IPv4 | IPv6
                                   }}
            secgroup_rule = self.client.create_security_group_rule(
                secgroup_rule_body)
            return self._convert_to_nova_security_group_rule_format(
                secgroup_rule.get('security_group_rule', secgroup_rule))
        except neutron_exceptions.NeutronClientException as e:
            # ignore error if rule already exists
            if e.status_code == 409:
                LOG.exception("secgroup rule already exists")
            else:
                LOG.exception('Failed to add rule to remote security group')
                raise exception.SecurityGroupRuleCreationError(str(e))

    def delete_security_group_rule(self, sec_group_rule_id):
        try:
            self.client.delete_security_group_rule(
                security_group_rule=sec_group_rule_id)
        except neutron_exceptions.NeutronClientException as e:
            LOG.exception('Failed to delete rule from remote security group')
            raise exception.SecurityGroupRuleDeletionError(str(e))

    def _convert_to_nova_security_group_format(self, security_group):
        nova_group = {}
        nova_group['id'] = security_group['id']
        nova_group['description'] = security_group['description']
        nova_group['name'] = security_group['name']
        nova_group['project_id'] = security_group['tenant_id']
        nova_group['rules'] = []
        for rule in security_group.get('security_group_rules', []):
            if rule['direction'] == 'ingress':
                nova_group['rules'].append(
                    self._convert_to_nova_security_group_rule_format(rule))
        return NovaNetworkStruct(**nova_group)

    def _convert_to_nova_security_group_rule_format(self, rule):
        nova_rule = {}
        nova_rule['id'] = rule['id']
        nova_rule['parent_group_id'] = rule['security_group_id']
        nova_rule['protocol'] = rule['protocol']
        if (nova_rule['protocol'] and rule.get('port_range_min') is None and
                rule.get('port_range_max') is None):
            if rule['protocol'].upper() in ['TCP', 'UDP']:
                nova_rule['from_port'] = 1
                nova_rule['to_port'] = 65535
            else:
                nova_rule['from_port'] = -1
                nova_rule['to_port'] = -1
        else:
            nova_rule['from_port'] = rule.get('port_range_min')
            nova_rule['to_port'] = rule.get('port_range_max')
        nova_rule['group_id'] = rule['remote_group_id']
        nova_rule['cidr'] = rule.get('remote_ip_prefix')
        return NovaNetworkStruct(**nova_rule)
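The port-defaulting branch in _convert_to_nova_security_group_rule_format above deserves a worked example (hedged: the input dict is hand-made, not a live Neutron response). A TCP ingress rule that omits its port range comes back spanning the whole port space:

from trove.network.neutron import NeutronDriver

# Hand-made Neutron rule dict; note there are no port_range_min/max keys.
rule = {'id': 'rule-1', 'security_group_id': 'sg-1', 'protocol': 'tcp',
        'direction': 'ingress', 'remote_group_id': None,
        'remote_ip_prefix': '10.0.0.0/24'}

# The converter never touches self.client, so for illustration it can be
# exercised on a driver created without __init__ (no Neutron endpoint needed).
driver = NeutronDriver.__new__(NeutronDriver)
nova_rule = driver._convert_to_nova_security_group_rule_format(rule)

# protocol is tcp and both range keys are absent, so the full range applies:
assert (nova_rule.from_port, nova_rule.to_port) == (1, 65535)
assert nova_rule.cidr == '10.0.0.0/24'

For icmp the same branch yields (-1, -1), matching nova's convention for portless protocols.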
trove-5.0.0/trove/locale/0000775000567000056710000000000012701410521016403 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/locale/trove-log-warning.pot0000664000567000056710000000207512701410316022516 0ustar jenkinsjenkins00000000000000# Translations template for trove.
# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the trove project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: trove 5.0.0.0b2\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-01-22 06:15+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.2.0\n"

#: trove/common/profile.py:39
msgid ""
"The OpenStack Profiler is enabled. Using one of the hmac_keys specified "
"in the trove.conf file (typically in /etc/trove), a trace can be made of "
"all requests. Only an admin user can retrieve the trace information, "
"however.\n"
"To disable the profiler, add the following to the configuration file:\n"
"[profiler]\n"
"enabled=false"
msgstr ""

#: trove/instance/models.py:459
#, python-format
msgid "Could not load instance %s."
msgstr ""
trove-5.0.0/trove/locale/fr/0000775000567000056710000000000012701410521017012 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/locale/fr/LC_MESSAGES/0000775000567000056710000000000012701410521020577 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/locale/fr/LC_MESSAGES/trove-log-error.po0000664000567000056710000000300712701410316024206 0ustar jenkinsjenkins00000000000000# Translations template for trove.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the trove project.
#
# Translators:
# Corinne Verheyde , 2015
# Frédéric , 2014
# OpenStack Infra , 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: trove 5.0.0.dev47\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-09-24 09:51+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-07-24 09:45+0000\n"
"Last-Translator: Corinne Verheyde \n"
"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: French\n"

#, python-format
msgid "Could not find nova server_id(%s)."
msgstr "nova server_id(%s) introuvable."

#, python-format
msgid "Could not load compute instance %s."
msgstr "Impossible de charger l'instance compute %s."

#, python-format
msgid "Server %(server)s for instance %(instance)s was found twice!"
msgstr "Serveur %(server)s pour l'instance %(instance)s trouvé en double!"

#, python-format
msgid "Server status could not be read for instance id(%s)."
msgstr "Le statut du serveur n'a pas pu être lu pour l'instance id(%s)."
#, python-format msgid "" "While shutting down instance (%(instance)s): server had status (%(status)s)." msgstr "" "Pendant l'arrêt de l'instance (%(instance)s): le serveur avait le statut " "(%(status)s)." trove-5.0.0/trove/locale/fr/LC_MESSAGES/trove-log-warning.po0000664000567000056710000000320212701410316024517 0ustar jenkinsjenkins00000000000000# Translations template for trove. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the trove project. # # Translators: # Corinne Verheyde , 2015 # Frédéric , 2014 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: trove 5.0.0.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2015-09-24 09:51+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-07-24 10:07+0000\n" "Last-Translator: Corinne Verheyde \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "Could not load instance %s." msgstr "Impossible de charger l'instance %s." msgid "" "The OpenStack Profiler is enabled. Using one of the hmac_keys specified in " "the api-paste.ini file (typically in /etc/trove), a trace can be made of all " "requests. Only an admin user can retrieve the trace information, however.\n" "To disable the profiler, add the following to the configuration file:\n" "[profiler]\n" "enabled=false" msgstr "" "Le Profiler OpenStack est activé. Utilisant une des hmac_keys spécifiées " "dans le fichier api-paste.ini (typiquement /etc/trove), une trace peut être " "faite de toutes les requêtes. Néanmoins, seul un administrateur peut " "récupérer les informations de la trace.\n" "Pour désactiver le profiler, ajouter les lignes suivantes au fichier de " "configuration:\n" "[profiler]\n" "enabled=false" trove-5.0.0/trove/locale/fr/LC_MESSAGES/trove-log-info.po0000664000567000056710000000762612701410316024023 0ustar jenkinsjenkins00000000000000# Translations template for trove. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the trove project. # # Translators: # Corinne Verheyde , 2015 # Frédéric , 2014 # Corinne Verheyde , 2015. #zanata # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: trove 5.0.0.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2015-09-24 09:51+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-22 11:06+0000\n" "Last-Translator: Corinne Verheyde \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "Creating a database instance for tenant '%s'" msgstr "Création d'une instance de base de données pour le tenant '%s'" #, python-format msgid "Deleting database instance '%(instance_id)s' for tenant '%(tenant_id)s'" msgstr "" "Suppression de l'instance de base de données '%(instance_id)s' pour le " "tenant '%(tenant_id)s'" #, python-format msgid "Detaching instance %s from its replication source." msgstr "Instance %s en cours de détachement de sa source de réplication." #, python-format msgid "Editing instance for tenant id %s." msgstr "Édition de l'instance for tenant_id %s." #, python-format msgid "Ejecting replica source %s from it's replication set." 
msgstr "Éviction du source replica %s de son jeu de réplication " #, python-format msgid "Enabling root for cluster '%s'." msgstr "Activation de root pour le cluster '%s'." #, python-format msgid "Enabling root for instance '%s'." msgstr "Activation de root pour l'instance '%s'." msgid "Enabling root." msgstr "Activation de root." #, python-format msgid "Getting default configuration for instance %s" msgstr "Récupération de la configuration par défaut pour l'instance %s" #, python-format msgid "Listing backups for instance '%s'" msgstr "Listing en cours des sauvegardes pour l'instance '%s'" #, python-format msgid "Listing database instances for tenant '%s'" msgstr "" "Listing en cours des instances de bases de données pour le locataire '%s'" #, python-format msgid "Migrating instance id = %(instance_id)s to host = %(host)s." msgstr "Migration de l'instance id = %(instance_id)s vers le host = %(host)s." #, python-format msgid "" "Performing %(action_name)s action against instance %(instance_id)s for " "tenant '%(tenant_id)s'" msgstr "" "Action %(action_name)s en cours sur l'instance %(instance_id)s pour le " "tenant '%(tenant_id)s'" #, python-format msgid "Promoting instance %s to replication source." msgstr "Promotion de l'instance %s en source de réplication." #, python-format msgid "Rebooting instance %s." msgstr "Redémarrage de l'instance %s." #, python-format msgid "Resetting task status to NONE on instance %s." msgstr "Réinitialisation du statut de la tâche à NONE sur l'instance %s." #, python-format msgid "Resizing instance %(instance_id)s flavor to %(flavor_id)s." msgstr "" "Redimensionnement du gabarit de l'instance %(instance_id)s vers " "%(flavor_id)s." #, python-format msgid "Resizing volume of instance %s." msgstr "Redimensionnement du volume de l'instance %s." #, python-format msgid "Restarting datastore on instance %s." msgstr "Redémarrage du datastore sur l'instance %s." #, python-format msgid "Showing database instance '%(instance_id)s' for tenant '%(tenant_id)s'" msgstr "" "Présentation de l'instance de base de données '%(instance_id)s' pour le " "tenant '%(tenant_id)s'" #, python-format msgid "Updating database instance '%(instance_id)s' for tenant '%(tenant_id)s'" msgstr "" "Mise à jour de l'instance de base de données '%(instance_id)s' pour le " "tenant '%(tenant_id)s'" #, python-format msgid "" "req : '%s'\n" "\n" msgstr "" "req : '%s'\n" "\n" trove-5.0.0/trove/locale/trove-log-error.pot0000664000567000056710000000233112701410316022175 0ustar jenkinsjenkins00000000000000# Translations template for trove. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the trove project. # FIRST AUTHOR , 2015. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: trove 4.0.0.0b3.dev14\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2015-08-03 06:13+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.0\n" #: trove/instance/models.py:78 #, python-format msgid "Could not find nova server_id(%s)." msgstr "" #: trove/instance/models.py:319 #, python-format msgid "" "While shutting down instance (%(instance)s): server had status " "(%(status)s)." msgstr "" #: trove/instance/models.py:479 #, python-format msgid "Could not load compute instance %s." 
msgstr "" #: trove/instance/models.py:1102 #, python-format msgid "Server %(server)s for instance %(instance)s was found twice!" msgstr "" #: trove/instance/models.py:1181 trove/instance/models.py:1187 #, python-format msgid "Server status could not be read for instance id(%s)." msgstr "" trove-5.0.0/trove/locale/trove-log-info.pot0000664000567000056710000000703012701410316022000 0ustar jenkinsjenkins00000000000000# Translations template for trove. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the trove project. # FIRST AUTHOR , 2015. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: trove 5.0.0.dev1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2015-09-29 06:12+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.1.1\n" #: trove/extensions/common/service.py:60 trove/extensions/vertica/service.py:38 #, python-format msgid "Getting root enabled for instance '%s'." msgstr "" #: trove/extensions/common/service.py:61 trove/extensions/common/service.py:71 #: trove/extensions/vertica/service.py:39 #: trove/extensions/vertica/service.py:59 #, python-format msgid "" "req : '%s'\n" "\n" msgstr "" #: trove/extensions/common/service.py:70 trove/extensions/vertica/service.py:58 #, python-format msgid "Enabling root for instance '%s'." msgstr "" #: trove/extensions/vertica/service.py:45 #, python-format msgid "Getting root enabled for cluster '%s'." msgstr "" #: trove/extensions/vertica/service.py:71 #, python-format msgid "Enabling root for cluster '%s'." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:281 msgid "Enabling root." msgstr "" #: trove/instance/models.py:632 #, python-format msgid "Resetting task status to NONE on instance %s." msgstr "" #: trove/instance/models.py:870 #, python-format msgid "Resizing instance %(instance_id)s flavor to %(flavor_id)s." msgstr "" #: trove/instance/models.py:907 #, python-format msgid "Resizing volume of instance %s." msgstr "" #: trove/instance/models.py:930 #, python-format msgid "Rebooting instance %s." msgstr "" #: trove/instance/models.py:938 #, python-format msgid "Restarting datastore on instance %s." msgstr "" #: trove/instance/models.py:952 #, python-format msgid "Detaching instance %s from its replication source." msgstr "" #: trove/instance/models.py:961 #, python-format msgid "Promoting instance %s to replication source." msgstr "" #: trove/instance/models.py:976 #, python-format msgid "Ejecting replica source %s from it's replication set." msgstr "" #: trove/instance/models.py:999 #, python-format msgid "Migrating instance id = %(instance_id)s to host = %(host)s." 
msgstr "" #: trove/instance/service.py:91 #, python-format msgid "" "Performing %(action_name)s action against instance %(instance_id)s for " "tenant '%(tenant_id)s'" msgstr "" #: trove/instance/service.py:146 #, python-format msgid "Listing database instances for tenant '%s'" msgstr "" #: trove/instance/service.py:159 #, python-format msgid "Listing backups for instance '%s'" msgstr "" #: trove/instance/service.py:171 #, python-format msgid "Showing database instance '%(instance_id)s' for tenant '%(tenant_id)s'" msgstr "" #: trove/instance/service.py:184 #, python-format msgid "Deleting database instance '%(instance_id)s' for tenant '%(tenant_id)s'" msgstr "" #: trove/instance/service.py:197 #, python-format msgid "Creating a database instance for tenant '%s'" msgstr "" #: trove/instance/service.py:280 #, python-format msgid "Updating database instance '%(instance_id)s' for tenant '%(tenant_id)s'" msgstr "" #: trove/instance/service.py:299 #, python-format msgid "Editing instance for tenant id %s." msgstr "" #: trove/instance/service.py:321 #, python-format msgid "Getting default configuration for instance %s" msgstr "" trove-5.0.0/trove/locale/trove.pot0000664000567000056710000023565612701410316020311 0ustar jenkinsjenkins00000000000000# Translations template for trove. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the trove project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: trove 5.0.0.0b2\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-22 06:15+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: trove/backup/models.py:97 #, python-format msgid "Unable to create backup record for instance: %s" msgstr "" #: trove/backup/models.py:260 #, python-format msgid "Backup %s cannot be deleted because it is running." msgstr "" #: trove/backup/models.py:310 #, python-format msgid "Bad location for backup object: %s" msgstr "" #: trove/backup/service.py:58 #, python-format msgid "Creating a backup for tenant %s" msgstr "" #: trove/backup/service.py:69 #, python-format msgid "Deleting backup for tenant %(tenant_id)s ID: %(backup_id)s" msgstr "" #: trove/cluster/models.py:129 #, python-format msgid "Setting task to NONE on cluster %s" msgstr "" #: trove/cluster/models.py:208 #: trove/common/strategies/cluster/experimental/mongodb/api.py:245 #: trove/common/strategies/cluster/experimental/mongodb/api.py:450 #, python-format msgid "" "This action cannot be performed on the cluster while the current cluster " "task is '%s'." 
msgstr "" #: trove/cluster/service.py:56 trove/cluster/service.py:86 #: trove/cluster/service.py:97 trove/cluster/service.py:111 #: trove/cluster/service.py:122 trove/cluster/service.py:137 #: trove/extensions/account/service.py:35 #: trove/extensions/account/service.py:46 #: trove/extensions/mgmt/clusters/service.py:44 #: trove/extensions/mgmt/clusters/service.py:60 #: trove/extensions/mgmt/clusters/service.py:73 #: trove/extensions/mgmt/host/service.py:34 #: trove/extensions/mgmt/host/service.py:43 #: trove/extensions/mgmt/instances/service.py:50 #: trove/extensions/mgmt/instances/service.py:74 #: trove/extensions/mgmt/instances/service.py:154 #: trove/extensions/mgmt/instances/service.py:173 #: trove/extensions/mgmt/instances/service.py:185 #: trove/extensions/mgmt/instances/service.py:198 #: trove/extensions/mgmt/quota/service.py:34 #: trove/extensions/mgmt/upgrade/service.py:36 #: trove/extensions/mgmt/volume/service.py:34 #: trove/extensions/mysql/service.py:57 trove/extensions/mysql/service.py:68 #: trove/extensions/mysql/service.py:81 trove/extensions/mysql/service.py:104 #: trove/extensions/mysql/service.py:121 trove/extensions/mysql/service.py:143 #: trove/extensions/mysql/service.py:191 trove/extensions/mysql/service.py:207 #: trove/extensions/mysql/service.py:222 trove/extensions/mysql/service.py:245 #: trove/extensions/mysql/service.py:256 trove/extensions/mysql/service.py:266 #, python-format msgid "" "req : '%s'\n" "\n" msgstr "" #: trove/cluster/service.py:57 trove/cluster/service.py:87 #: trove/cluster/service.py:112 trove/extensions/mgmt/clusters/service.py:61 #: trove/extensions/mgmt/host/service.py:45 #: trove/extensions/mgmt/instances/service.py:76 #: trove/extensions/mgmt/instances/service.py:156 #: trove/extensions/mgmt/instances/service.py:187 #: trove/extensions/mgmt/instances/service.py:199 #, python-format msgid "" "id : '%s'\n" "\n" msgstr "" #: trove/cluster/service.py:59 trove/extensions/mgmt/clusters/service.py:75 #: trove/extensions/mgmt/configuration/service.py:81 #: trove/extensions/mgmt/configuration/service.py:108 #: trove/extensions/mgmt/host/instance/service.py:35 #: trove/extensions/mgmt/host/instance/service.py:53 #: trove/extensions/mgmt/instances/service.py:97 #: trove/extensions/mgmt/instances/service.py:120 #: trove/extensions/mgmt/quota/service.py:44 trove/instance/service.py:74 msgid "Invalid request body." msgstr "" #: trove/cluster/service.py:71 #, python-format msgid "No action '%(action)s' supplied by strategy for manager '%(manager)s'" msgstr "" #: trove/cluster/service.py:98 #, python-format msgid "" "cluster_id : '%s'\n" "\n" msgstr "" #: trove/cluster/service.py:99 #, python-format msgid "" "instance_id : '%s'\n" "\n" msgstr "" #: trove/cluster/service.py:138 trove/extensions/mysql/service.py:69 #: trove/extensions/mysql/service.py:257 #, python-format msgid "" "body : '%s'\n" "\n" msgstr "" #: trove/cmd/manage.py:225 #, python-format msgid "Possible wrong number of arguments supplied %s." msgstr "" #: trove/cmd/manage.py:228 msgid "Command failed, please check log for more info." msgstr "" #: trove/common/auth.py:65 #, python-format msgid "Authorized tenant '%(tenant_id)s' request: %(request)s" msgstr "" #: trove/common/auth.py:70 #, python-format msgid "User with tenant id %s cannot access this resource." 
msgstr "" #: trove/common/base_exception.py:103 msgid "Uncaught exception" msgstr "" #: trove/common/base_wsgi.py:112 #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" #: trove/common/base_wsgi.py:373 msgid "Unsupported Content-Type" msgstr "" #: trove/common/base_wsgi.py:376 msgid "Malformed request body" msgstr "" #: trove/common/base_wsgi.py:662 msgid "Empty body provided in request" msgstr "" #: trove/common/base_wsgi.py:668 msgid "Unrecognized Content-Type provided in request" msgstr "" #: trove/common/base_wsgi.py:672 msgid "No Content-Type provided in request" msgstr "" #: trove/common/base_wsgi.py:678 msgid "Unable to deserialize body as provided Content-Type" msgstr "" #: trove/common/base_wsgi.py:728 msgid "cannot understand JSON" msgstr "" #: trove/common/base_wsgi.py:752 msgid "cannot understand XML" msgstr "" #: trove/common/debug_utils.py:99 msgid "" "Enabling debugging with eventlet monkey patched produce unexpected " "behavior." msgstr "" #: trove/common/debug_utils.py:120 #, python-format msgid "" "Can't connect to remote debug server. Continuing to work in standard " "mode. Error: %s." msgstr "" #: trove/common/exception.py:56 #, python-format msgid "Failed to save %(model_name)s because: %(error)s." msgstr "" #: trove/common/exception.py:61 msgid "Invalid RPC Connection Reuse." msgstr "" #: trove/common/exception.py:66 trove/common/exception.py:81 #, python-format msgid "Resource %(uuid)s cannot be found." msgstr "" #: trove/common/exception.py:71 #, python-format msgid "Capability '%(capability)s' cannot be found." msgstr "" #: trove/common/exception.py:76 #, python-format msgid "Capability '%(capability)s' is disabled." msgstr "" #: trove/common/exception.py:86 #, python-format msgid "User %(uuid)s cannot be found on the instance." msgstr "" #: trove/common/exception.py:91 #, python-format msgid "Database %(uuid)s cannot be found on the instance." msgstr "" #: trove/common/exception.py:96 #, python-format msgid "Cannot find compute instance %(server_id)s for instance %(instance_id)s." msgstr "" #: trove/common/exception.py:99 #, python-format msgid "Resource %(instance_id)s can not be retrieved." msgstr "" #: trove/common/exception.py:104 #, python-format msgid "DnsRecord with name= %(name)s not found." msgstr "" #: trove/common/exception.py:109 #, python-format msgid "Datastore '%(datastore)s' cannot be found." msgstr "" #: trove/common/exception.py:114 #, python-format msgid "Datastore version '%(version)s' cannot be found." msgstr "" #: trove/common/exception.py:119 msgid "Datastores cannot be found." msgstr "" #: trove/common/exception.py:124 #, python-format msgid "" "Flavor %(flavor_id)s is not supported for datastore %(datastore)s version" " %(datastore_version)s" msgstr "" #: trove/common/exception.py:130 #, python-format msgid "" "Flavor %(flavor_id)s is already associated with datastore %(datastore)s " "version %(datastore_version)s" msgstr "" #: trove/common/exception.py:136 #, python-format msgid "Datastore '%(datastore)s' has no version '%(version)s'." msgstr "" #: trove/common/exception.py:141 #, python-format msgid "Datastore version '%(version)s' is not active." msgstr "" #: trove/common/exception.py:146 msgid "Please specify datastore. Default datastore cannot be found." msgstr "" #: trove/common/exception.py:152 #, python-format msgid "Default version for datastore '%(datastore)s' not found." 
msgstr "" #: trove/common/exception.py:157 #, python-format msgid "Datastore manager %(datastore_manager)s cannot be found." msgstr "" #: trove/common/exception.py:162 #, python-format msgid "" "The '%(operation)s' operation is not supported for the '%(datastore)s' " "datastore." msgstr "" #: trove/common/exception.py:168 #, python-format msgid "Multiple matches found for '%(name)s', use an UUID to be more specific." msgstr "" #: trove/common/exception.py:174 msgid "The server rejected the request due to its size or rate." msgstr "" #: trove/common/exception.py:180 #, python-format msgid "Quota exceeded for resources: %(overs)s." msgstr "" #: trove/common/exception.py:185 msgid "Instance volume quota exceeded." msgstr "" #: trove/common/exception.py:190 #, python-format msgid "An error occurred communicating with the guest: %(original_message)s." msgstr "" #: trove/common/exception.py:196 msgid "Timeout trying to connect to the Guest Agent." msgstr "" #: trove/common/exception.py:201 msgid "" "The server could not comply with the request since it is either malformed" " or otherwise incorrect." msgstr "" #: trove/common/exception.py:207 #, python-format msgid "Required element/key - %(key)s was not specified." msgstr "" #: trove/common/exception.py:212 #, python-format msgid "A database with the name \"%(name)s\" already exists." msgstr "" #: trove/common/exception.py:217 #, python-format msgid "A user with the name \"%(name)s\" already exists." msgstr "" #: trove/common/exception.py:222 msgid "" "A configuration group cannot be deleted if it is associated with one or " "more non-terminated instances. Detach the configuration group from all " "non-terminated instances and please try again." msgstr "" #: trove/common/exception.py:230 msgid "Unable to process the contained request." msgstr "" #: trove/common/exception.py:235 msgid "No change was requested in the size of the instance." msgstr "" #: trove/common/exception.py:240 #, python-format msgid "Cannot find the volumes attached to compute instance %(server_id)s." msgstr "" #: trove/common/exception.py:246 msgid "Failed to create a volume in Nova." msgstr "" #: trove/common/exception.py:251 msgid "Volume size was not specified." msgstr "" #: trove/common/exception.py:256 #, python-format msgid "Local storage not specified in flavor ID: %(flavor)s." msgstr "" #: trove/common/exception.py:261 msgid "Local storage support is not enabled." msgstr "" #: trove/common/exception.py:266 msgid "Volume support is not enabled." msgstr "" #: trove/common/exception.py:271 #, python-format msgid "Replication is not supported for the '%(datastore)s' datastore." msgstr "" #: trove/common/exception.py:277 msgid "Exception encountered attaching slave to new replica source." msgstr "" #: trove/common/exception.py:282 #, python-format msgid "" "An error occurred communicating with the task manager: " "%(original_message)s." msgstr "" #: trove/common/exception.py:288 #, python-format msgid "Value could not be converted: %(msg)s." msgstr "" #: trove/common/exception.py:293 msgid "Polling request timed out." msgstr "" #: trove/common/exception.py:298 msgid "User does not have admin privileges." msgstr "" #: trove/common/exception.py:303 #, python-format msgid "The following values are invalid: %(errors)s." msgstr "" #: trove/common/exception.py:308 msgid "Not Found." msgstr "" #: trove/common/exception.py:313 msgid "Failed to update instances." msgstr "" #: trove/common/exception.py:318 msgid "Config file not found." 
msgstr "" #: trove/common/exception.py:323 msgid "Paste app not found." msgstr "" #: trove/common/exception.py:327 msgid "Quota could not be found." msgstr "" #: trove/common/exception.py:331 #, python-format msgid "Quota for tenant %(tenant_id)s could not be found." msgstr "" #: trove/common/exception.py:335 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "" #: trove/common/exception.py:339 msgid "Unable to upload Backup to swift." msgstr "" #: trove/common/exception.py:343 msgid "Unable to download Backup from swift" msgstr "" #: trove/common/exception.py:347 msgid "Unable to create Backup." msgstr "" #: trove/common/exception.py:351 msgid "Unable to update Backup table in database." msgstr "" #: trove/common/exception.py:356 trove/extensions/security_group/models.py:60 msgid "Failed to create Security Group." msgstr "" #: trove/common/exception.py:361 msgid "Failed to delete Security Group." msgstr "" #: trove/common/exception.py:366 msgid "Failed to create Security Group Rule." msgstr "" #: trove/common/exception.py:371 msgid "Failed to delete Security Group Rule." msgstr "" #: trove/common/exception.py:376 #, python-format msgid "" "Error creating security group rules. Malformed port(s). Port must be an " "integer. FromPort = %(from)s greater than ToPort = %(to)s." msgstr "" #: trove/common/exception.py:383 #, python-format msgid "" "Unable to create instance because backup %(backup_id)s is not completed. " "Actual state: %(state)s." msgstr "" #: trove/common/exception.py:388 #, python-format msgid "Backup file in %(location)s was not found in the object storage." msgstr "" #: trove/common/exception.py:393 #, python-format msgid "" "The datastore from which the backup was taken, %(datastore1)s, does not " "match the destination datastore of %(datastore2)s." msgstr "" #: trove/common/exception.py:399 #, python-format msgid "Swift account not accessible for tenant %(tenant_id)s." msgstr "" #: trove/common/exception.py:403 #, python-format msgid "Swift is disabled for tenant %(tenant_id)s." msgstr "" #: trove/common/exception.py:407 #, python-format msgid "" "The request indicates that user %(user)s should have access to database " "%(database)s, but database %(database)s is not included in the initial " "databases list." msgstr "" #: trove/common/exception.py:413 msgid "" "Two or more databases share the same name in the initial databases list. " "Please correct the names or remove the duplicate entries." msgstr "" #: trove/common/exception.py:419 msgid "" "Two or more users share the same name and host in the initial users list." " Please correct the names or remove the duplicate entries." msgstr "" #: trove/common/exception.py:425 #, python-format msgid "" "Current Swift object checksum does not match original checksum for backup" " %(backup_id)s." msgstr "" #: trove/common/exception.py:430 #, python-format msgid "%(key)s is not a supported configuration parameter." msgstr "" #: trove/common/exception.py:434 #, python-format msgid "No configuration parser found for datastore %(datastore_manager)s." msgstr "" #: trove/common/exception.py:439 #, python-format msgid "" "Datastore Version on Configuration %(config_datastore_version)s does not " "match the Datastore Version on the instance " "%(instance_datastore_version)s." msgstr "" #: trove/common/exception.py:446 #, python-format msgid "" "%(parameter_name)s parameter can no longer be set as of " "%(parameter_deleted_at)s." 
msgstr "" #: trove/common/exception.py:451 #, python-format msgid "" "%(parameter_name)s parameter already exists for datastore version " "%(datastore_version)s." msgstr "" #: trove/common/exception.py:456 #, python-format msgid "" "Instance %(instance_id)s already has a Configuration Group attached: " "%(configuration_id)s." msgstr "" #: trove/common/exception.py:461 #, python-format msgid "" "The operation you have requested cannot be executed because the instance " "status is currently: %(status)s." msgstr "" #: trove/common/exception.py:467 #, python-format msgid "" "Endpoint not found for service_type=%(service_type)s, " "endpoint_type=%(endpoint_type)s, endpoint_region=%(endpoint_region)s." msgstr "" #: trove/common/exception.py:474 msgid "Empty catalog." msgstr "" #: trove/common/exception.py:478 #, python-format msgid "" "Instance with replication strategy %(guest_strategy)s cannot replicate " "from instance with replication strategy %(replication_strategy)s." msgstr "" #: trove/common/exception.py:484 #, python-format msgid "" "The target instance has only %(slave_volume_size)sG free, but the " "replication snapshot contains %(dataset_size)sG of data." msgstr "" #: trove/common/exception.py:490 msgid "The replica source cannot be deleted without detaching the replicas." msgstr "" #: trove/common/exception.py:495 #, python-format msgid "Cluster '%(cluster)s' cannot be found." msgstr "" #: trove/common/exception.py:499 msgid "The flavor for each instance in a cluster must be the same." msgstr "" #: trove/common/exception.py:503 msgid "A volume size is required for each instance in the cluster." msgstr "" #: trove/common/exception.py:507 msgid "The volume size for each instance in a cluster must be the same." msgstr "" #: trove/common/exception.py:512 #, python-format msgid "" "The number of instances for your initial cluster must be " "%(num_instances)s." msgstr "" #: trove/common/exception.py:517 #, python-format msgid "" "The number of instances for your initial cluster must be at least " "%(num_instances)s." msgstr "" #: trove/common/exception.py:522 msgid "Operation not supported for instances that are part of a cluster." msgstr "" #: trove/common/exception.py:528 #, python-format msgid "The '%(operation)s' operation is not supported for cluster." msgstr "" #: trove/common/exception.py:532 #, python-format msgid "Operation not allowed for tenant %(tenant_id)s." msgstr "" #: trove/common/exception.py:536 #, python-format msgid "Clusters not supported for %(datastore)s-%(datastore_version)s." msgstr "" #: trove/common/exception.py:541 #, python-format msgid "" "Backup is too large for given flavor or volume. Backup size: " "%(backup_size)s GBs. Available size: %(disk_size)s GBs." msgstr "" #: trove/common/exception.py:548 #, python-format msgid "Image %(uuid)s cannot be found." msgstr "" #: trove/common/exception.py:553 #, python-format msgid "A datastore version with the name '%(name)s' already exists." msgstr "" #: trove/common/extensions.py:203 #, python-format msgid "Extension with alias %s does not exist" msgstr "" #: trove/common/extensions.py:400 #, python-format msgid "Exception loading extension: %s" msgstr "" #: trove/common/limits.py:88 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" #: trove/common/limits.py:212 msgid "This request was rate-limited." msgstr "" #: trove/common/template.py:137 #, python-format msgid "Missing heat template for %(s_datastore_manager)s." 
msgstr "" #: trove/common/utils.py:243 #, python-format msgid "" "Command '%(cmd)s' failed. %(description)s Exit code: %(exit_code)s\n" "stderr: %(stderr)s\n" "stdout: %(stdout)s" msgstr "" #: trove/common/utils.py:252 msgid "Got a timeout but not the one expected." msgstr "" #: trove/common/utils.py:255 #, python-format msgid "" "Time out after waiting %(time)s seconds when running proc: %(args)s " "%(kwargs)s." msgstr "" #: trove/common/wsgi.py:144 msgid "version not supported" msgstr "" #: trove/common/wsgi.py:303 msgid "Unserializable result detected." msgstr "" #: trove/common/wsgi.py:529 msgid "Invalid service catalog json." msgstr "" #: trove/common/wsgi.py:574 #, python-format msgid "Caught error: %s." msgstr "" #: trove/common/rpc/service.py:81 msgid "Failed to stop RPC server before shutdown. " msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:74 #, python-format msgid "" "An instance with the options %(given)s is missing the MongoDB required " "option %(expected)s." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:81 #, python-format msgid "" "The value %(value)s for key %(key)s is invalid. Allowed values are " "%(valid)s." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:100 #, python-format msgid "The arguments %s are not supported by MongoDB." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:255 msgid "" "This action cannot be performed on the cluster as no reference shard " "exists." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:308 #: trove/common/strategies/cluster/experimental/mongodb/api.py:339 msgid "Not instances specified for grow operation." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:355 #, python-format msgid "" "Instances %s cannot be deleted. MongoDB cluster shink only supports " "removing replicas and query routers." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:365 msgid "" "Cannot delete all remaining query routers. At least one query router must" " be available in the cluster." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:390 #, python-format msgid "" "MongoDB cluster shrink only supports removing an entire shard. Shard " "%(shard)s has members: %(instances)s" msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:478 #, python-format msgid "Replica instance does not have required field(s) %s." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:486 #, python-format msgid "Instance type %s not supported for MongoDB cluster grow." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:517 msgid "Duplicate member names not allowed." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:522 #, python-format msgid "related_to target(s) %(targets)s do not match any specified names." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:544 msgid "Members of the same shard have mismatching flavorRef and/or volume values." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/api.py:578 #, python-format msgid "" "Shard with instance %s is still active. Please remove the shard from the " "MongoDB cluster before shrinking." 
msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:123 #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:397 msgid "error adding config servers" msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:142 msgid "timeout for building cluster." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:189 msgid "timeout for building shard." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:263 msgid "timeout for growing cluster." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:287 #: trove/taskmanager/models.py:274 msgid "timeout for instances to be marked as deleted." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:298 msgid "timeout for shrinking cluster." msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:332 msgid "error initializing replica set" msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:353 msgid "error adding shard" msgstr "" #: trove/common/strategies/cluster/experimental/mongodb/taskmanager.py:367 msgid "no query routers ready to accept requests" msgstr "" #: trove/common/strategies/cluster/experimental/pxc/taskmanager.py:130 #: trove/common/strategies/cluster/experimental/redis/taskmanager.py:88 #: trove/common/strategies/cluster/experimental/vertica/taskmanager.py:92 msgid "Error creating cluster." msgstr "" #: trove/common/strategies/cluster/experimental/pxc/taskmanager.py:140 #: trove/common/strategies/cluster/experimental/redis/taskmanager.py:98 #: trove/common/strategies/cluster/experimental/vertica/taskmanager.py:102 msgid "Timeout for building cluster." msgstr "" #: trove/common/strategies/cluster/experimental/redis/api.py:194 msgid "Some nodes cannot be removed (check slots)" msgstr "" #: trove/common/strategies/cluster/experimental/redis/taskmanager.py:144 msgid "Timeout for growing cluster." msgstr "" #: trove/common/strategies/cluster/experimental/redis/taskmanager.py:147 #, python-format msgid "Error growing cluster %s." msgstr "" #: trove/common/strategies/storage/swift.py:132 #, python-format msgid "" "Error saving data segment to swift. ETAG: %(tag)s Segment MD5: " "%(checksum)s." msgstr "" #: trove/common/strategies/storage/swift.py:163 #, python-format msgid "Error saving data to swift. Manifest ETAG: %(tag)s Swift MD5: %(checksum)s" msgstr "" #: trove/common/strategies/storage/swift.py:180 #, python-format msgid "" "Original checksum: %(original)s does not match the current checksum: " "%(current)s" msgstr "" #: trove/common/strategies/storage/swift.py:237 #, python-format msgid "Writing metadata: %s" msgstr "" #: trove/conductor/manager.py:50 #, python-format msgid "[Instance %s] sent field not present. Cannot compare." msgstr "" #: trove/conductor/manager.py:80 #, python-format msgid "[Instance %s] Rec'd message is older than last seen. Discarding." msgstr "" #: trove/conductor/manager.py:113 #, python-format msgid "" "[Instance: %(instance)s] Backup IDs mismatch! Expected %(expected)s, " "found %(found)s" msgstr "" #: trove/conductor/manager.py:122 #, python-format msgid "" "[Instance: %(instance)s] Backup instance IDs mismatch! Expected " "%(expected)s, found %(found)s" msgstr "" #: trove/configuration/models.py:139 #, python-format msgid "Configuration group with ID %s could not be found." 
msgstr "" #: trove/configuration/service.py:92 #, python-format msgid "" "Creating configuration group on tenant %(tenant_id)s with name: " "%(cfg_name)s" msgstr "" #: trove/configuration/service.py:124 #, python-format msgid "Deleting configuration group %(cfg_id)s on tenant: %(tenant_id)s" msgstr "" #: trove/configuration/service.py:140 #, python-format msgid "Updating configuration group %(cfg_id)s for tenant id %(tenant_id)s" msgstr "" #: trove/configuration/service.py:209 msgid "Validating configuration values" msgstr "" #: trove/configuration/service.py:220 #, python-format msgid "" "Configuration groups are not supported for this datastore: %(name)s " "%(version)s" msgstr "" #: trove/configuration/service.py:231 #, python-format msgid "" "The configuration parameter %(key)s is not supported for this datastore: " "%(name)s %(version)s." msgstr "" #: trove/configuration/service.py:244 #, python-format msgid "" "The value provided for the configuration parameter %(key)s is not of type" " %(type)s." msgstr "" #: trove/configuration/service.py:254 msgid "" "Invalid or unsupported min value defined in the configuration-parameters " "configuration file. Expected integer." msgstr "" #: trove/configuration/service.py:260 #, python-format msgid "" "The value for the configuration parameter %(key)s is less than the " "minimum allowed: %(min)s" msgstr "" #: trove/configuration/service.py:270 msgid "" "Invalid or unsupported max value defined in the configuration-parameters " "configuration file. Expected integer." msgstr "" #: trove/configuration/service.py:276 #, python-format msgid "" "The value for the configuration parameter %(key)s is greater than the " "maximum allowed: %(max)s" msgstr "" #: trove/configuration/service.py:291 msgid "" "Invalid or unsupported type defined in the configuration-parameters " "configuration file." msgstr "" #: trove/configuration/service.py:301 #, python-format msgid "%s is not a supported configuration parameter." msgstr "" #: trove/datastore/models.py:219 #, python-format msgid "" "Somehow we got a datastore version capability without a parent, that " "shouldn't happen. %s" msgstr "" #: trove/datastore/models.py:262 #, python-format msgid "Capability Override not found for capability %s" msgstr "" #: trove/datastore/models.py:676 msgid "Specify both the datastore and datastore_version_id." msgstr "" #: trove/db/models.py:99 #, python-format msgid "%(s_name)s Not Found" msgstr "" #: trove/db/models.py:104 #, python-format msgid "Tenant %(s_tenant)s tried to access %(s_name)s, owned by %(s_owner)s." msgstr "" #: trove/db/models.py:110 #, python-format msgid "Tenant %(s_tenant)s cannot access %(s_name)s" msgstr "" #: trove/db/sqlalchemy/session.py:86 #, python-format msgid "" "Configuration option \"query_log\" has been depracated. Use " "\"connection_debug\" instead. Setting connection_debug = %(debug_level)s " "instead." msgstr "" #: trove/db/sqlalchemy/session.py:104 msgid "***The Database has not been setup!!!***" msgstr "" #: trove/dns/models.py:69 #, python-format msgid "%s Not Found" msgstr "" #: trove/extensions/account/service.py:36 #, python-format msgid "Showing account information for '%(account)s' to '%(tenant)s'" msgstr "" #: trove/extensions/account/service.py:47 #, python-format msgid "Showing all accounts with instances for '%s'" msgstr "" #: trove/extensions/mgmt/clusters/service.py:82 msgid "Invalid cluster action requested." msgstr "" #: trove/extensions/mgmt/configuration/service.py:62 msgid "max_size is required for integer data type." 
msgstr "" #: trove/extensions/mgmt/configuration/service.py:65 msgid "min_size is required for integer data type." msgstr "" #: trove/extensions/mgmt/configuration/service.py:71 msgid "max_size must be greater than or equal to min_size." msgstr "" #: trove/extensions/mgmt/configuration/service.py:77 msgid "Creating configuration parameter for datastore" msgstr "" #: trove/extensions/mgmt/configuration/service.py:104 msgid "Updating configuration parameter for datastore" msgstr "" #: trove/extensions/mgmt/configuration/service.py:129 msgid "Deleting configuration parameter for datastore" msgstr "" #: trove/extensions/mgmt/configuration/service.py:135 #, python-format msgid "Parameter %s does not exist in the database." msgstr "" #: trove/extensions/mgmt/datastores/service.py:51 #, python-format msgid "" "Tenant: '%(tenant)s' is adding the datastore version: '%(version)s' to " "datastore: '%(datastore)s'" msgstr "" #: trove/extensions/mgmt/datastores/service.py:66 #, python-format msgid "Creating datastore %s" msgstr "" #: trove/extensions/mgmt/datastores/service.py:109 #, python-format msgid "" "Tenant: '%(tenant)s' is updating the datastore version: '%(version)s' for" " datastore: '%(datastore)s'" msgstr "" #: trove/extensions/mgmt/datastores/service.py:147 #, python-format msgid "" "Tenant: '%(tenant)s' is removing the datastore version: '%(version)s' for" " datastore: '%(datastore)s'" msgstr "" #: trove/extensions/mgmt/host/models.py:75 #, python-format msgid "Compute Instance ID found with no associated RD instance: %s." msgstr "" #: trove/extensions/mgmt/host/models.py:89 #, python-format msgid "Unable to update instance: %s." msgstr "" #: trove/extensions/mgmt/host/models.py:92 #, python-format msgid "Failed to update instances: %s." msgstr "" #: trove/extensions/mgmt/host/service.py:35 #, python-format msgid "Indexing a host for tenant '%s'" msgstr "" #: trove/extensions/mgmt/host/service.py:44 #, python-format msgid "Showing a host for tenant '%s'" msgstr "" #: trove/extensions/mgmt/host/instance/service.py:43 #: trove/extensions/mgmt/instances/service.py:110 msgid "Only one action can be specified per request." 
msgstr "" #: trove/extensions/mgmt/host/instance/service.py:47 #, python-format msgid "Invalid host action: %s" msgstr "" #: trove/extensions/mgmt/instances/service.py:51 #, python-format msgid "Indexing a database instance for tenant '%s'" msgstr "" #: trove/extensions/mgmt/instances/service.py:75 #, python-format msgid "Showing a database instance for tenant '%s'" msgstr "" #: trove/extensions/mgmt/instances/service.py:114 #, python-format msgid "Invalid instance action: %s" msgstr "" #: trove/extensions/mgmt/instances/service.py:155 #, python-format msgid "Showing root history for tenant '%s'" msgstr "" #: trove/extensions/mgmt/instances/service.py:174 #, python-format msgid "Showing hardware info for instance '%s'" msgstr "" #: trove/extensions/mgmt/instances/service.py:186 #, python-format msgid "Showing instance diagnostics for the instance '%s'" msgstr "" #: trove/extensions/mgmt/quota/service.py:35 #, python-format msgid "Indexing quota info for tenant '%s'" msgstr "" #: trove/extensions/mgmt/upgrade/service.py:37 msgid "Sending upgrade notification" msgstr "" #: trove/extensions/mgmt/upgrade/service.py:38 #, python-format msgid "Admin tenant_id: %s" msgstr "" #: trove/extensions/mgmt/volume/service.py:35 #, python-format msgid "Indexing storage info for tenant '%s'" msgstr "" #: trove/extensions/mysql/service.py:56 #, python-format msgid "Listing users for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:67 #, python-format msgid "Creating users for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:80 #, python-format msgid "Deleting user for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:103 #, python-format msgid "Showing a user for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:120 #, python-format msgid "Updating user attributes for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:142 #, python-format msgid "Updating user passwords for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:190 #, python-format msgid "Showing user access for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:197 trove/extensions/mysql/service.py:212 #: trove/extensions/mysql/service.py:227 #, python-format msgid "No such user: %(user)s " msgstr "" #: trove/extensions/mysql/service.py:206 #, python-format msgid "Granting user access for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:221 #, python-format msgid "Revoking user access for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:244 #, python-format msgid "Listing schemas for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:255 #, python-format msgid "Creating schema for instance '%s'" msgstr "" #: trove/extensions/mysql/service.py:265 #, python-format msgid "Deleting schema for instance '%s'" msgstr "" #: trove/extensions/security_group/models.py:70 #: trove/extensions/security_group/models.py:167 msgid "Failed to create remote security group." msgstr "" #: trove/extensions/security_group/models.py:77 trove/taskmanager/models.py:719 #, python-format msgid "Security Group for %s" msgstr "" #: trove/extensions/security_group/models.py:115 #: trove/extensions/security_group/models.py:181 msgid "Failed to delete security group." msgstr "" #: trove/extensions/security_group/models.py:131 #, python-format msgid "Security Group with id: %(id)s already had been deleted" msgstr "" #: trove/extensions/security_group/models.py:215 msgid "Security Group does not have id defined!" 
msgstr "" #: trove/extensions/security_group/service.py:131 #, python-format msgid "Create Security Group Rules Required field(s) - %s" msgstr "" #: trove/guestagent/api.py:67 trove/guestagent/api.py:70 #: trove/guestagent/api.py:81 trove/guestagent/api.py:84 #, python-format msgid "Error calling %s" msgstr "" #: trove/guestagent/dbaas.py:94 msgid "Error getting volume stats." msgstr "" #: trove/guestagent/models.py:87 #, python-format msgid "Error finding instance %s" msgstr "" #: trove/guestagent/pkg.py:122 #, python-format msgid "Error removing conflict %(package)s" msgstr "" #: trove/guestagent/pkg.py:221 #, python-format msgid "Unexpected output from rpm command. (%(output)s)" msgstr "" #: trove/guestagent/pkg.py:244 msgid "Error fixing dpkg" msgstr "" #: trove/guestagent/pkg.py:356 msgid "Error updating the apt sources" msgstr "" #: trove/guestagent/volume.py:69 msgid "Error getting device status" msgstr "" #: trove/guestagent/volume.py:70 #, python-format msgid "InvalidDevicePath(path=%s)" msgstr "" #: trove/guestagent/volume.py:86 msgid "Device path at {0} did not seem to be {1}." msgstr "" #: trove/guestagent/volume.py:90 msgid "Volume was not formatted." msgstr "" #: trove/guestagent/volume.py:133 msgid "Error resizing file system." msgstr "" #: trove/guestagent/volume.py:134 #, python-format msgid "Error resizing the filesystem: %s" msgstr "" #: trove/guestagent/volume.py:147 #, python-format msgid "Device %(device)s is already mounted in %(mount_point)s. Unmounting now." msgstr "" #: trove/guestagent/volume.py:160 msgid "Error retrieving mount points" msgstr "" #: trove/guestagent/volume.py:161 #, python-format msgid "Could not obtain a list of mount points for device: %s" msgstr "" #: trove/guestagent/volume.py:172 #, python-format msgid "Error setting readhead size to %(size)s for device %(device)s." msgstr "" #: trove/guestagent/volume.py:175 #, python-format msgid "Error setting readhead size: %s." msgstr "" #: trove/guestagent/backup/backupagent.py:117 #, python-format msgid "Error saving backup: %(backup_id)s." msgstr "" #: trove/guestagent/backup/backupagent.py:121 #, python-format msgid "Completed backup %(backup_id)s." msgstr "" #: trove/guestagent/backup/backupagent.py:178 #, python-format msgid "Error restoring backup %(id)s." msgstr "" #: trove/guestagent/common/operating_system.py:62 #, python-format msgid "File does not exist: %s" msgstr "" #: trove/guestagent/common/operating_system.py:132 #, python-format msgid "Invalid path: %s" msgstr "" #: trove/guestagent/common/operating_system.py:313 #, python-format msgid "Service control command not available: %s" msgstr "" #: trove/guestagent/common/operating_system.py:316 msgid "Candidate service names not specified." msgstr "" #: trove/guestagent/common/operating_system.py:409 msgid "Cannot create a blank directory." msgstr "" #: trove/guestagent/common/operating_system.py:438 msgid "Cannot change ownership of a blank file or directory." msgstr "" #: trove/guestagent/common/operating_system.py:441 msgid "Please specify owner or group, or both." msgstr "" #: trove/guestagent/common/operating_system.py:497 msgid "Cannot change mode of a blank file." msgstr "" #: trove/guestagent/common/operating_system.py:528 msgid "Missing user." msgstr "" #: trove/guestagent/common/operating_system.py:530 msgid "Missing group." msgstr "" #: trove/guestagent/common/operating_system.py:563 msgid "No file mode specified." msgstr "" #: trove/guestagent/common/operating_system.py:587 msgid "Cannot remove a blank file." 
msgstr "" #: trove/guestagent/common/operating_system.py:611 #: trove/guestagent/common/operating_system.py:648 msgid "Missing source path." msgstr "" #: trove/guestagent/common/operating_system.py:613 #: trove/guestagent/common/operating_system.py:650 msgid "Missing destination path." msgstr "" #: trove/guestagent/common/operating_system.py:725 #, python-format msgid "Got unknown keyword args: %r" msgstr "" #: trove/guestagent/datastore/manager.py:130 #, python-format msgid "Starting datastore prepare for '%s'." msgstr "" #: trove/guestagent/datastore/manager.py:140 #, python-format msgid "An error occurred preparing datastore: %s" msgstr "" #: trove/guestagent/datastore/manager.py:144 #, python-format msgid "Ending datastore prepare for '%s'." msgstr "" #: trove/guestagent/datastore/manager.py:151 #, python-format msgid "Completed setup of '%s' datastore successfully." msgstr "" #: trove/guestagent/datastore/manager.py:159 msgid "Creating databases (called from 'prepare')." msgstr "" #: trove/guestagent/datastore/manager.py:161 msgid "Databases created successfully." msgstr "" #: trove/guestagent/datastore/manager.py:163 msgid "Creating users (called from 'prepare')" msgstr "" #: trove/guestagent/datastore/manager.py:165 msgid "Users created successfully." msgstr "" #: trove/guestagent/datastore/manager.py:167 #, python-format msgid "An error occurred creating databases/users: %s" msgstr "" #: trove/guestagent/datastore/manager.py:172 #, python-format msgid "Calling post_prepare for '%s' datastore." msgstr "" #: trove/guestagent/datastore/manager.py:178 #, python-format msgid "Post prepare for '%s' datastore completed." msgstr "" #: trove/guestagent/datastore/manager.py:181 #, python-format msgid "An error occurred in post prepare: %s" msgstr "" #: trove/guestagent/datastore/manager.py:211 msgid "No post_prepare work has been defined." msgstr "" #: trove/guestagent/datastore/service.py:118 #, python-format msgid "Set final status to %s." msgstr "" #: trove/guestagent/datastore/service.py:125 msgid "Ending restart." msgstr "" #: trove/guestagent/datastore/service.py:133 #, python-format msgid "Current database status is '%s'." msgstr "" #: trove/guestagent/datastore/service.py:182 msgid "" "DB server is not installed or is in restart mode, so for now we'll skip " "determining the status of DB on this instance." msgstr "" #: trove/guestagent/datastore/service.py:212 msgid "Database restart failed." msgstr "" #: trove/guestagent/datastore/service.py:239 msgid "Starting database service." msgstr "" #: trove/guestagent/datastore/service.py:245 msgid "Enable service auto-start on boot." msgstr "" #: trove/guestagent/datastore/service.py:262 msgid "Database failed to start." msgstr "" #: trove/guestagent/datastore/service.py:264 msgid "Database has started successfully." msgstr "" #: trove/guestagent/datastore/service.py:286 msgid "Stopping database service." msgstr "" #: trove/guestagent/datastore/service.py:292 msgid "Database failed to stop." msgstr "" #: trove/guestagent/datastore/service.py:294 msgid "Database has stopped successfully." msgstr "" #: trove/guestagent/datastore/service.py:297 msgid "Disable service auto-start on boot." 
msgstr "" #: trove/guestagent/datastore/service.py:316 #, python-format msgid "" "Service status did not change to %(status)s within the given timeout: " "%(timeout)ds" msgstr "" #: trove/guestagent/datastore/service.py:365 #, python-format msgid "" "Timeout while waiting for database status to change.Expected state " "%(status)s, current state is %(actual_status)s" msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:51 msgid "Preparing Guest as a Cassandra Server" msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:60 msgid "Error while initiating storage structure." msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:111 #, python-format msgid "Exception generating Cassandra configuration %s." msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:118 msgid "Wrote new Cassandra configuration." msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:167 msgid "Starting Cassandra with configuration changes." msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:171 #, python-format msgid "Cannot execute start_db_with_conf_changes because Cassandra state == %s." msgstr "" #: trove/guestagent/datastore/experimental/cassandra/service.py:197 msgid "Error getting Cassandra status" msgstr "" #: trove/guestagent/datastore/experimental/couchbase/manager.py:105 #, python-format msgid "Restoring database from backup %s" msgstr "" #: trove/guestagent/datastore/experimental/couchbase/manager.py:110 #, python-format msgid "Error performing restore from backup %s" msgstr "" #: trove/guestagent/datastore/experimental/couchbase/manager.py:115 msgid "Restored database successfully" msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:63 msgid "Preparing Guest as Couchbase Server." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:72 msgid "Couchbase Server change data dir path." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:91 msgid "Couchbase Server initial setup finished." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:93 msgid "Error performing initial Couchbase setup." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:127 msgid "Starting Couchbase with configuration changes." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:128 #, python-format msgid "" "Configuration contents:\n" " %s." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:130 #, python-format msgid "Cannot start Couchbase with configuration changes. Couchbase state == %s." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:161 msgid "Error getting the Couchbase status." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:167 msgid "Error getting the root password from the native Couchbase config file." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:181 msgid "" "Error getting Couchbase status using the password parsed from the native " "Couchbase config file." msgstr "" #: trove/guestagent/datastore/experimental/couchbase/service.py:253 #, python-format msgid "An error occurred in saving password (%(errno)s). %(strerror)s." msgstr "" #: trove/guestagent/datastore/experimental/couchdb/service.py:62 msgid "Preparing guest as a CouchDB server." msgstr "" #: trove/guestagent/datastore/experimental/couchdb/service.py:66 msgid "Finished installing CouchDB server." 
msgstr "" #: trove/guestagent/datastore/experimental/couchdb/service.py:86 msgid "Error changing permissions." msgstr "" #: trove/guestagent/datastore/experimental/couchdb/service.py:111 msgid "Error while trying to update bind address of CouchDB server." msgstr "" #: trove/guestagent/datastore/experimental/couchdb/service.py:122 msgid "Starting CouchDB with configuration changes." msgstr "" #: trove/guestagent/datastore/experimental/couchdb/service.py:150 msgid "Error getting CouchDB status." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:61 msgid "Command to change ownership of DB2 data directory failed." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:69 msgid "Command to enable DB2 server on boot failed." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:77 msgid "Command to disable DB2 server on boot failed." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:88 msgid "Starting DB2 with configuration changes." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:102 msgid "Start of DB2 server instance failed." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:104 msgid "Could not start DB2." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:118 #: trove/guestagent/datastore/experimental/db2/service.py:120 msgid "Could not stop DB2." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:146 msgid "Error getting the DB2 server status." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:173 #, python-format msgid "There was an error creating database: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:178 #, python-format msgid "Creating the following databases failed: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:191 #, python-format msgid "There was an error while deleting database:%s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:193 #, python-format msgid "Unable to delete database: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:238 #, python-format msgid "An error occurred listing databases: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:256 #, python-format msgid "Error creating user: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:274 #, python-format msgid "An error occurred creating users: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:310 #, python-format msgid "There was an error while deleting user: %s." msgstr "" #: trove/guestagent/datastore/experimental/db2/service.py:312 #, python-format msgid "Unable to delete user: %s." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/manager.py:178 #: trove/guestagent/datastore/experimental/redis/manager.py:52 #: trove/guestagent/datastore/mysql_common/manager.py:125 #, python-format msgid "Restoring database from backup %s." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/manager.py:182 #: trove/guestagent/datastore/experimental/redis/manager.py:56 #: trove/guestagent/datastore/mysql_common/manager.py:129 #, python-format msgid "Error performing restore from backup %s." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/manager.py:186 #: trove/guestagent/datastore/experimental/redis/manager.py:60 #: trove/guestagent/datastore/mysql_common/manager.py:133 msgid "Restored database successfully." 
msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:71 msgid "Preparing Guest as MongoDB." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:75 msgid "Finished installing MongoDB server." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:104 msgid "Starting MongoDB with configuration changes." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:109 #: trove/guestagent/datastore/experimental/redis/service.py:197 msgid "Initiating config." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:167 #, python-format msgid "Bad cluster configuration; instance type given as %s." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:175 msgid "Configuring instance as a cluster query router." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:199 msgid "Configuring instance as a cluster config server." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:205 msgid "Configuring instance as a cluster member." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:245 msgid "Error clearing storage." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:261 #, python-format msgid "Setting config servers: %s" msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:389 msgid "Cannot secure the instance. The service is still running." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:454 msgid "Error getting MongoDB status." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:512 msgid "User's password is empty." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:552 #, python-format msgid "Cannot delete user with reserved name %(user)s" msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:611 #, python-format msgid "" "Cannot update attributes for user %(user)s as it either does not exist or" " is a reserved user." msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:650 #, python-format msgid "Cannot grant access for reserved or non-existant user %(user)s" msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:671 #, python-format msgid "Cannot revoke access for reserved or non-existant user %(user)s" msgstr "" #: trove/guestagent/datastore/experimental/mongodb/service.py:688 #, python-format msgid "Cannot list access for reserved or non-existant user %(user)s" msgstr "" #: trove/guestagent/datastore/experimental/postgresql/pgutil.py:52 #, python-format msgid "Invalid SQL statement: %s" msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/access.py:38 msgid "{guest_id}: Granting user ({user}) access to database ({database})." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/access.py:60 msgid "{guest_id}: Revoking user ({user}) access to database({database})." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/config.py:110 msgid "The service is still running." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/database.py:48 msgid "{guest_id}: Creating database {name}." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/database.py:70 msgid "{guest_id}: Dropping database {name}." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/install.py:53 msgid "{guest_id}: Installing ({packages})." 
msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/status.py:45 msgid "Error getting Postgres status." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/users.py:74 msgid "{guest_id}: Creating user {user} {with_clause}." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/users.py:164 msgid "{guest_id}: Dropping user {name}." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/users.py:219 msgid "{guest_id}: Altering user {user} {with_clause}." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/users.py:263 msgid "{guest_id}: Changing username for {old} to {new}." msgstr "" #: trove/guestagent/datastore/experimental/postgresql/service/users.py:278 msgid "{guest_id}: Regranting permissions from {old} to {new}." msgstr "" #: trove/guestagent/datastore/experimental/pxc/manager.py:63 #: trove/guestagent/datastore/experimental/vertica/manager.py:107 msgid "Cluster installation failed." msgstr "" #: trove/guestagent/datastore/experimental/pxc/service.py:67 #: trove/guestagent/datastore/mysql_common/service.py:660 msgid "Generating admin password." msgstr "" #: trove/guestagent/datastore/experimental/pxc/service.py:103 #: trove/guestagent/datastore/mysql_common/service.py:848 msgid "Granting Replication Slave privilege." msgstr "" #: trove/guestagent/datastore/experimental/pxc/service.py:113 msgid "Bootstraping cluster." msgstr "" #: trove/guestagent/datastore/experimental/pxc/service.py:121 msgid "Error bootstrapping cluster." msgstr "" #: trove/guestagent/datastore/experimental/pxc/service.py:122 msgid "Service is not discovered." msgstr "" #: trove/guestagent/datastore/experimental/pxc/service.py:126 msgid "Installing cluster configuration." msgstr "" #: trove/guestagent/datastore/experimental/redis/manager.py:77 msgid "Writing redis configuration." msgstr "" #: trove/guestagent/datastore/experimental/redis/manager.py:221 msgid "Retrieving latest repl offset." msgstr "" #: trove/guestagent/datastore/experimental/redis/manager.py:225 #, python-format msgid "Waiting on repl offset '%s'." msgstr "" #: trove/guestagent/datastore/experimental/redis/manager.py:235 #, python-format msgid "Timeout occurred waiting for Redis repl offset to change to '%s'." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:64 msgid "Error getting Redis status." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:117 msgid "Preparing Guest as Redis Server." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:119 msgid "Installing Redis." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:121 msgid "Redis installed completely." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:192 msgid "Starting redis with conf changes." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:336 #, python-format msgid "Error joining node to cluster at %s." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:349 #, python-format msgid "Error executing addslots: %s" msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:353 #, python-format msgid "Error adding slots %(first_slot)s-%(last_slot)s to cluster." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:364 msgid "Error getting node info." 
msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:371 msgid "Unable to determine node details" msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:386 msgid "Error validating node to for removal." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:395 msgid "Error removing node from cluster." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:434 #, python-format msgid "Could not set configuration property '%(name)s' to '%(value)s'." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:451 #, python-format msgid "Redis command '%(cmd_name)s %(cmd_args)s' failed." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:465 #, python-format msgid "Output from Redis command: %s" msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:466 #, python-format msgid "Field %(field)s not found (Section: '%(sec)s')." msgstr "" #: trove/guestagent/datastore/experimental/redis/service.py:474 #, python-format msgid "" "Timeout occurred waiting for Redis field '%(field)s' to change to " "'%(val)s'." msgstr "" #: trove/guestagent/datastore/experimental/vertica/manager.py:63 #, python-format msgid "Bad cluster configuration: instance type given as %s." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:50 msgid "Service Status is RUNNING." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:53 msgid "Service Status is SHUTDOWN." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:56 #: trove/guestagent/datastore/mysql_common/service.py:151 msgid "Failed to get database status." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:76 msgid "Failed to enable db on boot." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:86 msgid "Failed to disable db on boot." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:91 msgid "Stopping Vertica." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:109 msgid "Could not stop Vertica." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:116 msgid "Failed to stop database." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:121 msgid "Starting Vertica." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:144 msgid "Starting Vertica with configuration changes." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:158 msgid "Creating database on Vertica host." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:167 #: trove/guestagent/datastore/experimental/vertica/service.py:168 msgid "Vertica database create failed." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:169 msgid "Vertica database create completed." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:173 msgid "Installing Vertica Server." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:180 #: trove/guestagent/datastore/experimental/vertica/service.py:181 msgid "install_vertica failed." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:183 msgid "install_vertica completed." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:218 #, python-format msgid "Failed to read config %s." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:227 msgid "Preparing Guest as Vertica Server." 
msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:249 msgid "Failed to prepare for install_vertica." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:254 msgid "Creating user in Vertica database." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:262 #, python-format msgid "Failed to create user %s." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:293 #: trove/guestagent/datastore/experimental/vertica/service.py:296 #: trove/guestagent/datastore/experimental/vertica/service.py:297 #, python-format msgid "Failed to update %s password." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:310 #: trove/guestagent/datastore/experimental/vertica/service.py:312 msgid "Failed to query for root user." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:331 msgid "Cannot read public key." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:358 msgid "Cannot install public keys." msgstr "" #: trove/guestagent/datastore/experimental/vertica/service.py:371 msgid "Cannot export configuration." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:95 msgid "/root/.mysql_secret does not exist." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:105 msgid "Cannot change mysql password." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:148 msgid "MySQL Service Status is RUNNING." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:158 #, python-format msgid "MySQL Service Status %(pid)s is BLOCKED." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:162 msgid "Process execution failed." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:167 msgid "MySQL Service Status is CRASHED." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:170 msgid "MySQL Service Status is SHUTDOWN." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:364 msgid "Error Getting user information" msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:365 #, python-format msgid "Username %(user)s is not valid: %(reason)s" msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:397 msgid "Error granting access" msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:398 #, python-format msgid "Grant access to %s is not allowed" msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:647 msgid "Preparing Guest as MySQL Server." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:656 msgid "Finished installing MySQL server." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:693 msgid "Preserving root access from restore." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:724 msgid "Error enabling MySQL start on boot." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:734 msgid "Error disabling MySQL start on boot." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:738 msgid "Stopping MySQL." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:746 msgid "Error stopping MySQL." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:751 msgid "Could not stop MySQL." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:793 #, python-format msgid "Unable to set %(key)s with value %(value)s." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:808 msgid "Wiping ib_logfiles." 
msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:826 msgid "Removing replication configuration file." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:880 msgid "Starting slave replication." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:887 msgid "Stopping slave replication." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:901 msgid "Stopping replication master." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:916 #, python-format msgid "Replication is now %s." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:919 #, python-format msgid "Replication is not %(status)s after %(max)d seconds." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:923 msgid "Starting MySQL." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:950 msgid "Start up of MySQL failed." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:956 msgid "Error killing stalled MySQL start command." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:962 msgid "Starting MySQL with conf changes." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:966 #, python-format msgid "Cannot execute start_db_with_conf_changes because MySQL state == %s." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:969 #: trove/guestagent/datastore/mysql_common/service.py:975 msgid "Resetting configuration." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:980 #: trove/guestagent/datastore/mysql_common/service.py:1017 msgid "Retrieving latest txn id." msgstr "" #: trove/guestagent/datastore/mysql_common/service.py:1021 #, python-format msgid "Waiting on txn '%s'." msgstr "" #: trove/guestagent/db/models.py:63 trove/guestagent/db/models.py:491 #, python-format msgid "Bad dictionary. Keys: %(keys)s. Required: %(reqs)s" msgstr "" #: trove/guestagent/db/models.py:96 #, python-format msgid "Schema name '%(name)s' is too long. Max length = %(max_length)d." msgstr "" #: trove/guestagent/db/models.py:101 #, python-format msgid "'%s' is not a valid schema name." msgstr "" #: trove/guestagent/db/models.py:139 #, python-format msgid "Bad args. name: %(name)s, deserializing %(deser)s." msgstr "" #: trove/guestagent/db/models.py:432 #, python-format msgid "%(val)s not a valid collation for charset %(char)s." msgstr "" #: trove/guestagent/db/models.py:438 #, python-format msgid "'%s' not a valid collation." msgstr "" #: trove/guestagent/db/models.py:456 #, python-format msgid "'%s' not a valid character set." msgstr "" #: trove/guestagent/db/models.py:469 #, python-format msgid "'%s' is not a valid database name." msgstr "" #: trove/guestagent/db/models.py:471 #, python-format msgid "Database name '%s' is too long. Max length = 64." msgstr "" #: trove/guestagent/db/models.py:517 trove/guestagent/db/models.py:813 #, python-format msgid "'%s' is not a valid password." msgstr "" #: trove/guestagent/db/models.py:539 trove/guestagent/db/models.py:836 #, python-format msgid "'%s' is not a valid hostname." msgstr "" #: trove/guestagent/db/models.py:557 #, python-format msgid "User name '%(name)s' is too long. Max length = %(max_length)d." msgstr "" #: trove/guestagent/db/models.py:562 trove/guestagent/db/models.py:799 #, python-format msgid "'%s' is not a valid user name." msgstr "" #: trove/guestagent/db/models.py:618 #, python-format msgid "Bad args. name: %(name)s, password %(pass)s, deserializing %(deser)s." 
msgstr "" #: trove/guestagent/db/models.py:658 #, python-format msgid "MongoDB user's name missing %s." msgstr "" #: trove/guestagent/db/models.py:663 msgid "MongoDB user missing database." msgstr "" #: trove/guestagent/db/models.py:667 msgid "MongoDB user missing username." msgstr "" #: trove/guestagent/db/models.py:711 #, python-format msgid "MongoDB user name \"%s\" not in . format." msgstr "" #: trove/guestagent/db/models.py:731 #, python-format msgid "Role %s is invalid." msgstr "" #: trove/guestagent/db/models.py:801 #, python-format msgid "User name '%s' is too long. Max length = 16." msgstr "" #: trove/guestagent/strategies/backup/mysql_impl.py:69 msgid "Innobackupex log file empty." msgstr "" #: trove/guestagent/strategies/backup/mysql_impl.py:73 msgid "Innobackupex did not complete successfully." msgstr "" #: trove/guestagent/strategies/backup/mysql_impl.py:87 #, python-format msgid "Metadata for backup: %s." msgstr "" #: trove/guestagent/strategies/backup/experimental/couchbase_impl.py:90 msgid "All buckets are memcached. Skipping backup." msgstr "" #: trove/guestagent/strategies/backup/experimental/mongo_impl.py:56 #, python-format msgid "" "Need more free space to run mongodump, estimated %(est_dump_size)s and " "found %(avail)s bytes free " msgstr "" #: trove/guestagent/strategies/replication/mysql_base.py:79 msgid "Replication user retry count exceeded" msgstr "" #: trove/guestagent/strategies/replication/mysql_base.py:130 msgid "Exception enabling guest as replica" msgstr "" #: trove/guestagent/strategies/replication/mysql_binlog.py:40 #, python-format msgid "Unable to determine binlog position (from file %(binlog_file)s)." msgstr "" #: trove/guestagent/strategies/replication/mysql_binlog.py:67 #, python-format msgid "Setting read permissions on %s" msgstr "" #: trove/guestagent/strategies/replication/mysql_binlog.py:69 #, python-format msgid "Reading log position from %s" msgstr "" #: trove/guestagent/strategies/restore/mysql_impl.py:91 msgid "Starting MySQL" msgstr "" #: trove/guestagent/strategies/restore/mysql_impl.py:93 msgid "Got a timeout launching mysqld_safe" msgstr "" #: trove/guestagent/strategies/restore/mysql_impl.py:109 msgid "Root password reset successfully." msgstr "" #: trove/guestagent/strategies/restore/mysql_impl.py:207 #, python-format msgid "Cleaning out restore location: %s." msgstr "" #: trove/guestagent/strategies/restore/mysql_impl.py:216 #: trove/guestagent/strategies/restore/mysql_impl.py:273 msgid "Innobackupex prepare finished successfully." msgstr "" #: trove/guestagent/strategies/restore/mysql_impl.py:288 #, python-format msgid "Restoring parent: %(parent_location)s checksum: %(parent_checksum)s." msgstr "" #: trove/guestagent/strategies/restore/experimental/redis_impl.py:47 #, python-format msgid "Removing old persistence file: %s." msgstr "" #: trove/instance/models.py:571 msgid "Detach replicas before deleting replica source." msgstr "" #: trove/instance/models.py:749 #, python-format msgid "Cannot create a replica of a replica %(id)s." msgstr "" #: trove/instance/models.py:765 #, python-format msgid "Cannot create a replica of %(id)s as that instance could not be found." msgstr "" #: trove/instance/models.py:770 #, python-format msgid "" "Replica count only valid when creating replicas. Cannot create %(count)d " "instances." msgstr "" #: trove/instance/models.py:884 #, python-format msgid "The new flavor id must be different than the current flavor id of '%s'." 
msgstr "" #: trove/instance/models.py:916 #, python-format msgid "The new volume 'size' must be larger than the current volume size of '%s'." msgstr "" #: trove/instance/models.py:924 #, python-format msgid "Instance %s has no volume." msgstr "" #: trove/instance/models.py:959 trove/instance/models.py:967 #, python-format msgid "Instance %s is not a replica." msgstr "" #: trove/instance/models.py:984 #, python-format msgid "Instance %s is not a replica source." msgstr "" #: trove/instance/models.py:990 #, python-format msgid "Replica Source %s cannot be ejected as it has a current heartbeat" msgstr "" #: trove/instance/models.py:1027 #, python-format msgid "" "Instance %(instance_id)s is not currently available for an action to be " "performed (status was %(action_status)s)." msgstr "" #: trove/quota/quota.py:311 #, python-format msgid "Failed to commit reservations %(reservations)s" msgstr "" #: trove/quota/quota.py:324 #, python-format msgid "Failed to roll back reservations %(reservations)s" msgstr "" #: trove/taskmanager/manager.py:112 #, python-format msgid "" "promote-to-replica-source: Unable to migrate replica %(slave)s from old " "replica source %(old_master)s to new source %(new_master)s." msgstr "" #: trove/taskmanager/manager.py:126 msgid "Exception demoting old replica source" msgstr "" #: trove/taskmanager/manager.py:134 #, python-format msgid "" "promote-to-replica-source %(id)s: The following replicas may not have " "been switched: %(replicas)s" msgstr "" #: trove/taskmanager/manager.py:170 #, python-format msgid "Replicas of %s not all replicating from same master" msgstr "" #: trove/taskmanager/manager.py:196 #, python-format msgid "" "eject-replica-source: Unable to migrate replica %(slave)s from old " "replica source %(old_master)s to new source %(new_master)s." msgstr "" #: trove/taskmanager/manager.py:212 #, python-format msgid "" "eject-replica-source %(id)s: The following replicas may not have been " "switched: %(replicas)s" msgstr "" #: trove/taskmanager/manager.py:292 #, python-format msgid "Could not create replica %(num)d of %(count)d." msgstr "" #: trove/taskmanager/manager.py:320 msgid "Cannot create multiple non-replica instances." msgstr "" #: trove/taskmanager/models.py:246 msgid "Timeout for all instance service statuses to become ready." msgstr "" #: trove/taskmanager/models.py:253 #, python-format msgid "Some instances failed to become ready: %s" msgstr "" #: trove/taskmanager/models.py:322 #, python-format msgid "Created instance %s successfully." msgstr "" #: trove/taskmanager/models.py:325 #, python-format msgid "" "Failed to create instance %s. Timeout waiting for instance to become " "active. No usage create-event was sent." msgstr "" #: trove/taskmanager/models.py:330 #, python-format msgid "Failed to send usage create-event for instance %s." msgstr "" #: trove/taskmanager/models.py:341 #, python-format msgid "Creating instance %s." msgstr "" #: trove/taskmanager/models.py:353 #, python-format msgid "Error creating security group for instance: %s" msgstr "" #: trove/taskmanager/models.py:424 #, python-format msgid "Error creating DNS entry for instance: %s" msgstr "" #: trove/taskmanager/models.py:435 #, python-format msgid "Error attaching instance %s as replica." 
msgstr "" #: trove/taskmanager/models.py:476 #, python-format msgid "Unable to create replication snapshot record for instance: %s" msgstr "" #: trove/taskmanager/models.py:510 #, python-format msgid "" "Error creating replication snapshot from instance %(source)s for new " "replica %(replica)s." msgstr "" #: trove/taskmanager/models.py:526 #, python-format msgid "" "An error occurred while deleting a bad replication snapshot from instance" " %(source)s." msgstr "" #: trove/taskmanager/models.py:547 #, python-format msgid "Service status: %(status)s" msgstr "" #: trove/taskmanager/models.py:550 #, python-format msgid "Service error description: %(desc)s" msgstr "" #: trove/taskmanager/models.py:558 #, python-format msgid "Trove instance status: %(action)s" msgstr "" #: trove/taskmanager/models.py:561 #, python-format msgid "Trove instance status description: %(text)s" msgstr "" #: trove/taskmanager/models.py:583 #, python-format msgid "Service not active, status: %s" msgstr "" #: trove/taskmanager/models.py:589 #, python-format msgid "Server not active, status: %s" msgstr "" #: trove/taskmanager/models.py:624 #, python-format msgid "Error creating server and volume for instance %s" msgstr "" #: trove/taskmanager/models.py:731 #, python-format msgid "Error occurred during Heat stack creation for instance %s." msgstr "" #: trove/taskmanager/models.py:763 #, python-format msgid "Failed to create server for instance %s" msgstr "" #: trove/taskmanager/models.py:782 #, python-format msgid "Failed to create volume for instance %s" msgstr "" #: trove/taskmanager/models.py:916 #, python-format msgid "" "Failed to create DNS entry for instance %(instance)s. Server status was " "%(status)s)." msgstr "" #: trove/taskmanager/models.py:927 msgid "Creating dns entry..." msgstr "" #: trove/taskmanager/models.py:950 #, python-format msgid "" "Failed to create security group rules for instance %(instance_id)s: " "Invalid port format - FromPort = %(from)s, ToPort = %(to)s" msgstr "" #: trove/taskmanager/models.py:1008 #, python-format msgid "Error stopping the datastore before attempting to delete instance id %s." msgstr "" #: trove/taskmanager/models.py:1019 #, python-format msgid "Error during delete compute server %s" msgstr "" #: trove/taskmanager/models.py:1028 #, python-format msgid "Error during dns entry of instance %(id)s: %(ex)s" msgstr "" #: trove/taskmanager/models.py:1037 #, python-format msgid "" "Server %(server_id)s entered ERROR status when deleting instance " "%(instance_id)s!" msgstr "" #: trove/taskmanager/models.py:1048 #, python-format msgid "" "Failed to delete instance %(instance_id)s: Timeout deleting compute " "server %(server_id)s" msgstr "" #: trove/taskmanager/models.py:1058 #, python-format msgid "Deleting volume %(v)s for instance: %(i)s." msgstr "" #: trove/taskmanager/models.py:1062 #, python-format msgid "Error deleting volume of instance %(id)s." msgstr "" #: trove/taskmanager/models.py:1077 #, python-format msgid "" "Resizing volume for instance %(instance_id)s from %(old_size)s GB to " "%(new_size)s GB." msgstr "" #: trove/taskmanager/models.py:1083 #, python-format msgid "Resized volume for instance %s successfully." msgstr "" #: trove/taskmanager/models.py:1086 #, python-format msgid "" "Resizing instance %(instance_id)s from flavor %(old_flavor)s to " "%(new_flavor)s." msgstr "" #: trove/taskmanager/models.py:1092 #, python-format msgid "Resized instance %s successfully." 
msgstr "" #: trove/taskmanager/models.py:1095 #, python-format msgid "Initiating migration to host %s." msgstr "" #: trove/taskmanager/models.py:1100 #, python-format msgid "Initiating backup for instance %s." msgstr "" #: trove/taskmanager/models.py:1119 #, python-format msgid "Failed to get replication snapshot from %s." msgstr "" #: trove/taskmanager/models.py:1133 #, python-format msgid "Failed to detach replica %s." msgstr "" #: trove/taskmanager/models.py:1146 #, python-format msgid "Failed to attach replica %s." msgstr "" #: trove/taskmanager/models.py:1225 #, python-format msgid "Cannot reboot instance. DB status is %s." msgstr "" #: trove/taskmanager/models.py:1231 #, python-format msgid "Rebooting instance %s." msgstr "" #: trove/taskmanager/models.py:1248 #, python-format msgid "Rebooted instance %s successfully." msgstr "" #: trove/taskmanager/models.py:1250 #, python-format msgid "Failed to reboot instance %(id)s: %(e)s" msgstr "" #: trove/taskmanager/models.py:1257 #, python-format msgid "Initiating datastore restart on instance %s." msgstr "" #: trove/taskmanager/models.py:1261 #, python-format msgid "Failed to initiate datastore restart on instance %s." msgstr "" #: trove/taskmanager/models.py:1331 #, python-format msgid "Deleting backup %s." msgstr "" #: trove/taskmanager/models.py:1344 #, python-format msgid "Error occurred when deleting from swift. Details: %s" msgstr "" #: trove/taskmanager/models.py:1352 #, python-format msgid "Deleted backup %s successfully." msgstr "" #: trove/taskmanager/models.py:1372 #, python-format msgid "" "%(func)s encountered an error when attempting to resize the volume for " "instance %(id)s. Setting service status to failed." msgstr "" #: trove/taskmanager/models.py:1382 #, python-format msgid "" "%(func)s encountered an error when attempting to resize the volume for " "instance %(id)s. Trying to recover by restarting the guest." msgstr "" #: trove/taskmanager/models.py:1390 #, python-format msgid "" "%(func)s encountered an error when attempting to resize the volume for " "instance %(id)s. Trying to recover by mounting the volume and then " "restarting the guest." msgstr "" #: trove/taskmanager/models.py:1399 #, python-format msgid "" "%(func)s encountered an error when attempting to resize the volume for " "instance %(id)s. Trying to recover by attaching and mounting the volume " "and then restarting the guest." msgstr "" #: trove/taskmanager/models.py:1507 #, python-format msgid "Failed to get volume %(vol_id)s" msgstr "" #: trove/taskmanager/models.py:1521 #, python-format msgid "Timeout trying to extend the volume %(vol_id)s for instance %(id)s" msgstr "" #: trove/taskmanager/models.py:1534 #, python-format msgid "" "Error encountered trying to verify extend for the volume %(vol_id)s for " "instance %(id)s" msgstr "" #: trove/taskmanager/models.py:1581 #, python-format msgid "" "Failed to resize instance %(id)s volume for server %(server_id)s. The " "instance must be in state %(state)s not %(inst_state)s." msgstr "" #: trove/taskmanager/models.py:1704 msgid "Exception during nova action." msgstr "" #: trove/taskmanager/models.py:1706 #, python-format msgid "Reverting action for instance %s" msgstr "" #: trove/taskmanager/models.py:1712 msgid "Restarting datastore." msgstr "" #: trove/taskmanager/models.py:1715 msgid "Cannot restart datastore because Nova server status is not ACTIVE" msgstr "" #: trove/taskmanager/models.py:1718 #, python-format msgid "Error resizing instance %s." 
msgstr "" #: trove/taskmanager/models.py:1780 msgid "Error sending reset_configuration call." msgstr "" #: trove/tests/db/migrations.py:74 #, python-format msgid "" "The following migration scripts are missing a downgrade implementation:\n" "\t%s" msgstr "" #: trove/tests/fakes/nova.py:282 #, python-format msgid "volume status = %s" msgstr "" #: trove/tests/fakes/nova.py:334 #, python-format msgid "Couldn't find server id %(id)s, collection=%(db)s" msgstr "" #: trove/tests/fakes/nova.py:354 #, python-format msgid "Simulated event ended, deleting server %s." msgstr "" #: trove/tests/fakes/nova.py:497 #, python-format msgid "Couldn't find volume id %(id)s, collection=%(db)s" msgstr "" trove-5.0.0/trove/module/0000775000567000056710000000000012701410521016431 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/module/__init__.py0000664000567000056710000000000012701410316020532 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/module/service.py0000664000567000056710000001355612701410316020457 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy from oslo_log import log as logging import trove.common.apischema as apischema from trove.common import cfg from trove.common.i18n import _ from trove.common import pagination from trove.common import wsgi from trove.datastore import models as datastore_models from trove.instance import models as instance_models from trove.instance import views as instance_views from trove.module import models from trove.module import views CONF = cfg.CONF LOG = logging.getLogger(__name__) class ModuleController(wsgi.Controller): schemas = apischema.module def index(self, req, tenant_id): context = req.environ[wsgi.CONTEXT_KEY] datastore = req.GET.get('datastore', '') if datastore and datastore.lower() != models.Modules.MATCH_ALL_NAME: ds, ds_ver = datastore_models.get_datastore_version( type=datastore) datastore = ds.id modules = models.Modules.load(context, datastore=datastore) view = views.ModulesView(modules) return wsgi.Result(view.data(), 200) def show(self, req, tenant_id, id): LOG.info(_("Showing module %s") % id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) module.instance_count = len(models.InstanceModules.load( context, module_id=module.id, md5=module.md5)) return wsgi.Result( views.DetailedModuleView(module).data(), 200) def create(self, req, body, tenant_id): name = body['module']['name'] LOG.info(_("Creating module '%s'") % name) context = req.environ[wsgi.CONTEXT_KEY] module_type = body['module']['module_type'] contents = body['module']['contents'] description = body['module'].get('description') all_tenants = body['module'].get('all_tenants', 0) module_tenant_id = None if all_tenants else tenant_id datastore = body['module'].get('datastore', {}).get('type', None) ds_version = body['module'].get('datastore', {}).get('version', None) auto_apply = body['module'].get('auto_apply', 0) visible = body['module'].get('visible', 1) live_update = 
body['module'].get('live_update', 0) module = models.Module.create( context, name, module_type, contents, description, module_tenant_id, datastore, ds_version, auto_apply, visible, live_update) view_data = views.DetailedModuleView(module) return wsgi.Result(view_data.data(), 200) def delete(self, req, tenant_id, id): LOG.info(_("Deleting module %s") % id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) models.Module.delete(context, module) return wsgi.Result(None, 200) def update(self, req, body, tenant_id, id): LOG.info(_("Updating module %s") % id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) original_module = copy.deepcopy(module) if 'name' in body['module']: module.name = body['module']['name'] if 'module_type' in body['module']: module.type = body['module']['module_type'] if 'contents' in body['module']: module.contents = body['module']['contents'] if 'description' in body['module']: module.description = body['module']['description'] if 'all_tenants' in body['module']: module.tenant_id = (None if body['module']['all_tenants'] else tenant_id) if 'datastore' in body['module']: if 'type' in body['module']['datastore']: module.datastore_id = body['module']['datastore']['type'] if 'version' in body['module']['datastore']: module.datastore_version_id = ( body['module']['datastore']['version']) if 'auto_apply' in body['module']: module.auto_apply = body['module']['auto_apply'] if 'visible' in body['module']: module.visible = body['module']['visible'] if 'live_update' in body['module']: module.live_update = body['module']['live_update'] models.Module.update(context, module, original_module) view_data = views.DetailedModuleView(module) return wsgi.Result(view_data.data(), 200) def instances(self, req, tenant_id, id): LOG.info(_("Getting instances for module %s") % id) context = req.environ[wsgi.CONTEXT_KEY] instance_modules, marker = models.InstanceModules.load( context, module_id=id) if instance_modules: instance_ids = [inst_mod.instance_id for inst_mod in instance_modules] include_clustered = ( req.GET.get('include_clustered', '').lower() == 'true') instances, marker = instance_models.Instances.load( context, include_clustered, instance_ids=instance_ids) else: instances = [] marker = None view = instance_views.InstancesView(instances, req=req) paged = pagination.SimplePaginatedDataView(req.url, 'instances', view, marker) return wsgi.Result(paged.data(), 200) trove-5.0.0/trove/module/views.py0000664000567000056710000000667112701410316020154 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
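# Illustrative sketch (editor's example, not part of the source tree):
# ModuleController.create() above reads optional fields out of
# body['module'] with these defaults; the payload values here are made up.
body = {'module': {
    'name': 'my-module',
    'module_type': 'ping',
    'contents': 'c2FtcGxlIGNvbnRlbnRz',   # opaque text, often base64
    'datastore': {'type': 'mysql', 'version': '5.6'},
}}
module = body['module']
defaults = {
    'all_tenants': module.get('all_tenants', 0),   # 0 -> owned by tenant
    'auto_apply': module.get('auto_apply', 0),
    'visible': module.get('visible', 1),
    'live_update': module.get('live_update', 0),
}
assert defaults == {'all_tenants': 0, 'auto_apply': 0,
                    'visible': 1, 'live_update': 0}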
# from oslo_log import log as logging from trove.datastore import models as datastore_models from trove.module import models LOG = logging.getLogger(__name__) class ModuleView(object): def __init__(self, module): self.module = module def data(self): module_dict = dict( id=self.module.id, name=self.module.name, type=self.module.type, description=self.module.description, tenant_id=self.module.tenant_id, datastore_id=self.module.datastore_id, datastore_version_id=self.module.datastore_version_id, auto_apply=self.module.auto_apply, md5=self.module.md5, visible=self.module.visible, created=self.module.created, updated=self.module.updated) # add extra data to make results more legible if self.module.tenant_id: # This should be the tenant name, but until we figure out where # to get it from, use the tenant_id tenant = self.module.tenant_id else: tenant = models.Modules.MATCH_ALL_NAME module_dict["tenant"] = tenant datastore = self.module.datastore_id datastore_version = self.module.datastore_version_id if datastore: ds, ds_ver = ( datastore_models.get_datastore_version( type=datastore, version=datastore_version)) datastore = ds.name if datastore_version: datastore_version = ds_ver.name else: datastore_version = models.Modules.MATCH_ALL_NAME else: datastore = models.Modules.MATCH_ALL_NAME datastore_version = models.Modules.MATCH_ALL_NAME module_dict["datastore"] = datastore module_dict["datastore_version"] = datastore_version return {"module": module_dict} class ModulesView(object): def __init__(self, modules): self.modules = modules def data(self): data = [] for module in self.modules: data.append(self.data_for_module(module)) return {"modules": data} def data_for_module(self, module): view = ModuleView(module) return view.data()['module'] class DetailedModuleView(ModuleView): def __init__(self, module): super(DetailedModuleView, self).__init__(module) def data(self, include_contents=False): return_value = super(DetailedModuleView, self).data() module_dict = return_value["module"] module_dict["live_update"] = self.module.live_update if hasattr(self.module, 'instance_count'): module_dict["instance_count"] = self.module.instance_count if include_contents: module_dict['contents'] = self.module.contents return {"module": module_dict} trove-5.0.0/trove/module/models.py0000664000567000056710000003513712701410316020301 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
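# Illustrative sketch (editor's example, not part of the source tree):
# ModuleView.data() above falls back to Modules.MATCH_ALL_NAME ('all')
# whenever a module is not bound to a datastore or version; here 'resolve'
# stands in for datastore_models.get_datastore_version().
MATCH_ALL_NAME = 'all'

def display_names(datastore_id, version_id, resolve=lambda d, v: (d, v)):
    if datastore_id:
        ds_name, ver_name = resolve(datastore_id, version_id)
        return ds_name, (ver_name if version_id else MATCH_ALL_NAME)
    return MATCH_ALL_NAME, MATCH_ALL_NAME

assert display_names(None, None) == ('all', 'all')
assert display_names('mysql', None) == ('mysql', 'all')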
# """Model classes that form the core of Module functionality.""" from datetime import datetime import hashlib from sqlalchemy.sql.expression import or_ from trove.common import cfg from trove.common import crypto_utils from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.datastore import models as datastore_models from trove.db import models from oslo_log import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class Modules(object): DEFAULT_LIMIT = CONF.modules_page_size ENCRYPT_KEY = CONF.module_aes_cbc_key VALID_MODULE_TYPES = [mt.lower() for mt in CONF.module_types] MATCH_ALL_NAME = 'all' @staticmethod def load(context, datastore=None): if context is None: raise TypeError("Argument context not defined.") elif id is None: raise TypeError("Argument is not defined.") query_opts = {'deleted': False} if datastore: if datastore.lower() == Modules.MATCH_ALL_NAME: datastore = None query_opts['datastore_id'] = datastore if context.is_admin: db_info = DBModule.find_all(**query_opts) if db_info.count() == 0: LOG.debug("No modules found for admin user") else: # build a query manually, since we need current tenant # plus the 'all' tenant ones query_opts['visible'] = True db_info = DBModule.query().filter_by(**query_opts) db_info = db_info.filter(or_(DBModule.tenant_id == context.tenant, DBModule.tenant_id.is_(None))) if db_info.count() == 0: LOG.debug("No modules found for tenant %s" % context.tenant) modules = db_info.all() return modules @staticmethod def load_auto_apply(context, datastore_id, datastore_version_id): """Return all the auto-apply modules for the given criteria.""" if context is None: raise TypeError("Argument context not defined.") elif id is None: raise TypeError("Argument is not defined.") query_opts = {'deleted': False, 'auto_apply': True} db_info = DBModule.query().filter_by(**query_opts) db_info = Modules.add_tenant_filter(db_info, context.tenant) db_info = Modules.add_datastore_filter(db_info, datastore_id) db_info = Modules.add_ds_version_filter(db_info, datastore_version_id) if db_info.count() == 0: LOG.debug("No auto-apply modules found for tenant %s" % context.tenant) modules = db_info.all() return modules @staticmethod def add_tenant_filter(query, tenant_id): return query.filter(or_(DBModule.tenant_id == tenant_id, DBModule.tenant_id.is_(None))) @staticmethod def add_datastore_filter(query, datastore_id): return query.filter(or_(DBModule.datastore_id == datastore_id, DBModule.datastore_id.is_(None))) @staticmethod def add_ds_version_filter(query, datastore_version_id): return query.filter(or_( DBModule.datastore_version_id == datastore_version_id, DBModule.datastore_version_id.is_(None))) @staticmethod def load_by_ids(context, module_ids): """Return all the modules for the given ids. Screens out the ones for other tenants, unless the user is admin. 
""" if context is None: raise TypeError("Argument context not defined.") elif id is None: raise TypeError("Argument is not defined.") modules = [] if module_ids: query_opts = {'deleted': False} db_info = DBModule.query().filter_by(**query_opts) if not context.is_admin: db_info = Modules.add_tenant_filter(db_info, context.tenant) db_info = db_info.filter(DBModule.id.in_(module_ids)) modules = db_info.all() return modules class Module(object): def __init__(self, context, module_id): self.context = context self.module_id = module_id @staticmethod def create(context, name, module_type, contents, description, tenant_id, datastore, datastore_version, auto_apply, visible, live_update): if module_type.lower() not in Modules.VALID_MODULE_TYPES: LOG.error("Valid module types: %s" % Modules.VALID_MODULE_TYPES) raise exception.ModuleTypeNotFound(module_type=module_type) Module.validate_action( context, 'create', tenant_id, auto_apply, visible) datastore_id, datastore_version_id = Module.validate_datastore( datastore, datastore_version) if Module.key_exists( name, module_type, tenant_id, datastore_id, datastore_version_id): datastore_str = datastore_id or Modules.MATCH_ALL_NAME ds_version_str = datastore_version_id or Modules.MATCH_ALL_NAME raise exception.ModuleAlreadyExists( name=name, datastore=datastore_str, ds_version=ds_version_str) md5, processed_contents = Module.process_contents(contents) module = DBModule.create( name=name, type=module_type.lower(), contents=processed_contents, description=description, tenant_id=tenant_id, datastore_id=datastore_id, datastore_version_id=datastore_version_id, auto_apply=auto_apply, visible=visible, live_update=live_update, md5=md5) return module # Certain fields require admin access to create/change/delete @staticmethod def validate_action(context, action_str, tenant_id, auto_apply, visible): error_str = None if not context.is_admin: option_strs = [] if tenant_id is None: option_strs.append(_("Tenant: %s") % Modules.MATCH_ALL_NAME) if auto_apply: option_strs.append(_("Auto: %s") % auto_apply) if not visible: option_strs.append(_("Visible: %s") % visible) if option_strs: error_str = "(" + " ".join(option_strs) + ")" if error_str: raise exception.ModuleAccessForbidden( action=action_str, options=error_str) @staticmethod def validate_datastore(datastore, datastore_version): datastore_id = None datastore_version_id = None if datastore: ds, ds_ver = datastore_models.get_datastore_version( type=datastore, version=datastore_version) datastore_id = ds.id if datastore_version: datastore_version_id = ds_ver.id elif datastore_version: msg = _("Cannot specify version without datastore") raise exception.BadRequest(message=msg) return datastore_id, datastore_version_id @staticmethod def key_exists(name, module_type, tenant_id, datastore_id, datastore_version_id): try: DBModule.find_by( name=name, type=module_type, tenant_id=tenant_id, datastore_id=datastore_id, datastore_version_id=datastore_version_id, deleted=False) return True except exception.ModelNotFoundError: return False # We encrypt the contents (which should be encoded already, since it # might be in binary format) and then encode them again so they can # be stored in a text field in the Trove database. 
@staticmethod def process_contents(contents): md5 = hashlib.md5(contents).hexdigest() encrypted_contents = crypto_utils.encrypt_data( contents, Modules.ENCRYPT_KEY) return md5, crypto_utils.encode_data(encrypted_contents) # Do the reverse to 'deprocess' the contents @staticmethod def deprocess_contents(processed_contents): encrypted_contents = crypto_utils.decode_data(processed_contents) return crypto_utils.decrypt_data( encrypted_contents, Modules.ENCRYPT_KEY) @staticmethod def delete(context, module): Module.validate_action( context, 'delete', module.tenant_id, module.auto_apply, module.visible) Module.enforce_live_update(module.id, module.live_update, module.md5) module.deleted = True module.deleted_at = datetime.utcnow() module.save() @staticmethod def enforce_live_update(module_id, live_update, md5): if not live_update: instances = DBInstanceModule.find_all( module_id=module_id, md5=md5, deleted=False).all() if instances: raise exception.ModuleAppliedToInstance() @staticmethod def load(context, module_id): module = None try: if context.is_admin: module = DBModule.find_by(id=module_id, deleted=False) else: module = DBModule.find_by( id=module_id, tenant_id=context.tenant, visible=True, deleted=False) except exception.ModelNotFoundError: # See if we have the module in the 'all' tenant section if not context.is_admin: try: module = DBModule.find_by( id=module_id, tenant_id=None, visible=True, deleted=False) except exception.ModelNotFoundError: pass # fall through to the raise below if not module: msg = _("Module with ID %s could not be found.") % module_id raise exception.ModelNotFoundError(msg) # Save the encrypted contents in case we need to put it back # when updating the record module.encrypted_contents = module.contents module.contents = Module.deprocess_contents(module.contents) return module @staticmethod def update(context, module, original_module): Module.enforce_live_update( original_module.id, original_module.live_update, original_module.md5) # we don't allow any changes to 'admin'-type modules, even if # the values changed aren't the admin ones. 
access_tenant_id = (None if (original_module.tenant_id is None or module.tenant_id is None) else module.tenant_id) access_auto_apply = original_module.auto_apply or module.auto_apply access_visible = original_module.visible and module.visible Module.validate_action( context, 'update', access_tenant_id, access_auto_apply, access_visible) ds_id, ds_ver_id = Module.validate_datastore( module.datastore_id, module.datastore_version_id) if module.contents != original_module.contents: md5, processed_contents = Module.process_contents(module.contents) module.md5 = md5 module.contents = processed_contents else: # on load the contents were decrypted, so # we need to put the encrypted contents back before we update module.contents = original_module.encrypted_contents if module.datastore_id: module.datastore_id = ds_id if module.datastore_version_id: module.datastore_version_id = ds_ver_id module.updated = datetime.utcnow() DBModule.save(module) class InstanceModules(object): @staticmethod def load(context, instance_id=None, module_id=None, md5=None): selection = {'deleted': False} if instance_id: selection['instance_id'] = instance_id if module_id: selection['module_id'] = module_id if md5: selection['md5'] = md5 db_info = DBInstanceModule.find_all(**selection) if db_info.count() == 0: LOG.debug("No instance module records found") limit = utils.pagination_limit( context.limit, Modules.DEFAULT_LIMIT) data_view = DBInstanceModule.find_by_pagination( 'modules', db_info, 'foo', limit=limit, marker=context.marker) next_marker = data_view.next_page_marker return data_view.collection, next_marker class InstanceModule(object): def __init__(self, context, instance_id, module_id): self.context = context self.instance_id = instance_id self.module_id = module_id @staticmethod def create(context, instance_id, module_id, md5): instance_module = DBInstanceModule.create( instance_id=instance_id, module_id=module_id, md5=md5) return instance_module @staticmethod def delete(context, instance_module): instance_module.deleted = True instance_module.deleted_at = datetime.utcnow() instance_module.save() @staticmethod def load(context, instance_id, module_id, deleted=False): instance_module = None try: instance_module = DBInstanceModule.find_by( instance_id=instance_id, module_id=module_id, deleted=deleted) except exception.ModelNotFoundError: pass return instance_module @staticmethod def update(context, instance_module): instance_module.updated = datetime.utcnow() DBInstanceModule.save(instance_module) class DBInstanceModule(models.DatabaseModelBase): _data_fields = [ 'id', 'instance_id', 'module_id', 'md5', 'created', 'updated', 'deleted', 'deleted_at'] class DBModule(models.DatabaseModelBase): _data_fields = [ 'id', 'name', 'type', 'contents', 'description', 'tenant_id', 'datastore_id', 'datastore_version_id', 'auto_apply', 'visible', 'live_update', 'md5', 'created', 'updated', 'deleted', 'deleted_at'] def persisted_models(): return {'modules': DBModule, 'instance_modules': DBInstanceModule} trove-5.0.0/trove/extensions/0000775000567000056710000000000012701410521017343 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/account/0000775000567000056710000000000012701410521020777 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/account/__init__.py0000664000567000056710000000000012701410316023100 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/account/service.py0000664000567000056710000000363712701410316023024 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.account import models from trove.extensions.account import views LOG = logging.getLogger(__name__) class AccountController(wsgi.Controller): """Controller for account functionality.""" schemas = apischema.account @admin_context def show(self, req, tenant_id, id): """Return a account and instances associated with a single account.""" LOG.info(_("req : '%s'\n\n") % req) LOG.info(_("Showing account information for '%(account)s' " "to '%(tenant)s'") % {'account': id, 'tenant': tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] account = models.Account.load(context, id) return wsgi.Result(views.AccountView(account).data(), 200) @admin_context def index(self, req, tenant_id): """Return a list of all accounts with non-deleted instances.""" LOG.info(_("req : '%s'\n\n") % req) LOG.info(_("Showing all accounts with instances for '%s'") % tenant_id) accounts_summary = models.AccountsSummary.load() return wsgi.Result(views.AccountsView(accounts_summary).data(), 200) trove-5.0.0/trove/extensions/account/views.py0000664000567000056710000000213512701410316022511 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class AccountsView(object): def __init__(self, accounts_summary): self.accounts_summary = accounts_summary def data(self): return {'accounts': self.accounts_summary.accounts} class AccountView(object): def __init__(self, account): self.account = account def data(self): return { 'account': { 'id': self.account.id, 'instance_ids': self.account.instance_ids, } } trove-5.0.0/trove/extensions/account/models.py0000664000567000056710000000372212701410316022642 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.instance.models import DBInstance LOG = logging.getLogger(__name__) class Account(object): """Shows all trove instance ids owned by an account.""" def __init__(self, id, instance_ids): self.id = id self.instance_ids = instance_ids @staticmethod def load(context, id): db_infos = DBInstance.find_all(tenant_id=id, deleted=False) instance_ids = [] for db_info in db_infos: instance_ids.append(db_info.id) return Account(id, instance_ids) class AccountsSummary(object): def __init__(self, accounts): self.accounts = accounts @classmethod def load(cls): # TODO(pdmars): This should probably be changed to a more generic # database filter query if one is added, however, this should suffice # for now. db_infos = DBInstance.find_all(deleted=False) tenant_ids_for_instances = [db_info.tenant_id for db_info in db_infos] tenant_ids = set(tenant_ids_for_instances) LOG.debug("All tenants with instances: %s" % tenant_ids) accounts = [] for tenant_id in tenant_ids: num_instances = tenant_ids_for_instances.count(tenant_id) accounts.append({'id': tenant_id, 'num_instances': num_instances}) return cls(accounts) trove-5.0.0/trove/extensions/mgmt/0000775000567000056710000000000012701410521020307 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/quota/0000775000567000056710000000000012701410521021440 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/quota/__init__.py0000664000567000056710000000000012701410316023541 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/quota/service.py0000664000567000056710000000511612701410316023457 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
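# Illustrative sketch (editor's example, not part of the source tree):
# the per-tenant instance counting done by AccountsSummary.load() above,
# using a plain list in place of DBInstance rows. Tenant ids are made up.
tenant_ids_for_instances = ['t1', 't1', 't2']
accounts = [{'id': t, 'num_instances': tenant_ids_for_instances.count(t)}
            for t in set(tenant_ids_for_instances)]
assert {'id': 't1', 'num_instances': 2} in accounts
assert {'id': 't2', 'num_instances': 1} in accounts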
from oslo_log import log as logging from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.quota import views from trove.quota.models import Quota from trove.quota.quota import QUOTAS as quota_engine LOG = logging.getLogger(__name__) class QuotaController(wsgi.Controller): """Controller for quota functionality.""" @admin_context def show(self, req, tenant_id, id): """Return all quotas for this tenant.""" LOG.info(_("Indexing quota info for tenant '%(id)s'\n" "req : '%(req)s'\n\n") % { "id": id, "req": req}) quotas = quota_engine.get_all_quotas_by_tenant(id) return wsgi.Result(views.QuotaView(quotas).data(), 200) @admin_context def update(self, req, body, tenant_id, id): LOG.info(_("Updating quota limits for tenant '%(id)s'\n" "req : '%(req)s'\n\n") % { "id": id, "req": req}) if not body: raise exception.BadRequest(_("Invalid request body.")) quotas = {} quota = None registered_resources = quota_engine.resources for resource, limit in body['quotas'].items(): if limit is None: continue if resource == "xmlns": continue if resource not in registered_resources: raise exception.QuotaResourceUnknown(unknown=resource) try: quota = Quota.find_by(tenant_id=id, resource=resource) quota.hard_limit = limit quota.save() except exception.ModelNotFoundError: quota = Quota.create(tenant_id=id, resource=resource, hard_limit=limit) quotas[resource] = quota return wsgi.Result(views.QuotaView(quotas).data(), 200) trove-5.0.0/trove/extensions/mgmt/quota/views.py0000664000567000056710000000160412701410316023152 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class QuotaView(object): def __init__(self, quotas): self.quotas = quotas def data(self): rtn = {} for resource_name, quota in self.quotas.items(): rtn[resource_name] = quota.hard_limit return {'quotas': rtn} trove-5.0.0/trove/extensions/mgmt/configuration/0000775000567000056710000000000012701410521023156 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/configuration/__init__.py0000664000567000056710000000000012701410316025257 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/configuration/service.py0000664000567000056710000001322012701410316025170 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
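# Illustrative sketch (editor's example, not part of the source tree):
# the upsert loop in QuotaController.update() above, reduced to plain
# dictionaries. 'store' stands in for the Quota model; limits of None and
# the legacy 'xmlns' key are skipped, unknown resources are rejected.
registered = {'instances', 'volumes', 'backups'}
store = {'instances': 5}                      # existing hard limits

def update_quotas(body):
    for resource, limit in body['quotas'].items():
        if limit is None or resource == 'xmlns':
            continue
        if resource not in registered:
            raise ValueError('unknown quota resource: %s' % resource)
        store[resource] = limit               # find-or-create, then save
    return store

assert update_quotas({'quotas': {'instances': 10, 'volumes': None}}) == \
    {'instances': 10}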
from oslo_log import log as logging import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.configuration import models as config_models from trove.datastore import models as ds_models from trove.extensions.mgmt.configuration import views LOG = logging.getLogger(__name__) class ConfigurationsParameterController(wsgi.Controller): """Controller for configuration parameters functionality.""" schemas = apischema.mgmt_configuration @admin_context def index(self, req, tenant_id, version_id): """List all configuration parameters.""" ds_version = ds_models.DatastoreVersion.load_by_uuid(version_id) config_params = config_models.DatastoreConfigurationParameters rules = config_params.load_parameters( ds_version.id, show_deleted=True) return wsgi.Result(views.MgmtConfigurationParametersView(rules).data(), 200) @admin_context def show(self, req, tenant_id, version_id, id): """Show a configuration parameter.""" ds_models.DatastoreVersion.load_by_uuid(version_id) config_params = config_models.DatastoreConfigurationParameters rule = config_params.load_parameter_by_name( version_id, id, show_deleted=True) return wsgi.Result(views.MgmtConfigurationParameterView(rule).data(), 200) def _validate_data_type(self, parameter): min_size = None max_size = None data_type = parameter['data_type'] if data_type == "integer": if 'max_size' not in parameter: raise exception.BadRequest(_("max_size is required for " "integer data type.")) if 'min_size' not in parameter: raise exception.BadRequest(_("min_size is required for " "integer data type.")) max_size = int(parameter['max_size']) min_size = int(parameter['min_size']) if max_size < min_size: raise exception.BadRequest( _("max_size must be greater than or equal to min_size.")) return data_type, min_size, max_size @admin_context def create(self, req, body, tenant_id, version_id): """Create configuration parameter for datastore version.""" LOG.info(_("Creating configuration parameter for datastore")) LOG.debug("req : '%s'\n\n" % req) LOG.debug("body : '%s'\n\n" % body) if not body: raise exception.BadRequest(_("Invalid request body.")) parameter = body['configuration-parameter'] name = parameter['name'] restart_required = bool(parameter['restart_required']) data_type, min_size, max_size = self._validate_data_type(parameter) datastore_version = ds_models.DatastoreVersion.load_by_uuid(version_id) rule = config_models.DatastoreConfigurationParameters.create( name=name, datastore_version_id=datastore_version.id, restart_required=restart_required, data_type=data_type, max_size=max_size, min_size=min_size ) return wsgi.Result( views.MgmtConfigurationParameterView(rule).data(), 200) @admin_context def update(self, req, body, tenant_id, version_id, id): """Updating configuration parameter for datastore version.""" LOG.info(_("Updating configuration parameter for datastore")) LOG.debug("req : '%s'\n\n" % req) LOG.debug("body : '%s'\n\n" % body) if not body: raise exception.BadRequest(_("Invalid request body.")) parameter = body['configuration-parameter'] restart_required = bool(parameter['restart_required']) data_type, min_size, max_size = self._validate_data_type(parameter) ds_models.DatastoreVersion.load_by_uuid(version_id) ds_config_params = config_models.DatastoreConfigurationParameters param = ds_config_params.load_parameter_by_name( version_id, id) param.restart_required = restart_required param.data_type = data_type param.max_size = 
max_size param.min_size = min_size param.save() return wsgi.Result( views.MgmtConfigurationParameterView(param).data(), 200) @admin_context def delete(self, req, tenant_id, version_id, id): """Delete configuration parameter for datastore version.""" LOG.info(_("Deleting configuration parameter for datastore")) LOG.debug("req : '%s'\n\n" % req) ds_config_params = config_models.DatastoreConfigurationParameters try: ds_config_params.delete(version_id, id) except exception.NotFound: raise exception.BadRequest(_("Parameter %s does not exist in the " "database.") % id) return wsgi.Result(None, 204) trove-5.0.0/trove/extensions/mgmt/configuration/views.py0000664000567000056710000000351412701410316024672 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging LOG = logging.getLogger(__name__) class MgmtConfigurationParameterView(object): def __init__(self, config): self.config = config def data(self): # v1 api is to be a 'true' or 'false' json boolean instead of 1/0 restart_required = True if self.config.restart_required else False ret = { "name": self.config.name, "datastore_version_id": self.config.datastore_version_id, "restart_required": restart_required, "type": self.config.data_type, "deleted": self.config.deleted, "deleted_at": self.config.deleted_at, } if self.config.max_size: ret["max_size"] = int(self.config.max_size) if self.config.min_size: ret["min_size"] = int(self.config.min_size) return ret class MgmtConfigurationParametersView(object): def __init__(self, configs): self.configs = configs def data(self): params = [] LOG.debug(self.configs.__dict__) for p in self.configs: param = MgmtConfigurationParameterView(p) params.append(param.data()) return {"configuration-parameters": params} trove-5.0.0/trove/extensions/mgmt/upgrade/0000775000567000056710000000000012701410521021736 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/upgrade/__init__.py0000664000567000056710000000000012701410316024037 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/upgrade/service.py0000664000567000056710000000327012701410316023754 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
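# Illustrative sketch (editor's example, not part of the source tree):
# the integer-bounds rules enforced by _validate_data_type() above, as a
# standalone check. The real method raises BadRequest; ValueError is used
# here to keep the example self-contained.
def validate_data_type(parameter):
    data_type = parameter['data_type']
    min_size = max_size = None
    if data_type == 'integer':
        # both bounds are mandatory for integers, and must be ordered
        max_size = int(parameter['max_size'])
        min_size = int(parameter['min_size'])
        if max_size < min_size:
            raise ValueError('max_size must be >= min_size')
    return data_type, min_size, max_size

assert validate_data_type({'data_type': 'integer',
                           'min_size': '0', 'max_size': '65535'}) == \
    ('integer', 0, 65535)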
from oslo_log import log as logging import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender LOG = logging.getLogger(__name__) class UpgradeController(wsgi.Controller): """ Controller for guest agent upgrade """ schemas = apischema.upgrade @admin_context def create(self, req, body, tenant_id, instance_id): LOG.info(_("Sending upgrade notifications\nreq : '%(req)s'\n" "Admin tenant_id: %(tenant_id)s") % {"tenant_id": tenant_id, "req": req}) context = req.environ.get(wsgi.CONTEXT_KEY) upgrade = body['upgrade'] instance_version = upgrade.get('instance_version') location = upgrade.get('location') metadata = upgrade.get('metadata') send = UpgradeMessageSender.create( context, instance_id, instance_version, location, metadata) send() return wsgi.Result(None, 202) trove-5.0.0/trove/extensions/mgmt/upgrade/models.py0000664000567000056710000000316512701410316023602 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.remote import guest_client class UpgradeMessageSender(object): """ This class handles the business logic for sending an rpc message to the guest """ @staticmethod def create(context, instance_id, instance_version, location, metadata=None): instance_id = UpgradeMessageSender._validate(instance_id, 36) if instance_version: instance_version = UpgradeMessageSender._validate( instance_version, 255) if location: location = UpgradeMessageSender._validate(location, 255) def _create_resources(): guest_client(context, instance_id).upgrade( instance_version, location, metadata) return _create_resources @staticmethod def _validate(s, max_length): if s is None: raise ValueError() s = s.strip() length = len(s) if length < 1 or length > max_length: raise ValueError() return s trove-5.0.0/trove/extensions/mgmt/volume/0000775000567000056710000000000012701410521021616 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/volume/__init__.py0000664000567000056710000000000012701410316023717 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/volume/service.py0000664000567000056710000000256512701410316023642 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
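# Illustrative sketch (editor's example, not part of the source tree):
# the validate-then-defer pattern used by UpgradeMessageSender.create()
# above -- arguments are checked eagerly, while the guest RPC call is
# wrapped in a closure that the controller later invokes as send().
# 'send_rpc' stands in for guest_client(context, instance_id).upgrade().
def validate(s, max_length):
    if s is None:
        raise ValueError()
    s = s.strip()
    if not 1 <= len(s) <= max_length:
        raise ValueError()
    return s

sent = []

def create(instance_id, instance_version, send_rpc=sent.append):
    instance_id = validate(instance_id, 36)   # raises before deferring

    def _create_resources():
        send_rpc('upgrade %s to %s' % (instance_id, instance_version))
    return _create_resources

send = create('1234-abcd', 'v2')
send()
assert sent == ['upgrade 1234-abcd to v2']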
from oslo_log import log as logging from trove.common.auth import admin_context from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.volume import models from trove.extensions.mgmt.volume import views LOG = logging.getLogger(__name__) class StorageController(wsgi.Controller): """Controller for storage device functionality.""" @admin_context def index(self, req, tenant_id): """Return all storage devices.""" LOG.info(_("req : '%s'\n\n") % req) LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id) context = req.environ[wsgi.CONTEXT_KEY] storages = models.StorageDevices.load(context) return wsgi.Result(views.StoragesView(storages).data(), 200) trove-5.0.0/trove/extensions/mgmt/volume/views.py0000664000567000056710000000263712701410316023337 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class StorageView(object): def __init__(self, storage): self.storage = storage def data(self): return {'name': self.storage.name, 'type': self.storage.type, 'capacity': {'total': self.storage.total_space, 'available': self.storage.total_avail}, 'provision': {'total': self.storage.prov_total, 'available': self.storage.prov_avail, 'percent': self.storage.prov_percent}, 'used': self.storage.used} class StoragesView(object): def __init__(self, storages): self.storages = storages def data(self): data = [StorageView(storage).data() for storage in self.storages] return {'devices': data} trove-5.0.0/trove/extensions/mgmt/volume/models.py0000664000567000056710000000321012701410316023451 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Model classes that extend the instances functionality for volumes. 
""" from oslo_log import log as logging from trove.common.remote import create_cinder_client LOG = logging.getLogger(__name__) class StorageDevice(object): def __init__(self, storage_info): self.name = storage_info.name self.type = storage_info.type self.total_space = storage_info.capacity['total'] self.total_avail = storage_info.capacity['available'] self.prov_total = storage_info.provision['total'] self.prov_avail = storage_info.provision['available'] self.prov_percent = storage_info.provision['percent'] self.used = storage_info.used class StorageDevices(object): @staticmethod def load(context): client = create_cinder_client(context) rdstorages = client.rdstorage.list() for rdstorage in rdstorages: LOG.debug("rdstorage=" + str(rdstorage)) return [StorageDevice(storage_info) for storage_info in rdstorages] trove-5.0.0/trove/extensions/mgmt/__init__.py0000664000567000056710000000000012701410316022410 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/clusters/0000775000567000056710000000000012701410521022153 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/clusters/__init__.py0000664000567000056710000000000012701410316024254 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/clusters/service.py0000664000567000056710000000646312701410316024200 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.cluster.service import ClusterController import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.clusters import models from trove.extensions.mgmt.clusters import views LOG = logging.getLogger(__name__) class MgmtClusterController(ClusterController): """Controller for cluster functionality.""" schemas = apischema.mgmt_cluster @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] return action_schema.get(action_type, {}) @admin_context def index(self, req, tenant_id): """Return a list of clusters.""" LOG.debug("Showing a list of clusters for tenant '%s'." 
% tenant_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] deleted = None deleted_q = req.GET.get('deleted', '').lower() if deleted_q in ['true']: deleted = True elif deleted_q in ['false']: deleted = False clusters = models.MgmtCluster.load_all(context, deleted=deleted) view_cls = views.MgmtClustersView return wsgi.Result(view_cls(clusters, req=req).data(), 200) @admin_context def show(self, req, tenant_id, id): """Return a single cluster.""" LOG.info(_("Showing cluster for tenant '%(tenant_id)s'.\n" "req : '%(req)s'\n" "id : '%(id)s'") % { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.MgmtCluster.load(context, id) return wsgi.Result( views.load_mgmt_view(cluster, req=req).data(), 200) @admin_context def action(self, req, body, tenant_id, id): LOG.debug("Committing an action against cluster %(cluster)s for " "tenant '%(tenant)s'." % {'cluster': id, 'tenant': tenant_id}) LOG.info(_("req : '%s'\n\n") % req) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.MgmtCluster.load(context=context, id=id) if 'reset-task' in body: return self._action_reset_task(context, cluster, body) else: msg = _("Invalid cluster action requested.") raise exception.BadRequest(msg) def _action_reset_task(self, context, cluster, body): cluster.reset_task() return wsgi.Result(None, 202) trove-5.0.0/trove/extensions/mgmt/clusters/views.py0000664000567000056710000000375112701410316023672 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
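# Illustrative usage sketch for the view classes below. The helper name
# _example_render_mgmt_cluster is hypothetical and not used by Trove;
# `cluster` is assumed to be a loaded MgmtCluster model and `req` the
# current request, as supplied by MgmtClusterController in service.py.
def _example_render_mgmt_cluster(cluster, req):
    # load_mgmt_view() (defined at the bottom of this module) picks the
    # datastore-specific view class via the cluster API strategy; data()
    # then layers the admin-only fields (tenant_id, deleted, deleted_at)
    # on top of the regular user-facing ClusterView payload.
    return load_mgmt_view(cluster, req=req, load_servers=False).data()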
from trove.cluster.views import ClusterView from trove.common.strategies.cluster import strategy class MgmtClusterView(ClusterView): def __init__(self, cluster, req=None, load_servers=True): super(MgmtClusterView, self).__init__(cluster, req, load_servers) def data(self): result = super(MgmtClusterView, self).data() result['cluster']['tenant_id'] = self.cluster.tenant_id result['cluster']['deleted'] = bool(self.cluster.deleted) if self.cluster.deleted_at: result['cluster']['deleted_at'] = self.cluster.deleted_at return result def build_instances(self): raise NotImplementedError() class MgmtClustersView(object): """Shows a list of MgmtCluster objects.""" def __init__(self, clusters, req=None): self.clusters = clusters self.req = req def data(self): data = [] for cluster in self.clusters: data.append(self.data_for_cluster(cluster)) return {'clusters': data} def data_for_cluster(self, cluster): view = load_mgmt_view(cluster, req=self.req, load_servers=False) return view.data()['cluster'] def load_mgmt_view(cluster, req, load_servers=True): manager = cluster.datastore_version.manager return strategy.load_api_strategy(manager).mgmt_cluster_view_class( cluster, req, load_servers) trove-5.0.0/trove/extensions/mgmt/clusters/models.py0000664000567000056710000000336312701410316024017 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.cluster import models as cluster_models from trove.instance import models as instance_models class MgmtCluster(cluster_models.Cluster): def __init__(self, context, db_info, datastore=None, datastore_version=None): super(MgmtCluster, self).__init__(context, db_info, datastore, datastore_version) @classmethod def load(cls, context, id): db_cluster = cluster_models.DBCluster.find_by(id=id) return cls(context, db_cluster) @classmethod def load_all(cls, context, deleted=None): args = {} if deleted is not None: args['deleted'] = deleted db_infos = cluster_models.DBCluster.find_all(**args) clusters = [cls(context, db_info) for db_info in db_infos] return clusters @property def instances(self): db_instances = instance_models.DBInstance.find_all( cluster_id=self.db_info.id, deleted=False) instances = [instance_models.load_any_instance( self.context, db_inst.id) for db_inst in db_instances] return instances trove-5.0.0/trove/extensions/mgmt/host/0000775000567000056710000000000012701410521021264 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/host/instance/0000775000567000056710000000000012701410521023070 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/host/instance/__init__.py0000664000567000056710000000000012701410316025171 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/host/instance/service.py0000664000567000056710000000441612701410316025111 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.host import models LOG = logging.getLogger(__name__) class HostInstanceController(wsgi.Controller): """Controller for all instances on specific hosts.""" def action(self, req, body, tenant_id, host_id): LOG.info(_("Committing an ACTION against host %(host_id)s for " "tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % {"req": req, "host_id": host_id, "tenant_id": tenant_id}) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] host = models.DetailedHost.load(context, host_id) _actions = {'update': self._action_update} selected_action = None for key in body: if key in _actions: if selected_action is not None: msg = _("Only one action can be specified per request.") raise exception.BadRequest(msg) selected_action = _actions[key] else: msg = _("Invalid host action: %s") % key raise exception.BadRequest(msg) if selected_action: return selected_action(context, host, body) else: raise exception.BadRequest(_("Invalid request body.")) def _action_update(self, context, host, body): LOG.debug("Updating all instances for host: %s" % host.name) host.update_all(context) return wsgi.Result(None, 202) trove-5.0.0/trove/extensions/mgmt/host/__init__.py0000664000567000056710000000000012701410316023365 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/host/service.py0000664000567000056710000000347112701410316023305 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
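# Illustrative usage sketch for HostController below. The helper name
# _example_list_hosts is hypothetical and not part of Trove; `context` is
# assumed to be an admin request context.
def _example_list_hosts(context):
    # SimpleHost.load_all() (see models.py) proxies to the nova "rdhosts"
    # extension; HostsView then shapes the result into {'hosts': [...]}.
    hosts = models.SimpleHost.load_all(context)
    return views.HostsView(hosts).data()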
from oslo_log import log as logging from trove.common.auth import admin_context from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.host import models from trove.extensions.mgmt.host import views from trove.instance.service import InstanceController LOG = logging.getLogger(__name__) class HostController(InstanceController): """Controller for instance functionality.""" @admin_context def index(self, req, tenant_id, detailed=False): """Return all hosts.""" LOG.info(_("req : '%s'\n\n") % req) LOG.info(_("Indexing a host for tenant '%s'") % tenant_id) context = req.environ[wsgi.CONTEXT_KEY] hosts = models.SimpleHost.load_all(context) return wsgi.Result(views.HostsView(hosts).data(), 200) @admin_context def show(self, req, tenant_id, id): """Return a single host.""" LOG.info(_("req : '%s'\n\n") % req) LOG.info(_("Showing a host for tenant '%s'") % tenant_id) LOG.info(_("id : '%s'\n\n") % id) context = req.environ[wsgi.CONTEXT_KEY] host = models.DetailedHost.load(context, id) return wsgi.Result(views.HostDetailedView(host).data(), 200) trove-5.0.0/trove/extensions/mgmt/host/views.py0000664000567000056710000000261712701410316023003 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class HostView(object): def __init__(self, host): self.host = host def data(self): return { 'instanceCount': self.host.instance_count, 'name': self.host.name } class HostDetailedView(object): def __init__(self, host): self.host = host def data(self): return {'host': { 'instances': self.host.instances, 'name': self.host.name, 'percentUsed': self.host.percent_used, 'totalRAM': self.host.total_ram, 'usedRAM': self.host.used_ram }} class HostsView(object): def __init__(self, hosts): self.hosts = hosts def data(self): data = [HostView(host).data() for host in self.hosts] return {'hosts': data} trove-5.0.0/trove/extensions/mgmt/host/models.py0000664000567000056710000000735412701410316023134 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Model classes that extend the instances functionality for MySQL instances. 
""" from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common.remote import create_guest_client from trove.common.remote import create_nova_client from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.models import SimpleInstance LOG = logging.getLogger(__name__) class SimpleHost(object): def __init__(self, name, instance_count): self.name = name self.instance_count = instance_count @staticmethod def load_all(context): client = create_nova_client(context) LOG.debug("Client.rdhosts=" + str(client.rdhosts)) rdhosts = client.rdhosts.list() LOG.debug("RDHOSTS=" + str(rdhosts)) for rdhost in rdhosts: LOG.debug("rdhost=" + str(rdhost)) return [SimpleHost(rdhost.name, rdhost.instanceCount) for rdhost in rdhosts] class DetailedHost(object): def __init__(self, host_info): self.name = host_info.name self.percent_used = host_info.percentUsed self.total_ram = host_info.totalRAM self.used_ram = host_info.usedRAM self.instances = host_info.instances for instance in self.instances: instance['server_id'] = instance['uuid'] del instance['uuid'] try: db_info = DBInstance.find_by( compute_instance_id=instance['server_id']) instance['id'] = db_info.id instance['tenant_id'] = db_info.tenant_id status = InstanceServiceStatus.find_by( instance_id=db_info.id) instance_info = SimpleInstance(None, db_info, status) instance['status'] = instance_info.status except exception.TroveError as re: LOG.error(re) LOG.error(_("Compute Instance ID found with no associated RD " "instance: %s.") % instance['server_id']) instance['id'] = None def update_all(self, context): num_i = len(self.instances) LOG.debug("Host %s has %s instances to update." % (self.name, num_i)) failed_instances = [] for instance in self.instances: client = create_guest_client(context, instance['id']) try: client.update_guest() except exception.TroveError as re: LOG.error(re) LOG.error(_("Unable to update instance: %s.") % instance['id']) failed_instances.append(instance['id']) if len(failed_instances) > 0: msg = _("Failed to update instances: %s.") % failed_instances raise exception.UpdateGuestError(msg) @staticmethod def load(context, name): client = create_nova_client(context) try: return DetailedHost(client.rdhosts.get(name)) except nova_exceptions.NotFound: raise exception.NotFound(uuid=name) trove-5.0.0/trove/extensions/mgmt/instances/0000775000567000056710000000000012701410521022276 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/instances/__init__.py0000664000567000056710000000000012701410316024377 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/instances/service.py0000664000567000056710000002075612701410316024324 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.backup.models import Backup import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.instances import models from trove.extensions.mgmt.instances import views from trove.extensions.mgmt.instances.views import DiagnosticsView from trove.extensions.mgmt.instances.views import HwInfoView from trove.extensions.mysql import models as mysql_models from trove.instance import models as instance_models from trove.instance.service import InstanceController LOG = logging.getLogger(__name__) class MgmtInstanceController(InstanceController): """Controller for instance functionality.""" schemas = apischema.mgmt_instance @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] return action_schema.get(action_type, {}) @admin_context def index(self, req, tenant_id, detailed=False): """Return all instances.""" LOG.info(_("Indexing a database instance for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] deleted = None deleted_q = req.GET.get('deleted', '').lower() if deleted_q in ['true']: deleted = True elif deleted_q in ['false']: deleted = False clustered_q = req.GET.get('include_clustered', '').lower() include_clustered = clustered_q == 'true' try: instances = models.load_mgmt_instances( context, deleted=deleted, include_clustered=include_clustered) except nova_exceptions.ClientException as e: LOG.error(e) return wsgi.Result(str(e), 403) view_cls = views.MgmtInstancesView return wsgi.Result(view_cls(instances, req=req).data(), 200) @admin_context def show(self, req, tenant_id, id): """Return a single instance.""" LOG.info(_("Showing a database instance %(id)s for tenant " "'%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] deleted_q = req.GET.get('deleted', '').lower() include_deleted = deleted_q == 'true' server = models.DetailedMgmtInstance.load(context, id, include_deleted) root_history = mysql_models.RootHistory.load(context=context, instance_id=id) return wsgi.Result( views.MgmtInstanceDetailView( server, req=req, root_history=root_history).data(), 200) @admin_context def action(self, req, body, tenant_id, id): LOG.info(_("Committing an ACTION against a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req, "id": id}) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) _actions = { 'stop': self._action_stop, 'reboot': self._action_reboot, 'migrate': self._action_migrate, 'reset-task-status': self._action_reset_task_status } selected_action = None for key in body: if key in _actions: if selected_action is not None: msg = _("Only one action can be specified per request.") raise exception.BadRequest(msg) selected_action = _actions[key] else: msg = _("Invalid instance action: %s") % key raise exception.BadRequest(msg) if selected_action: return selected_action(context, instance, body) else: raise exception.BadRequest(_("Invalid request body.")) def _action_stop(self, context, instance, body): LOG.debug("Stopping MySQL on instance %s." 
% instance.id) instance.stop_db() return wsgi.Result(None, 202) def _action_reboot(self, context, instance, body): LOG.debug("Rebooting instance %s." % instance.id) instance.reboot() return wsgi.Result(None, 202) def _action_migrate(self, context, instance, body): LOG.debug("Migrating instance %s." % instance.id) LOG.debug("body['migrate']= %s" % body['migrate']) host = body['migrate'].get('host', None) instance.migrate(host) return wsgi.Result(None, 202) def _action_reset_task_status(self, context, instance, body): LOG.debug("Setting Task-Status to NONE on instance %s." % instance.id) instance.reset_task_status() LOG.debug("Failing backups for instance %s." % instance.id) Backup.fail_for_instance(instance.id) return wsgi.Result(None, 202) @admin_context def root(self, req, tenant_id, id): """Return the date and time root was enabled on an instance, if ever. """ LOG.info(_("Showing root history for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] try: instance_models.Instance.load(context=context, id=id) except exception.TroveError as e: LOG.error(e) return wsgi.Result(str(e), 404) rhv = views.RootHistoryView(id) reh = mysql_models.RootHistory.load(context=context, instance_id=id) if reh: rhv = views.RootHistoryView(reh.id, enabled=reh.created, user_id=reh.user) return wsgi.Result(rhv.data(), 200) @admin_context def hwinfo(self, req, tenant_id, id): """Return a single instance hardware info.""" LOG.info(_("Showing hardware info for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) hwinfo = instance.get_hwinfo() return wsgi.Result(HwInfoView(id, hwinfo).data(), 200) @admin_context def diagnostics(self, req, tenant_id, id): """Return instance diagnostics for a single instance.""" LOG.info(_("Showing diagnostic info for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) diagnostics = instance.get_diagnostics() return wsgi.Result(DiagnosticsView(id, diagnostics).data(), 200) @admin_context def rpc_ping(self, req, tenant_id, id): """Checks if instance is reachable via rpc.""" LOG.info(_("Sending RPC PING for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n") % { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) instance.rpc_ping() return wsgi.Result(None, 204) trove-5.0.0/trove/extensions/mgmt/instances/views.py0000664000567000056710000001361512701410316024015 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
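# Rough sketch of what the management views below add on top of the
# user-facing InstanceDetailView (field values hypothetical):
#
#     {'instance': {..., 'server': {'id': ..., 'host': ..., 'deleted': ...},
#                   'service_status': 'RUNNING', 'tenant_id': ...,
#                   'deleted': False, 'task_description': ...}}
#
# The helper below is hypothetical and mirrors what
# MgmtInstancesView.data_for_instance() does for each list entry.
def _example_render_mgmt_instance(instance, req):
    return MgmtInstanceView(instance, req=req).data()['instance']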
from trove.instance.views import InstanceDetailView class MgmtInstanceView(InstanceDetailView): def __init__(self, instance, req=None): super(MgmtInstanceView, self).__init__(instance, req) def data(self): result = super(MgmtInstanceView, self).data() if self.instance.server is None: result['instance']['server'] = None else: server = self.instance.server result['instance']['server'] = { 'id': server.id, 'name': server.name, 'status': server.status, 'tenant_id': server.tenant_id, } if hasattr(server, 'host'): result['instance']['server']['host'] = server.host else: result['instance']['server']['host'] = server.hostId if hasattr(server, 'deleted'): result['instance']['server']['deleted'] = server.deleted if hasattr(server, 'deleted_at'): result['instance']['server']['deleted_at'] = server.deleted_at if hasattr(server, 'local_id'): result['instance']['server']['local_id'] = server.local_id try: service_status = self.instance.datastore_status.status.api_status except AttributeError: service_status = None result['instance']['service_status'] = service_status result['instance']['tenant_id'] = self.instance.tenant_id result['instance']['deleted'] = bool(self.instance.deleted) result['instance']['deleted_at'] = self.instance.deleted_at result['instance']['task_description'] = self.instance.task_description return result class MgmtInstanceDetailView(MgmtInstanceView): """Works with a full-blown instance.""" def __init__(self, instance, req, root_history=None): super(MgmtInstanceDetailView, self).__init__(instance, req=req) self.root_history = root_history def data(self): result = super(MgmtInstanceDetailView, self).data() if self.instance.server is not None: server = self.instance.server result['instance']['server'].update( {'addresses': server.addresses}) elif self.instance.server_id: result['instance']['server'] = {"id": self.instance.server_id} if self.root_history: result['instance']['root_enabled'] = self.root_history.created result['instance']['root_enabled_by'] = self.root_history.user if self.instance.volume: volume = self.instance.volume result['instance']['volume'] = { "attachments": volume.attachments, "availability_zone": volume.availability_zone, "created_at": volume.created_at, "id": volume.id, "size": volume.size, "status": volume.status, "used": self.instance.volume_used or None, "total": self.instance.volume_total or None, } elif self.instance.volume_id: result['instance']['volume'] = {"id": self.instance.volume_id} else: result['instance']['volume'] = None description = self.instance.datastore_status.status.description result['instance']['guest_status'] = {"state_description": description} return result class MgmtInstancesView(object): """Shows a list of MgmtInstance objects.""" def __init__(self, instances, req=None): self.instances = instances self.req = req def data(self): data = [] # These are model instances for instance in self.instances: data.append(self.data_for_instance(instance)) return {'instances': data} def data_for_instance(self, instance): view = MgmtInstanceView(instance, req=self.req) return view.data()['instance'] class RootHistoryView(object): def __init__(self, instance_id, enabled='Never', user_id='Nobody'): self.instance_id = instance_id self.enabled = enabled self.user = user_id def data(self): return { 'root_history': { 'id': self.instance_id, 'enabled': self.enabled, 'user': self.user, } } class HwInfoView(object): def __init__(self, instance_id, hwinfo): self.instance_id = instance_id self.hwinfo = hwinfo def data(self): return { 'hwinfo': { 'mem_total': 
self.hwinfo['mem_total'], 'num_cpus': self.hwinfo['num_cpus'], } } class DiagnosticsView(object): def __init__(self, instance_id, diagnostics): self.instance_id = instance_id self.diagnostics = diagnostics def data(self): return { 'diagnostics': { 'version': self.diagnostics['version'], 'threads': self.diagnostics['threads'], 'fdSize': self.diagnostics['fd_size'], 'vmSize': self.diagnostics['vm_size'], 'vmPeak': self.diagnostics['vm_peak'], 'vmRss': self.diagnostics['vm_rss'], 'vmHwm': self.diagnostics['vm_hwm'], } } trove-5.0.0/trove/extensions/mgmt/instances/models.py0000664000567000056710000002543012701410316024141 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import remote from trove.common import utils from trove.extensions.mysql import models as mysql_models from trove.instance import models as imodels from trove.instance import models as instance_models from trove.instance.models import load_instance, InstanceServiceStatus from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF def load_mgmt_instances(context, deleted=None, client=None, include_clustered=None): if not client: client = remote.create_nova_client(context) try: mgmt_servers = client.rdservers.list() except AttributeError: mgmt_servers = client.servers.list(search_opts={'all_tenants': 1}) LOG.info(_("Found %d servers in Nova") % len(mgmt_servers if mgmt_servers else [])) args = {} if deleted is not None: args['deleted'] = deleted if not include_clustered: args['cluster_id'] = None db_infos = instance_models.DBInstance.find_all(**args) instances = MgmtInstances.load_status_from_existing(context, db_infos, mgmt_servers) return instances def load_mgmt_instance(cls, context, id, include_deleted): try: instance = load_instance(cls, context, id, needs_server=True, include_deleted=include_deleted) client = remote.create_nova_client(context) try: server = client.rdservers.get(instance.server_id) except AttributeError: server = client.servers.get(instance.server_id) if hasattr(server, 'host'): instance.server.host = server.host elif hasattr(server, 'hostId'): instance.server.host = server.hostId if hasattr(server, 'deleted'): instance.server.deleted = server.deleted if hasattr(server, 'deleted_at'): instance.server.deleted_at = server.deleted_at if hasattr(server, 'local_id'): instance.server.local_id = server.local_id assert instance.server is not None except Exception as e: LOG.error(e) instance = load_instance(cls, context, id, needs_server=False, include_deleted=include_deleted) return instance class SimpleMgmtInstance(imodels.BaseInstance): def __init__(self, context, db_info, server, datastore_status): super(SimpleMgmtInstance, self).__init__(context, db_info, server, datastore_status) @property def status(self): if self.deleted: return imodels.InstanceStatus.SHUTDOWN return 
super(SimpleMgmtInstance, self).status @property def deleted(self): return self.db_info.deleted @property def deleted_at(self): return self.db_info.deleted_at @classmethod def load(cls, context, id, include_deleted=False): return load_mgmt_instance(cls, context, id, include_deleted) @property def task_description(self): return self.db_info.task_description class DetailedMgmtInstance(SimpleMgmtInstance): def __init__(self, *args, **kwargs): super(DetailedMgmtInstance, self).__init__(*args, **kwargs) self.volume = None self.volume_used = None self.volume_total = None self.root_history = None @classmethod def load(cls, context, id, include_deleted=False): instance = load_mgmt_instance(cls, context, id, include_deleted) client = remote.create_cinder_client(context) try: instance.volume = client.volumes.get(instance.volume_id) except Exception: instance.volume = None # Populate the volume_used attribute from the guest agent. instance_models.load_guest_info(instance, context, id) instance.root_history = mysql_models.RootHistory.load(context=context, instance_id=id) return instance class MgmtInstance(imodels.Instance): def get_diagnostics(self): return self.get_guest().get_diagnostics() def stop_db(self): return self.get_guest().stop_db() def get_hwinfo(self): return self.get_guest().get_hwinfo() def rpc_ping(self): return self.get_guest().rpc_ping() class MgmtInstances(imodels.Instances): @staticmethod def load_status_from_existing(context, db_infos, servers): def load_instance(context, db, status, server=None): return SimpleMgmtInstance(context, db, server, status) if context is None: raise TypeError("Argument context not defined.") find_server = imodels.create_server_list_matcher(servers) instances = imodels.Instances._load_servers_status(load_instance, context, db_infos, find_server) _load_servers(instances, find_server) return instances def _load_servers(instances, find_server): for instance in instances: db = instance.db_info instance.server = None try: server = find_server(db.id, db.compute_instance_id) instance.server = server except Exception as ex: LOG.error(ex) return instances def publish_exist_events(transformer, admin_context): notifier = rpc.get_notifier("taskmanager") notifications = transformer() # clear out admin_context.auth_token so it does not get logged admin_context.auth_token = None for notification in notifications: notifier.info(admin_context, "trove.instance.exists", notification) class NotificationTransformer(object): def __init__(self, **kwargs): pass @staticmethod def _get_audit_period(): now = datetime.datetime.now() audit_start = utils.isotime( now - datetime.timedelta( seconds=CONF.exists_notification_interval), subsecond=True) audit_end = utils.isotime(now, subsecond=True) return audit_start, audit_end def _get_service_id(self, datastore_manager, id_map): if datastore_manager in id_map: datastore_manager_id = id_map[datastore_manager] else: datastore_manager_id = cfg.UNKNOWN_SERVICE_ID LOG.error(_("Datastore ID for Manager (%s) is not configured") % datastore_manager) return datastore_manager_id def transform_instance(self, instance, audit_start, audit_end): payload = { 'audit_period_beginning': audit_start, 'audit_period_ending': audit_end, 'created_at': instance.created, 'display_name': instance.name, 'instance_id': instance.id, 'instance_name': instance.name, 'instance_type_id': instance.flavor_id, 'launched_at': instance.created, 'nova_instance_id': instance.server_id, 'region': CONF.region, 'state_description': instance.status.lower(), 'state': 
instance.status.lower(), 'tenant_id': instance.tenant_id } payload['service_id'] = self._get_service_id( instance.datastore_version.manager, CONF.notification_service_id) return payload def __call__(self): audit_start, audit_end = NotificationTransformer._get_audit_period() messages = [] db_infos = instance_models.DBInstance.find_all(deleted=False) for db_info in db_infos: try: service_status = InstanceServiceStatus.find_by( instance_id=db_info.id) except exception.ModelNotFoundError: # There is a small window of opportunity during which the db # resource for an instance exists, but no InstanceServiceStatus # for it has yet been created. We skip sending the notification # message for all such instances. These instances are too new # and will get picked up in the next round of notifications. LOG.debug("InstanceServiceStatus not found for %s. " "Will wait to send notification." % db_info.id) continue instance = SimpleMgmtInstance(None, db_info, None, service_status) message = self.transform_instance(instance, audit_start, audit_end) messages.append(message) return messages class NovaNotificationTransformer(NotificationTransformer): def __init__(self, **kwargs): super(NovaNotificationTransformer, self).__init__(**kwargs) self.context = kwargs['context'] self.nova_client = remote.create_admin_nova_client(self.context) self._flavor_cache = {} def _lookup_flavor(self, flavor_id): if flavor_id in self._flavor_cache: LOG.debug("Flavor cache hit for %s" % flavor_id) return self._flavor_cache[flavor_id] # fetch flavor resource from nova LOG.info(_("Flavor cache miss for %s") % flavor_id) flavor = self.nova_client.flavors.get(flavor_id) self._flavor_cache[flavor_id] = flavor.name if flavor else 'unknown' return self._flavor_cache[flavor_id] def __call__(self): audit_start, audit_end = NotificationTransformer._get_audit_period() instances = load_mgmt_instances(self.context, deleted=False, client=self.nova_client) messages = [] for instance in filter( lambda inst: inst.status != 'SHUTDOWN' and inst.server, instances): message = { 'instance_type': self._lookup_flavor(instance.flavor_id), 'user_id': instance.server.user_id } message.update(self.transform_instance(instance, audit_start, audit_end)) messages.append(message) return messages trove-5.0.0/trove/extensions/mgmt/datastores/0000775000567000056710000000000012701410521022460 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/datastores/__init__.py0000664000567000056710000000000012701410316024561 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/mgmt/datastores/service.py0000664000567000056710000001442112701410316024476 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
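# Illustrative request body (all values hypothetical) for
# DatastoreVersionController.create() below. "packages" may be a
# comma-separated string or a list; create() joins lists with commas.
def _example_version_body():
    return {'version': {'datastore_name': 'mysql',
                        'name': '5.6',
                        'datastore_manager': 'mysql',
                        'image': 'a-glance-image-uuid',
                        'packages': ['mysql-server-5.6'],
                        'active': True,
                        'default': False}}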
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common import apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import remote from trove.common import utils from trove.common import wsgi from trove.datastore import models from trove.extensions.mgmt.datastores import views LOG = logging.getLogger(__name__) class DatastoreVersionController(wsgi.Controller): """Controller for datastore version registration functionality.""" schemas = apischema.mgmt_datastore_version @admin_context def create(self, req, body, tenant_id): """Adds a new datastore version.""" context = req.environ[wsgi.CONTEXT_KEY] datastore_name = body['version']['datastore_name'] version_name = body['version']['name'] manager = body['version']['datastore_manager'] image_id = body['version']['image'] packages = body['version']['packages'] if type(packages) is list: packages = ','.join(packages) active = body['version']['active'] default = body['version']['default'] LOG.info(_("Tenant: '%(tenant)s' is adding the datastore " "version: '%(version)s' to datastore: '%(datastore)s'") % {'tenant': tenant_id, 'version': version_name, 'datastore': datastore_name}) client = remote.create_nova_client(context) try: client.images.get(image_id) except nova_exceptions.NotFound: raise exception.ImageNotFound(uuid=image_id) try: datastore = models.Datastore.load(datastore_name) except exception.DatastoreNotFound: # Create the datastore if datastore_name does not exist. LOG.info(_("Creating datastore %s") % datastore_name) datastore = models.DBDatastore() datastore.id = utils.generate_uuid() datastore.name = datastore_name datastore.save() try: models.DatastoreVersion.load(datastore, version_name) raise exception.DatastoreVersionAlreadyExists(name=version_name) except exception.DatastoreVersionNotFound: models.update_datastore_version(datastore.name, version_name, manager, image_id, packages, active) if default: models.update_datastore(datastore.name, version_name) return wsgi.Result(None, 202) @admin_context def index(self, req, tenant_id): """Lists all datastore versions.""" db_ds_versions = models.DatastoreVersions.load_all(only_active=False) datastore_versions = [models.DatastoreVersion.load_by_uuid( ds_version.id) for ds_version in db_ds_versions] return wsgi.Result( views.DatastoreVersionsView(datastore_versions).data(), 200) @admin_context def show(self, req, tenant_id, id): """Shows details of a given datastore version.""" datastore_version = models.DatastoreVersion.load_by_uuid(id) return wsgi.Result( views.DatastoreVersionView(datastore_version).data(), 200) @admin_context def edit(self, req, body, tenant_id, id): """Updates the attributes of a datastore version.""" context = req.environ[wsgi.CONTEXT_KEY] datastore_version = models.DatastoreVersion.load_by_uuid(id) LOG.info(_("Tenant: '%(tenant)s' is updating the datastore " "version: '%(version)s' for datastore: '%(datastore)s'") % {'tenant': tenant_id, 'version': datastore_version.name, 'datastore': datastore_version.datastore_name}) manager = body.get('datastore_manager', datastore_version.manager) image_id = body.get('image', datastore_version.image_id) active = body.get('active', datastore_version.active) default = body.get('default', None) packages = body.get('packages', datastore_version.packages) if type(packages) is list: packages = ','.join(packages) client =
remote.create_nova_client(context) try: client.images.get(image_id) except nova_exceptions.NotFound: raise exception.ImageNotFound(uuid=image_id) models.update_datastore_version(datastore_version.datastore_name, datastore_version.name, manager, image_id, packages, active) if default: models.update_datastore(datastore_version.datastore_name, datastore_version.name) elif (default is False and datastore_version.default is True): models.update_datastore(datastore_version.datastore_name, None) return wsgi.Result(None, 202) @admin_context def delete(self, req, tenant_id, id): """Remove an existing datastore version.""" datastore_version = models.DatastoreVersion.load_by_uuid(id) datastore = models.Datastore.load(datastore_version.datastore_id) LOG.info(_("Tenant: '%(tenant)s' is removing the datastore " "version: '%(version)s' for datastore: '%(datastore)s'") % {'tenant': tenant_id, 'version': datastore_version.name, 'datastore': datastore.name}) if datastore.default_version_id == datastore_version.id: models.update_datastore(datastore.name, None) datastore_version.delete() return wsgi.Result(None, 202) trove-5.0.0/trove/extensions/mgmt/datastores/views.py0000664000567000056710000000341012701410316024167 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DatastoreVersionView(object): def __init__(self, datastore_version): self.datastore_version = datastore_version def data(self): datastore_version_dict = { "id": self.datastore_version.id, "name": self.datastore_version.name, "datastore_id": self.datastore_version.datastore_id, "datastore_name": self.datastore_version.datastore_name, "datastore_manager": self.datastore_version.manager, "image": self.datastore_version.image_id, "packages": (self.datastore_version.packages.split( ',') if self.datastore_version.packages else ['']), "active": self.datastore_version.active, "default": self.datastore_version.default} return {'version': datastore_version_dict} class DatastoreVersionsView(object): def __init__(self, datastore_versions): self.datastore_versions = datastore_versions def data(self): data = [] for datastore_version in self.datastore_versions: data.append( DatastoreVersionView(datastore_version).data()['version']) return {'versions': data} trove-5.0.0/trove/extensions/cassandra/0000775000567000056710000000000012701410521021302 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/cassandra/__init__.py0000664000567000056710000000000012701410316023403 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/cassandra/service.py0000664000567000056710000000221412701410316023315 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.extensions.common.service import DefaultRootController from trove.extensions.mysql import models from trove.guestagent.db import models as guest_models class CassandraRootController(DefaultRootController): def _find_root_user(self, context, instance_id): user = guest_models.CassandraRootUser() # TODO(pmalik): Using MySQL model until we have datastore specific # extensions (bug/1498573). return models.User.load( context, instance_id, user.name, user.host, root_user=True) trove-5.0.0/trove/extensions/pxc/0000775000567000056710000000000012701410521020135 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/pxc/__init__.py0000664000567000056710000000000012701410316022236 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/pxc/service.py0000664000567000056710000000223312701410316022151 0ustar jenkinsjenkins00000000000000# Copyright [2016] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.extensions.common.service import ClusterRootController LOG = logging.getLogger(__name__) CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'pxc' class PxcRootController(ClusterRootController): def root_delete(self, req, tenant_id, instance_id, is_cluster): raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=MANAGER) trove-5.0.0/trove/extensions/__init__.py0000664000567000056710000000000012701410316021444 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/routes/0000775000567000056710000000000012701410521020664 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/routes/account.py0000664000567000056710000000256612701410316022705 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
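# Route sketch: the descriptor below registers service.AccountController()
# at the admin endpoint (path rooted at /v1.0):
#
#     {tenant_id}/mgmt/accounts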
from oslo_log import log as logging from trove.common import extensions from trove.extensions.account import service LOG = logging.getLogger(__name__) class Account(extensions.ExtensionDescriptor): def get_name(self): return "Account" def get_description(self): return "Account information with instances" def get_alias(self): return "Account" def get_namespace(self): return "http://TBD" def get_updated(self): return "2012-06-07T13:25:27-06:00" def get_resources(self): resources = [] resource = extensions.ResourceExtension( '{tenant_id}/mgmt/accounts', service.AccountController()) resources.append(resource) return resources trove-5.0.0/trove/extensions/routes/__init__.py0000664000567000056710000000000012701410316022765 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/routes/security_group.py0000664000567000056710000000405612701410316024330 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common import cfg from trove.common import extensions from trove.extensions.security_group import service LOG = logging.getLogger(__name__) CONF = cfg.CONF # The Extensions module from openstack common expects the classname of the # extension to be loaded to be the exact same as the filename, except with # a capital first letter. That's the reason this class has such a funky name. class Security_group(extensions.ExtensionDescriptor): def get_name(self): return "SecurityGroup" def get_description(self): return "Security Group related operations such as list \ security groups and manage security group rules." def get_alias(self): return "SecurityGroup" def get_namespace(self): return "http://TBD" def get_updated(self): return "2012-02-26T17:25:27-08:00" def get_resources(self): resources = [] if CONF.trove_security_groups_support: security_groups = extensions.ResourceExtension( '{tenant_id}/security-groups', service.SecurityGroupController()) resources.append(security_groups) security_group_rules = extensions.ResourceExtension( '{tenant_id}/security-group-rules', service.SecurityGroupRuleController()) resources.append(security_group_rules) return resources trove-5.0.0/trove/extensions/routes/mysql.py0000664000567000056710000000553112701410316022411 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
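# Route sketch (paths abbreviated, rooted at /v1.0) for the resources
# registered by get_resources() below:
#
#     {tenant_id}/instances/{instance_id}/databases  -> SchemaController
#     {tenant_id}/instances/{instance_id}/users      -> UserController
#                                                       (updates via PUT)
#     {tenant_id}/instances/{instance_id}/users/{id}/databases
#                                                    -> UserAccessController
#     {tenant_id}/instances/{instance_id}/root       -> RootController
#     {tenant_id}/clusters/{cluster_id}/root         -> RootController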
from oslo_log import log as logging from trove.common import extensions from trove.extensions.common import service as common_service from trove.extensions.mysql import service as mysql_service LOG = logging.getLogger(__name__) class Mysql(extensions.ExtensionDescriptor): def get_name(self): return "Mysql" def get_description(self): return "Non essential MySQL services such as users and schemas" def get_alias(self): return "MYSQL" def get_namespace(self): return "http://TBD" def get_updated(self): return "2011-01-22T13:25:27-06:00" def get_resources(self): resources = [] resource = extensions.ResourceExtension( 'databases', mysql_service.SchemaController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/instances'}) resources.append(resource) resource = extensions.ResourceExtension( 'users', mysql_service.UserController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/instances'}, member_actions={'update': 'PUT'}, collection_actions={'update_all': 'PUT'}) resources.append(resource) collection_url = '{tenant_id}/instances/:instance_id/users' resource = extensions.ResourceExtension( 'databases', mysql_service.UserAccessController(), parent={'member_name': 'user', 'collection_name': collection_url}, collection_actions={'update': 'PUT'}) resources.append(resource) resource = extensions.ResourceExtension( 'root', common_service.RootController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/instances'}, collection_actions={'delete': 'DELETE'}) resources.append(resource) resource = extensions.ResourceExtension( 'root', common_service.RootController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/clusters'}) resources.append(resource) return resources trove-5.0.0/trove/extensions/routes/mgmt.py0000664000567000056710000000771612701410316022217 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
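# Route sketch (paths abbreviated, rooted at /v1.0) for the admin-only
# resources assembled by get_resources() below:
#
#     {tenant_id}/mgmt/instances      (GET root/diagnostics/hwinfo/rpc_ping,
#                                      POST action)
#     {tenant_id}/mgmt/clusters       (POST action)
#     {tenant_id}/mgmt/hosts          (+ nested .../instances POST action)
#     {tenant_id}/mgmt/quotas
#     {tenant_id}/mgmt/storage
#     {tenant_id}/mgmt/instances/{instance_id}/upgrade
#     {tenant_id}/mgmt/datastores/versions/{version_id}/parameters
#     {tenant_id}/mgmt/datastore-versions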
from oslo_log import log as logging from trove.common import extensions from trove.extensions.mgmt.clusters.service import MgmtClusterController from trove.extensions.mgmt.configuration import service as conf_service from trove.extensions.mgmt.datastores.service import DatastoreVersionController from trove.extensions.mgmt.host.instance import service as hostservice from trove.extensions.mgmt.host.service import HostController from trove.extensions.mgmt.instances.service import MgmtInstanceController from trove.extensions.mgmt.quota.service import QuotaController from trove.extensions.mgmt.upgrade.service import UpgradeController from trove.extensions.mgmt.volume.service import StorageController LOG = logging.getLogger(__name__) class Mgmt(extensions.ExtensionDescriptor): def get_name(self): return "Mgmt" def get_description(self): return "MGMT services such as details diagnostics" def get_alias(self): return "Mgmt" def get_namespace(self): return "http://TBD" def get_updated(self): return "2011-01-22T13:25:27-06:00" def get_resources(self): resources = [] instances = extensions.ResourceExtension( '{tenant_id}/mgmt/instances', MgmtInstanceController(), member_actions={'root': 'GET', 'diagnostics': 'GET', 'hwinfo': 'GET', 'rpc_ping': 'GET', 'action': 'POST'}) resources.append(instances) clusters = extensions.ResourceExtension( '{tenant_id}/mgmt/clusters', MgmtClusterController(), member_actions={'action': 'POST'}) resources.append(clusters) hosts = extensions.ResourceExtension( '{tenant_id}/mgmt/hosts', HostController(), member_actions={}) resources.append(hosts) quota = extensions.ResourceExtension( '{tenant_id}/mgmt/quotas', QuotaController(), member_actions={}) resources.append(quota) storage = extensions.ResourceExtension( '{tenant_id}/mgmt/storage', StorageController(), member_actions={}) resources.append(storage) host_instances = extensions.ResourceExtension( 'instances', hostservice.HostInstanceController(), parent={'member_name': 'host', 'collection_name': '{tenant_id}/mgmt/hosts'}, collection_actions={'action': 'POST'}) resources.append(host_instances) upgrade = extensions.ResourceExtension( '{tenant_id}/mgmt/instances/{instance_id}/upgrade', UpgradeController(), member_actions={}) resources.append(upgrade) datastore_configuration_parameters = extensions.ResourceExtension( '{tenant_id}/mgmt/datastores/versions/{version_id}/parameters', conf_service.ConfigurationsParameterController(), member_actions={}) resources.append(datastore_configuration_parameters) datastore_version = extensions.ResourceExtension( '{tenant_id}/mgmt/datastore-versions', DatastoreVersionController(), member_actions={}) resources.append(datastore_version) return resources trove-5.0.0/trove/extensions/vertica/0000775000567000056710000000000012701410521021000 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/vertica/__init__.py0000664000567000056710000000000012701410316023101 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/vertica/service.py0000664000567000056710000000310212701410316023010 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.extensions.common.service import ClusterRootController from trove.instance.models import DBInstance LOG = logging.getLogger(__name__) CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'vertica' class VerticaRootController(ClusterRootController): def delete(self, req, tenant_id, instance_id): raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=MANAGER) def _get_cluster_instance_id(self, tenant_id, cluster_id): instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id) args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'type': 'master'} master_instance = DBInstance.find_by(**args) master_instance_id = master_instance.id return master_instance_id, instance_ids trove-5.0.0/trove/extensions/security_group/0000775000567000056710000000000012701410521022426 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/security_group/__init__.py0000664000567000056710000000000012701410316024527 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/security_group/service.py0000664000567000056710000001373012701410316024446 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
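# Illustrative request body (values hypothetical) accepted by
# SecurityGroupRuleController.create() below. Note that the ports opened
# come from the datastore manager's tcp_ports/udp_ports config, not from
# the request body.
def _example_rule_body(group_id, cidr='0.0.0.0/0'):
    # Hypothetical helper; both keys are required by _validate_create_body().
    return {'security_group_rule': {'group_id': group_id, 'cidr': cidr}}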
trove-5.0.0/trove/extensions/security_group/__init__.py

trove-5.0.0/trove/extensions/security_group/service.py

# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.common import wsgi
from trove.datastore.models import DatastoreVersion
from trove.extensions.security_group import models
from trove.extensions.security_group import views
from trove.instance import models as instance_models

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class SecurityGroupController(wsgi.Controller):
    """Controller for security groups functionality."""

    def index(self, req, tenant_id):
        """Return all security groups tied to a particular tenant_id."""
        LOG.debug("Index() called with %s" % (tenant_id))
        sec_groups = models.SecurityGroup().find_all(tenant_id=tenant_id,
                                                     deleted=False)
        # Construct the mapping from Security Groups to Security Group Rules.
        rules_map = {g.id: g.get_rules() for g in sec_groups}
        return wsgi.Result(
            views.SecurityGroupsView(sec_groups, rules_map,
                                     req, tenant_id).list(), 200)

    def show(self, req, tenant_id, id):
        """Return a single security group."""
        LOG.debug("Show() called with %s, %s" % (tenant_id, id))
        sec_group = \
            models.SecurityGroup.get_security_group_by_id_or_instance_id(
                id, tenant_id)
        return wsgi.Result(
            views.SecurityGroupView(sec_group, sec_group.get_rules(),
                                    req, tenant_id).show(), 200)

class SecurityGroupRuleController(wsgi.Controller):
    """Controller for security group rule functionality."""

    def delete(self, req, tenant_id, id):
        LOG.debug("Delete Security Group Rule called %s, %s" %
                  (tenant_id, id))
        context = req.environ[wsgi.CONTEXT_KEY]
        sec_group_rule = models.SecurityGroupRule.find_by(id=id,
                                                          deleted=False)
        sec_group = sec_group_rule.get_security_group(tenant_id)
        if sec_group is None:
            LOG.error(_("Attempting to delete Group Rule that does not "
                        "exist or does not belong to tenant %s") % tenant_id)
            raise exception.Forbidden("Unauthorized")
        sec_group_rule.delete(context)
        sec_group.save()
        return wsgi.Result(None, 204)

    def create(self, req, body, tenant_id):
        LOG.debug("Creating a Security Group Rule for tenant '%s'"
                  % tenant_id)
        context = req.environ[wsgi.CONTEXT_KEY]
        self._validate_create_body(body)
        sec_group_id = body['security_group_rule']['group_id']
        sec_group = models.SecurityGroup.find_by(id=sec_group_id,
                                                 tenant_id=tenant_id,
                                                 deleted=False)
        instance_id = (models.SecurityGroupInstanceAssociation.
                       get_instance_id_by_security_group_id(sec_group_id))
        db_info = instance_models.get_db_info(context, id=instance_id)
        manager = (DatastoreVersion.load_by_uuid(
            db_info.datastore_version_id).manager)
        tcp_ports = CONF.get(manager).tcp_ports
        udp_ports = CONF.get(manager).udp_ports

        def _create_rules(sec_group, ports, protocol):
            rules = []
            try:
                for port_or_range in set(ports):
                    from_, to_ = utils.gen_ports(port_or_range)
                    rule = models.SecurityGroupRule.create_sec_group_rule(
                        sec_group, protocol, int(from_), int(to_),
                        body['security_group_rule']['cidr'], context)
                    rules.append(rule)
            except (ValueError, AttributeError) as e:
                raise exception.BadRequest(msg=str(e))
            return rules

        tcp_rules = _create_rules(sec_group, tcp_ports, 'tcp')
        udp_rules = _create_rules(sec_group, udp_ports, 'udp')
        sec_group.save()

        all_rules = tcp_rules + udp_rules
        view = views.SecurityGroupRulesView(
            all_rules, req, tenant_id).create()
        return wsgi.Result(view, 201)

    def _validate_create_body(self, body):
        try:
            body['security_group_rule']
            body['security_group_rule']['group_id']
            body['security_group_rule']['cidr']
        except KeyError as e:
            LOG.error(_("Create Security Group Rules Required field(s) "
                        "- %s") % e)
            raise exception.SecurityGroupRuleCreationError(
                "Required element/key - %s was not specified" % e)

    schemas = {
        "type": "object",
        "name": "security_group_rule:create",
        "required": True,
        "properties": {
            "security_group_rule": {
                "type": "object",
                "required": True,
                "properties": {
                    "cidr": {
                        "type": "string",
                        "required": True,
                        "minLength": 9,
                        "maxLength": 18
                    },
                    "group_id": {
                        "type": "string",
                        "required": True,
                        "maxLength": 255
                    },
                }
            }
        }
    }
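# Editor's example (hypothetical values): create() above takes no port
# information in the request; only the group and CIDR are supplied, and the
# datastore's configured tcp_ports/udp_ports (single ports or "from-to"
# ranges, parsed by utils.gen_ports) determine what gets opened:
#
#     {
#         "security_group_rule": {
#             "group_id": "2d288d68-2d53-41b9-98f3-8cea1ea0498b",
#             "cidr": "0.0.0.0/0"
#         }
#     }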
trove-5.0.0/trove/extensions/security_group/views.py

# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import os

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def _base_url(req):
    return req.application_url


class SecurityGroupView(object):

    def __init__(self, secgroup, rules, req, tenant_id):
        self.secgroup = secgroup
        self.rules = rules
        self.request = req
        self.tenant_id = tenant_id

    def _build_links(self):
        """Build the links for the secgroup."""
        base_url = _base_url(self.request)
        href = os.path.join(base_url, self.tenant_id,
                            "security-groups", str(self.secgroup['id']))
        links = [
            {
                'rel': 'self',
                'href': href
            }
        ]
        return links

    def _build_rules(self):
        rules = []
        if self.rules is None:
            return rules
        for rule in self.rules:
            rules.append({'id': str(rule['id']),
                          'protocol': rule['protocol'],
                          'from_port': rule['from_port'],
                          'to_port': rule['to_port'],
                          'cidr': rule['cidr'],
                          })
        return rules

    def data(self):
        return {"id": self.secgroup['id'],
                "name": self.secgroup['name'],
                "description": self.secgroup['description'],
                "instance_id": self.secgroup['instance_id'],
                "rules": self._build_rules(),
                "links": self._build_links(),
                "created": self.secgroup['created'],
                "updated": self.secgroup['updated']
                }

    def show(self):
        return {"security_group": self.data()}

    def create(self):
        return self.show()


class SecurityGroupsView(object):

    def __init__(self, secgroups, rules_dict, req, tenant_id):
        self.secgroups = secgroups
        self.rules = rules_dict
        self.request = req
        self.tenant_id = tenant_id

    def list(self):
        groups_data = []
        for secgroup in self.secgroups:
            rules = (self.rules[secgroup['id']]
                     if self.rules is not None else None)
            groups_data.append(SecurityGroupView(secgroup, rules,
                                                 self.request,
                                                 self.tenant_id).data())
        return {"security_groups": groups_data}


class SecurityGroupRulesView(object):

    def __init__(self, rules, req, tenant_id):
        self.rules = rules
        self.request = req
        self.tenant_id = tenant_id

    def _build_create(self):
        views = []
        for rule in self.rules:
            to_append = {
                "id": rule.id,
                "security_group_id": rule.group_id,
                "protocol": rule.protocol,
                "from_port": rule.from_port,
                "to_port": rule.to_port,
                "cidr": rule.cidr,
                "created": rule.created
            }
            views.append(to_append)
        return {"security_group_rule": views}

    def create(self):
        return self._build_create()
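# Editor's example (hypothetical values): SecurityGroupView.show() above
# serializes a group roughly as follows:
#
#     {"security_group": {
#         "id": "c5d05b5b-...",
#         "name": "SecGroup_61347883-...",
#         "description": "Security Group for 61347883-...",
#         "instance_id": "61347883-...",
#         "rules": [{"id": "38", "protocol": "tcp",
#                    "from_port": 3306, "to_port": 3306,
#                    "cidr": "0.0.0.0/0"}],
#         "links": [{"rel": "self", "href": ".../security-groups/c5d05b5b-..."}],
#         "created": "...", "updated": "..."}}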
""" from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.models import NetworkRemoteModelBase from trove.db.models import DatabaseModelBase CONF = cfg.CONF LOG = logging.getLogger(__name__) def persisted_models(): return { 'security_group': SecurityGroup, 'security_group_rule': SecurityGroupRule, 'security_group_instance_association': SecurityGroupInstanceAssociation, } class SecurityGroup(DatabaseModelBase): _data_fields = ['id', 'name', 'description', 'user', 'tenant_id', 'created', 'updated', 'deleted', 'deleted_at'] @property def instance_id(self): return SecurityGroupInstanceAssociation\ .get_instance_id_by_security_group_id(self.id) @classmethod def create_sec_group(cls, name, description, context): try: remote_sec_group = RemoteSecurityGroup.create(name, description, context) if not remote_sec_group: raise exception.SecurityGroupCreationError( _("Failed to create Security Group.")) else: return cls.create( id=remote_sec_group.data()['id'], name=name, description=description, user=context.user, tenant_id=context.tenant) except exception.SecurityGroupCreationError as e: LOG.exception(_("Failed to create remote security group.")) raise e @classmethod def create_for_instance(cls, instance_id, context): # Create a new security group name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id) description = _("Security Group for %s") % instance_id sec_group = cls.create_sec_group(name, description, context) # Currently this locked down by default, since we don't create any # default security group rules for the security group. # Create security group instance association SecurityGroupInstanceAssociation.create( security_group_id=sec_group["id"], instance_id=instance_id) return sec_group @classmethod def get_security_group_by_id_or_instance_id(self, id, tenant_id): try: return SecurityGroup.find_by(id=id, tenant_id=tenant_id, deleted=False) except exception.ModelNotFoundError: return SecurityGroupInstanceAssociation.\ get_security_group_by_instance_id(id) def get_rules(self): return SecurityGroupRule.find_all(group_id=self.id, deleted=False) def delete(self, context): try: sec_group_rules = self.get_rules() if sec_group_rules: for rule in sec_group_rules: rule.delete(context) RemoteSecurityGroup.delete(self.id, context) super(SecurityGroup, self).delete() except exception.TroveError: LOG.exception(_('Failed to delete security group.')) raise exception.TroveError("Failed to delete Security Group") @classmethod def delete_for_instance(cls, instance_id, context): try: association = SecurityGroupInstanceAssociation.find_by( instance_id=instance_id, deleted=False) if association: sec_group = association.get_security_group() if sec_group: sec_group.delete(context) association.delete() except (exception.ModelNotFoundError, exception.TroveError): LOG.info(_('Security Group with id: %(id)s ' 'already had been deleted') % {'id': instance_id}) class SecurityGroupRule(DatabaseModelBase): _data_fields = ['id', 'parent_group_id', 'protocol', 'from_port', 'to_port', 'cidr', 'group_id', 'created', 'updated', 'deleted', 'deleted_at'] @classmethod def create_sec_group_rule(cls, sec_group, protocol, from_port, to_port, cidr, context): try: remote_rule_id = RemoteSecurityGroup.add_rule( sec_group_id=sec_group['id'], protocol=protocol, from_port=from_port, to_port=to_port, cidr=cidr, context=context) if not remote_rule_id: raise exception.SecurityGroupRuleCreationError( "Failed to create Security Group 
Rule") else: # Create db record return cls.create( id=remote_rule_id, protocol=protocol, from_port=from_port, to_port=to_port, cidr=cidr, group_id=sec_group['id']) except exception.SecurityGroupRuleCreationError as e: LOG.exception(_("Failed to create remote security group.")) raise e def get_security_group(self, tenant_id): return SecurityGroup.find_by(id=self.group_id, tenant_id=tenant_id, deleted=False) def delete(self, context): try: # Delete Remote Security Group Rule RemoteSecurityGroup.delete_rule(self.id, context) super(SecurityGroupRule, self).delete() except exception.TroveError: LOG.exception(_('Failed to delete security group.')) raise exception.SecurityGroupRuleDeletionError( "Failed to delete Security Group") class SecurityGroupInstanceAssociation(DatabaseModelBase): _data_fields = ['id', 'security_group_id', 'instance_id', 'created', 'updated', 'deleted', 'deleted_at'] def get_security_group(self): return SecurityGroup.find_by(id=self.security_group_id, deleted=False) @classmethod def get_security_group_by_instance_id(cls, id): association = SecurityGroupInstanceAssociation.find_by( instance_id=id, deleted=False) return association.get_security_group() @classmethod def get_instance_id_by_security_group_id(cls, secgroup_id): association = SecurityGroupInstanceAssociation.find_by( security_group_id=secgroup_id, deleted=False) return association.instance_id class RemoteSecurityGroup(NetworkRemoteModelBase): _data_fields = ['id', 'name', 'description', 'rules'] def __init__(self, security_group=None, id=None, context=None): if id is None and security_group is None: msg = _("Security Group does not have id defined!") raise exception.InvalidModelError(msg) elif security_group is None: driver = self.get_driver(context) self._data_object = driver.get_sec_group_by_id(group_id=id) else: self._data_object = security_group @classmethod def create(cls, name, description, context): """Creates a new Security Group.""" driver = cls.get_driver(context) sec_group = driver.create_security_group( name=name, description=description) return RemoteSecurityGroup(security_group=sec_group) @classmethod def delete(cls, sec_group_id, context): """Deletes a Security Group.""" driver = cls.get_driver(context) driver.delete_security_group(sec_group_id) @classmethod def add_rule(cls, sec_group_id, protocol, from_port, to_port, cidr, context): """Adds a new rule to an existing security group.""" driver = cls.get_driver(context) sec_group_rule = driver.add_security_group_rule( sec_group_id, protocol, from_port, to_port, cidr) return sec_group_rule.id @classmethod def delete_rule(cls, sec_group_rule_id, context): """Deletes a rule from an existing security group.""" driver = cls.get_driver(context) driver.delete_security_group_rule(sec_group_rule_id) trove-5.0.0/trove/extensions/common/0000775000567000056710000000000012701410521020633 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/common/__init__.py0000664000567000056710000000000012701410316022734 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/extensions/common/service.py0000664000567000056710000002245612701410320022653 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 

class DefaultRootController(BaseDatastoreRootController):

    def root_index(self, req, tenant_id, instance_id, is_cluster):
        """Returns True if root is enabled; False otherwise."""
        if is_cluster:
            raise exception.ClusterOperationNotSupported(
                operation='show_root')
        LOG.info(_LI("Getting root enabled for instance '%s'.") % instance_id)
        LOG.info(_LI("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        is_root_enabled = models.Root.load(context, instance_id)
        return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)

    def root_create(self, req, body, tenant_id, instance_id, is_cluster):
        if is_cluster:
            raise exception.ClusterOperationNotSupported(
                operation='enable_root')
        LOG.info(_LI("Enabling root for instance '%s'.") % instance_id)
        LOG.info(_LI("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        user_name = context.user
        password = DefaultRootController._get_password_from_body(body)
        root = models.Root.create(context, instance_id,
                                  user_name, password)
        return wsgi.Result(views.RootCreatedView(root).data(), 200)

    def root_delete(self, req, tenant_id, instance_id, is_cluster):
        if is_cluster:
            raise exception.ClusterOperationNotSupported(
                operation='disable_root')
        LOG.info(_LI("Disabling root for instance '%s'.") % instance_id)
        LOG.info(_LI("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        try:
            found_user = self._find_root_user(context, instance_id)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not found_user:
            raise exception.UserNotFound(uuid="root")
        models.Root.delete(context, instance_id)
        return wsgi.Result(None, 200)

class ClusterRootController(DefaultRootController):

    def root_index(self, req, tenant_id, instance_id, is_cluster):
        """Returns True if root is enabled; False otherwise."""
        if is_cluster:
            return self.cluster_root_index(req, tenant_id, instance_id)
        else:
            return self.instance_root_index(req, tenant_id, instance_id)

    def instance_root_index(self, req, tenant_id, instance_id):
        LOG.info(_LI("Getting root enabled for instance '%s'.") % instance_id)
        LOG.info(_LI("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        try:
            is_root_enabled = models.ClusterRoot.load(context, instance_id)
        except exception.UnprocessableEntity:
            raise exception.UnprocessableEntity(
                "Cluster %s is not ready." % instance_id)
        return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)

    def cluster_root_index(self, req, tenant_id, cluster_id):
        LOG.info(_LI("Getting root enabled for cluster '%s'.") % cluster_id)
        single_instance_id, cluster_instances = self._get_cluster_instance_id(
            tenant_id, cluster_id)
        return self.instance_root_index(req, tenant_id, single_instance_id)

    def root_create(self, req, body, tenant_id, instance_id, is_cluster):
        if is_cluster:
            return self.cluster_root_create(req, body, tenant_id, instance_id)
        else:
            return self.instance_root_create(req, body, instance_id)

    def instance_root_create(self, req, body, instance_id,
                             cluster_instances=None):
        LOG.info(_LI("Enabling root for instance '%s'.") % instance_id)
        LOG.info(_LI("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        user_name = context.user
        password = ClusterRootController._get_password_from_body(body)
        root = models.ClusterRoot.create(context, instance_id, user_name,
                                         password, cluster_instances)
        return wsgi.Result(views.RootCreatedView(root).data(), 200)

    def cluster_root_create(self, req, body, tenant_id, cluster_id):
        LOG.info(_LI("Enabling root for cluster '%s'.") % cluster_id)
        single_instance_id, cluster_instances = self._get_cluster_instance_id(
            tenant_id, cluster_id)
        return self.instance_root_create(req, body, single_instance_id,
                                         cluster_instances)

    def _find_cluster_node_ids(self, tenant_id, cluster_id):
        args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
        cluster_instances = DBInstance.find_all(**args).all()
        return [db_instance.id for db_instance in cluster_instances]

    def _get_cluster_instance_id(self, tenant_id, cluster_id):
        instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id)
        single_instance_id = instance_ids[0]
        return single_instance_id, instance_ids

class RootController(wsgi.Controller):
    """Controller for instance functionality."""

    def index(self, req, tenant_id, instance_id):
        """Returns True if root is enabled; False otherwise."""
        datastore_manager, is_cluster = self._get_datastore(tenant_id,
                                                            instance_id)
        root_controller = self.load_root_controller(datastore_manager)
        return root_controller.root_index(req, tenant_id, instance_id,
                                          is_cluster)

    def create(self, req, tenant_id, instance_id, body=None):
        """Enable the root user for the db instance."""
        datastore_manager, is_cluster = self._get_datastore(tenant_id,
                                                            instance_id)
        root_controller = self.load_root_controller(datastore_manager)
        if root_controller is not None:
            return root_controller.root_create(req, body, tenant_id,
                                               instance_id, is_cluster)
        else:
            raise NoSuchOptError('root_controller',
                                 group='datastore_manager')

    def delete(self, req, tenant_id, instance_id):
        datastore_manager, is_cluster = self._get_datastore(tenant_id,
                                                            instance_id)
        root_controller = self.load_root_controller(datastore_manager)
        if root_controller is not None:
            return root_controller.root_delete(req, tenant_id, instance_id,
                                               is_cluster)
        else:
            raise NoSuchOptError('root_controller',
                                 group='datastore_manager')

    def _get_datastore(self, tenant_id, instance_or_cluster_id):
        """
        Returns the datastore manager and a boolean showing whether
        instance_or_cluster_id is a cluster id.
        """
        args = {'id': instance_or_cluster_id, 'tenant_id': tenant_id}
        is_cluster = False
        try:
            db_info = DBInstance.find_by(**args)
        except exception.ModelNotFoundError:
            is_cluster = True
            db_info = DBCluster.find_by(**args)
        ds_version = (datastore_models.DatastoreVersion.
                      load_by_uuid(db_info.datastore_version_id))
        ds_manager = ds_version.manager
        return (ds_manager, is_cluster)

    def load_root_controller(self, manager):
        try:
            clazz = CONF.get(manager).get('root_controller')
            LOG.debug("Loading Root Controller class %s." % clazz)
            root_controller = import_class(clazz)
            return root_controller()
        except NoSuchOptError:
            return None

trove-5.0.0/trove/extensions/common/views.py

# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class UserView(object):

    def __init__(self, user):
        self.user = user

    def data(self):
        user_dict = {
            "name": self.user.name,
            "host": self.user.host,
            "databases": self.user.databases
        }
        return {"user": user_dict}


class RootCreatedView(UserView):

    def data(self):
        user_dict = {
            "name": self.user.name,
            "password": self.user.password
        }
        return {"user": user_dict}


class RootEnabledView(object):

    def __init__(self, is_root_enabled):
        self.is_root_enabled = is_root_enabled

    def data(self):
        return {'rootEnabled': self.is_root_enabled}
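# Editor's sketch (assumed configuration, not part of the original file):
# RootController.load_root_controller() in service.py above resolves the
# per-datastore 'root_controller' option and instantiates that class, so a
# datastore opts into its own root handling via configuration, e.g.:
#
#     [vertica]
#     root_controller = trove.extensions.vertica.service.VerticaRootController
#
# When the option is absent, load_root_controller() returns None and
# create()/delete() raise NoSuchOptError for that datastore.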
trove-5.0.0/trove/extensions/common/models.py

# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.common.remote import create_guest_client
from trove.common import utils
from trove.db import get_db_api
from trove.guestagent.db import models as guest_models
from trove.instance import models as base_models

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def load_and_verify(context, instance_id):
    # Load InstanceServiceStatus to verify that it's running.
    instance = base_models.Instance.load(context, instance_id)
    if not instance.is_datastore_running:
        raise exception.UnprocessableEntity(
            "Instance %s is not ready." % instance.id)
    else:
        return instance


class Root(object):

    @classmethod
    def load(cls, context, instance_id):
        load_and_verify(context, instance_id)
        # TODO(pdmars): remove the is_root_enabled call from the guest agent,
        # just check the database for this information.
        # If the root history returns null or raises an exception, the root
        # user hasn't been enabled.
        try:
            root_history = RootHistory.load(context, instance_id)
        except exception.NotFound:
            return False
        if not root_history:
            return False
        return True

    @classmethod
    def create(cls, context, instance_id, user, root_password,
               cluster_instances_list=None):
        load_and_verify(context, instance_id)
        if root_password:
            root = create_guest_client(context,
                                       instance_id).enable_root_with_password(
                root_password)
        else:
            root = create_guest_client(context, instance_id).enable_root()

        root_user = guest_models.RootUser()
        root_user.deserialize(root)

        # If cluster_instances_list is None, then root create is called for a
        # single instance; add a RootHistory entry for the instance_id.
        if cluster_instances_list is None:
            RootHistory.create(context, instance_id, user)

        return root_user

    @classmethod
    def delete(cls, context, instance_id):
        load_and_verify(context, instance_id)
        create_guest_client(context, instance_id).disable_root()


class ClusterRoot(Root):

    @classmethod
    def create(cls, context, instance_id, user, root_password,
               cluster_instances_list=None):
        root_user = super(ClusterRoot, cls).create(context, instance_id,
                                                   user, root_password,
                                                   cluster_instances_list=None)
        if cluster_instances_list:
            for instance in cluster_instances_list:
                RootHistory.create(context, instance, user)

        return root_user


class RootHistory(object):

    _auto_generated_attrs = ['id']
    _data_fields = ['instance_id', 'user', 'created']
    _table_name = 'root_enabled_history'

    def __init__(self, instance_id, user):
        self.id = instance_id
        self.user = user
        self.created = utils.utcnow()

    def save(self):
        LOG.debug("Saving %(name)s: %(dict)s" %
                  {'name': self.__class__.__name__, 'dict': self.__dict__})
        return get_db_api().save(self)

    @classmethod
    def load(cls, context, instance_id):
        history = get_db_api().find_by(cls, id=instance_id)
        return history

    @classmethod
    def create(cls, context, instance_id, user):
        history = cls.load(context, instance_id)
        if history is not None:
            return history
        history = RootHistory(instance_id, user)
        return history.save()
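# Editor's note: Root.load() above deliberately answers "has root ever been
# enabled?" from the root_enabled_history table alone; disabling root later
# does not erase the history row. A minimal check therefore reads:
#
#     enabled_at_some_point = Root.load(context, instance_id)
#     # True once enable_root has succeeded, even after Root.delete().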
""" try: databases = [] unique_identities = set() for database in dbs: mydb = guest_models.ValidatedMySQLDatabase() mydb.name = database.get('name', '') if mydb.name in unique_identities: raise exception.DatabaseInitialDatabaseDuplicateError() unique_identities.add(mydb.name) mydb.character_set = database.get('character_set', '') mydb.collate = database.get('collate', '') databases.append(mydb.serialize()) return databases except ValueError as ve: # str(ve) contains user input and may include '%' which can cause a # format str vulnerability. Escape the '%' to avoid this. This is # okay to do since we're not using dict args here in any case. safe_string = str(ve).replace('%', '%%') raise exception.BadRequest(safe_string) def populate_users(users, initial_databases=None): """Create a serializable request containing users.""" users_data = [] unique_identities = set() for user in users: u = guest_models.MySQLUser() u.name = user.get('name', '') u.host = user.get('host', '%') user_identity = (u.name, u.host) if user_identity in unique_identities: raise exception.DatabaseInitialUserDuplicateError() unique_identities.add(user_identity) u.password = user.get('password', '') user_dbs = user.get('databases', '') # user_db_names guaranteed unique and non-empty by apischema user_db_names = [user_db.get('name', '') for user_db in user_dbs] for user_db_name in user_db_names: if (initial_databases is not None and user_db_name not in initial_databases): raise exception.DatabaseForUserNotInDatabaseListError( user=u.name, database=user_db_name) u.databases = user_db_name users_data.append(u.serialize()) return users_data def unquote_user_host(user_hostname): unquoted = unquote(user_hostname) if '@' not in unquoted: return unquoted, '%' if unquoted.endswith('@'): return unquoted, '%' splitup = unquoted.split('@') host = splitup[-1] user = '@'.join(splitup[:-1]) return user, host trove-5.0.0/trove/extensions/mysql/service.py0000664000567000056710000003602212701410316022527 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
trove-5.0.0/trove/extensions/mysql/service.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import strutils
import webob.exc

import trove.common.apischema as apischema
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
from trove.common.utils import correct_id_with_req
from trove.common import wsgi
from trove.extensions.common.service import DefaultRootController
from trove.extensions.mysql.common import populate_users
from trove.extensions.mysql.common import populate_validated_databases
from trove.extensions.mysql.common import unquote_user_host
from trove.extensions.mysql import models
from trove.extensions.mysql import views
from trove.guestagent.db import models as guest_models

LOG = logging.getLogger(__name__)
import_class = importutils.import_class
CONF = cfg.CONF


class UserController(wsgi.Controller):
    """Controller for instance functionality."""

    schemas = apischema.user

    @classmethod
    def get_schema(cls, action, body):
        action_schema = super(UserController, cls).get_schema(action, body)
        if 'update_all' == action:
            update_type = list(body.keys())[0]
            action_schema = action_schema.get(update_type, {})
        return action_schema

    def index(self, req, tenant_id, instance_id):
        """Return all users."""
        LOG.info(_("Listing users for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        users, next_marker = models.Users.load(context, instance_id)
        view = views.UsersView(users)
        paged = pagination.SimplePaginatedDataView(req.url, 'users', view,
                                                   next_marker)
        return wsgi.Result(paged.data(), 200)

    def create(self, req, body, tenant_id, instance_id):
        """Creates a set of users."""
        LOG.info(_("Creating users for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n"
                   "body: '%(body)s'\n\n") %
                 {"id": instance_id,
                  "req": strutils.mask_password(req),
                  "body": strutils.mask_password(body)})
        context = req.environ[wsgi.CONTEXT_KEY]
        context.notification = notification.DBaaSUserCreate(context,
                                                            request=req)
        users = body['users']
        with StartNotification(context, instance_id=instance_id,
                               username=",".join([user['name']
                                                  for user in users])):
            try:
                model_users = populate_users(users)
                models.User.create(context, instance_id, model_users)
            except (ValueError, AttributeError) as e:
                raise exception.BadRequest(msg=str(e))
        return wsgi.Result(None, 202)

    def delete(self, req, tenant_id, instance_id, id):
        LOG.info(_("Delete instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        id = correct_id_with_req(id, req)
        username, host = unquote_user_host(id)
        context.notification = notification.DBaaSUserDelete(context,
                                                            request=req)
        with StartNotification(context, instance_id=instance_id,
                               username=username):
            user = None
            try:
                user = guest_models.MySQLUser()
                user.name = username
                user.host = host
                found_user = models.User.load(context, instance_id,
                                              username, host)
                if not found_user:
                    user = None
            except (ValueError, AttributeError) as e:
                raise exception.BadRequest(msg=str(e))
            if not user:
                raise exception.UserNotFound(uuid=id)
            models.User.delete(context, instance_id, user.serialize())
        return wsgi.Result(None, 202)

    def show(self, req, tenant_id, instance_id, id):
        """Return a single user."""
        LOG.info(_("Showing a user for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        id = correct_id_with_req(id, req)
        username, host = unquote_user_host(id)
        user = None
        try:
            user = models.User.load(context, instance_id, username, host)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not user:
            raise exception.UserNotFound(uuid=id)
        view = views.UserView(user)
        return wsgi.Result(view.data(), 200)

    def update(self, req, body, tenant_id, instance_id, id):
        """Change attributes for one user."""
        LOG.info(_("Updating user attributes for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": strutils.mask_password(req)})
        context = req.environ[wsgi.CONTEXT_KEY]
        id = correct_id_with_req(id, req)
        username, hostname = unquote_user_host(id)
        user = None
        user_attrs = body['user']
        context.notification = notification.DBaaSUserUpdateAttributes(
            context, request=req)
        with StartNotification(context, instance_id=instance_id,
                               username=username):
            try:
                user = models.User.load(context, instance_id, username,
                                        hostname)
            except (ValueError, AttributeError) as e:
                raise exception.BadRequest(msg=str(e))
            if not user:
                raise exception.UserNotFound(uuid=id)
            try:
                models.User.update_attributes(context, instance_id, username,
                                              hostname, user_attrs)
            except (ValueError, AttributeError) as e:
                raise exception.BadRequest(msg=str(e))
        return wsgi.Result(None, 202)

    def update_all(self, req, body, tenant_id, instance_id):
        """Change the password of one or more users."""
        LOG.info(_("Updating user password for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": strutils.mask_password(req)})
        context = req.environ[wsgi.CONTEXT_KEY]
        context.notification = notification.DBaaSUserChangePassword(
            context, request=req)
        users = body['users']
        with StartNotification(context, instance_id=instance_id,
                               username=",".join([user['name']
                                                  for user in users])):
            model_users = []
            for user in users:
                try:
                    mu = guest_models.MySQLUser()
                    mu.name = user['name']
                    mu.host = user.get('host')
                    mu.password = user['password']
                    found_user = models.User.load(context, instance_id,
                                                  mu.name, mu.host)
                    if not found_user:
                        user_and_host = mu.name
                        if mu.host:
                            user_and_host += '@' + mu.host
                        raise exception.UserNotFound(uuid=user_and_host)
                    model_users.append(mu)
                except (ValueError, AttributeError) as e:
                    raise exception.BadRequest(msg=str(e))
            models.User.change_password(context, instance_id, model_users)
        return wsgi.Result(None, 202)

class UserAccessController(wsgi.Controller):
    """Controller for adding and removing database access for a user."""

    schemas = apischema.user

    @classmethod
    def get_schema(cls, action, body):
        schema = {}
        if 'update_all' == action:
            schema = cls.schemas.get(action).get('databases')
        return schema

    def _get_user(self, context, instance_id, user_id):
        username, hostname = unquote_user_host(user_id)
        try:
            user = models.User.load(context, instance_id, username, hostname)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
        if not user:
            raise exception.UserNotFound(uuid=user_id)
        return user
    def index(self, req, tenant_id, instance_id, user_id):
        """Show permissions for the given user."""
        LOG.info(_("Showing user access for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        # Make sure this user exists.
        user_id = correct_id_with_req(user_id, req)
        user = self._get_user(context, instance_id, user_id)
        if not user:
            LOG.error(_("No such user: %(user)s ") % {'user': user})
            raise exception.UserNotFound(uuid=user)
        username, hostname = unquote_user_host(user_id)
        access = models.User.access(context, instance_id, username, hostname)
        view = views.UserAccessView(access.databases)
        return wsgi.Result(view.data(), 200)

    def update(self, req, body, tenant_id, instance_id, user_id):
        """Grant access for a user to one or more databases."""
        LOG.info(_("Granting user access for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        context.notification = notification.DBaaSUserGrant(
            context, request=req)
        user_id = correct_id_with_req(user_id, req)
        user = self._get_user(context, instance_id, user_id)
        if not user:
            LOG.error(_("No such user: %(user)s ") % {'user': user})
            raise exception.UserNotFound(uuid=user)
        username, hostname = unquote_user_host(user_id)
        databases = [db['name'] for db in body['databases']]
        with StartNotification(context, instance_id=instance_id,
                               username=username, database=databases):
            models.User.grant(context, instance_id, username, hostname,
                              databases)
        return wsgi.Result(None, 202)

    def delete(self, req, tenant_id, instance_id, user_id, id):
        """Revoke access for a user."""
        LOG.info(_("Revoking user access for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        context.notification = notification.DBaaSUserRevoke(
            context, request=req)
        user_id = correct_id_with_req(user_id, req)
        user = self._get_user(context, instance_id, user_id)
        if not user:
            LOG.error(_("No such user: %(user)s ") % {'user': user})
            raise exception.UserNotFound(uuid=user)
        username, hostname = unquote_user_host(user_id)
        access = models.User.access(context, instance_id, username, hostname)
        databases = [db.name for db in access.databases]
        with StartNotification(context, instance_id=instance_id,
                               username=username, database=databases):
            if id not in databases:
                raise exception.DatabaseNotFound(uuid=id)
            models.User.revoke(context, instance_id, username, hostname, id)
        return wsgi.Result(None, 202)


class SchemaController(wsgi.Controller):
    """Controller for instance functionality."""

    schemas = apischema.dbschema

    def index(self, req, tenant_id, instance_id):
        """Return all schemas."""
        LOG.info(_("Listing schemas for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id, "req": req})
        context = req.environ[wsgi.CONTEXT_KEY]
        schemas, next_marker = models.Schemas.load(context, instance_id)
        view = views.SchemasView(schemas)
        paged = pagination.SimplePaginatedDataView(req.url, 'databases', view,
                                                   next_marker)
        return wsgi.Result(paged.data(), 200)

    def create(self, req, body, tenant_id, instance_id):
        """Creates a set of schemas."""
        LOG.info(_("Creating schema for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n"
                   "body: '%(body)s'\n\n") %
                 {"id": instance_id, "req": req, "body": body})
        context = req.environ[wsgi.CONTEXT_KEY]
        schemas = body['databases']
        context.notification = notification.DBaaSDatabaseCreate(context,
                                                                request=req)
        with StartNotification(context, instance_id=instance_id,
                               dbname=".".join([db['name']
                                                for db in schemas])):
            model_schemas = populate_validated_databases(schemas)
            models.Schema.create(context, instance_id, model_schemas)
        return wsgi.Result(None, 202)

    def delete(self, req, tenant_id, instance_id, id):
        LOG.info(_("Deleting schema for instance '%(id)s'\n"
                   "req : '%(req)s'\n\n") %
                 {"id": instance_id,
"req": req}) context = req.environ[wsgi.CONTEXT_KEY] context.notification = notification.DBaaSDatabaseDelete( context, request=req) with StartNotification(context, instance_id=instance_id, dbname=id): try: schema = guest_models.ValidatedMySQLDatabase() schema.name = id models.Schema.delete(context, instance_id, schema.serialize()) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) return wsgi.Result(None, 202) def show(self, req, tenant_id, instance_id, id): raise webob.exc.HTTPNotImplemented() class MySQLRootController(DefaultRootController): def _find_root_user(self, context, instance_id): user = guest_models.MySQLRootUser() return models.User.load(context, instance_id, user.name, user.host, root_user=True) trove-5.0.0/trove/extensions/mysql/views.py0000664000567000056710000000355412701410316022230 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class UserView(object): def __init__(self, user): self.user = user def data(self): user_dict = { "name": self.user.name, "host": self.user.host, "databases": self.user.databases } return {"user": user_dict} class UsersView(object): def __init__(self, users): self.users = users def data(self): userlist = [{"name": user.name, "host": user.host, "databases": user.databases} for user in self.users] return {"users": userlist} class UserAccessView(object): def __init__(self, databases): self.databases = databases def data(self): dbs = [{"name": db.name} for db in self.databases] return {"databases": dbs} class SchemaView(object): def __init__(self, schema): self.schema = schema def data(self): return {"name": self.schema.name} class SchemasView(object): def __init__(self, schemas): self.schemas = schemas def data(self): data = [] # These are model instances for schema in self.schemas: data.append(SchemaView(schema).data()) return {"databases": data} trove-5.0.0/trove/extensions/mysql/models.py0000664000567000056710000002335712701410316022361 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Model classes that extend the instances functionality for MySQL instances. 
""" from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.notification import StartNotification from trove.common.remote import create_guest_client from trove.common import utils from trove.extensions.common.models import load_and_verify from trove.extensions.common.models import RootHistory from trove.guestagent.db import models as guest_models CONF = cfg.CONF LOG = logging.getLogger(__name__) def persisted_models(): return {'root_enabled_history': RootHistory} class User(object): _data_fields = ['name', 'host', 'password', 'databases'] def __init__(self, name, host, password, databases): self.name = name self.host = host self.password = password self.databases = databases @classmethod def load(cls, context, instance_id, username, hostname, root_user=False): load_and_verify(context, instance_id) if root_user: validate = guest_models.RootUser() else: validate = guest_models.MySQLUser() validate.name = username validate.host = hostname client = create_guest_client(context, instance_id) found_user = client.get_user(username=username, hostname=hostname) if not found_user: return None database_names = [{'name': db['_name']} for db in found_user['_databases']] return cls(found_user['_name'], found_user['_host'], found_user['_password'], database_names) @classmethod def create(cls, context, instance_id, users): # Load InstanceServiceStatus to verify if it's running load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) for user in users: user_name = user['_name'] host_name = user['_host'] userhost = "%s@%s" % (user_name, host_name) existing_users, _nadda = Users.load_with_client( client, limit=1, marker=userhost, include_marker=True) if (len(existing_users) > 0 and str(existing_users[0].name) == str(user_name) and str(existing_users[0].host) == str(host_name)): raise exception.UserAlreadyExists(name=user_name, host=host_name) return client.create_user(users) @classmethod def delete(cls, context, instance_id, user): load_and_verify(context, instance_id) with StartNotification(context, instance_id=instance_id, username=user): create_guest_client(context, instance_id).delete_user(user) @classmethod def access(cls, context, instance_id, username, hostname): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) databases = client.list_access(username, hostname) dbs = [] for db in databases: dbs.append(Schema(name=db['_name'], collate=db['_collate'], character_set=db['_character_set'])) return UserAccess(dbs) @classmethod def grant(cls, context, instance_id, username, hostname, databases): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) client.grant_access(username, hostname, databases) @classmethod def revoke(cls, context, instance_id, username, hostname, database): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) client.revoke_access(username, hostname, database) @classmethod def change_password(cls, context, instance_id, users): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) change_users = [] for user in users: change_user = {'name': user.name, 'host': user.host, 'password': user.password, } change_users.append(change_user) client.change_passwords(change_users) @classmethod def update_attributes(cls, context, instance_id, username, hostname, user_attrs): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) 
        user_changed = user_attrs.get('name')
        host_changed = user_attrs.get('host')

        validate = guest_models.MySQLUser()
        if host_changed:
            validate.host = host_changed
        if user_changed:
            validate.name = user_changed

        user = user_changed or username
        host = host_changed or hostname
        userhost = "%s@%s" % (user, host)
        if user_changed or host_changed:
            existing_users, _nadda = Users.load_with_client(
                client,
                limit=1,
                marker=userhost,
                include_marker=True)
            if (len(existing_users) > 0 and
                    existing_users[0].name == user and
                    existing_users[0].host == host):
                raise exception.UserAlreadyExists(name=user, host=host)
        client.update_attributes(username, hostname, user_attrs)


class UserAccess(object):
    _data_fields = ['databases']

    def __init__(self, databases):
        self.databases = databases


def load_via_context(cls, context, instance_id):
    """Creates guest and fetches pagination arguments from the context."""
    load_and_verify(context, instance_id)
    limit = utils.pagination_limit(context.limit, cls.DEFAULT_LIMIT)
    client = create_guest_client(context, instance_id)
    # The REST API standard dictates that we *NEVER* include the marker.
    return cls.load_with_client(client=client, limit=limit,
                                marker=context.marker, include_marker=False)


class Users(object):

    DEFAULT_LIMIT = CONF.users_page_size

    @classmethod
    def load(cls, context, instance_id):
        return load_via_context(cls, context, instance_id)

    @classmethod
    def load_with_client(cls, client, limit, marker, include_marker):
        user_list, next_marker = client.list_users(
            limit=limit,
            marker=marker,
            include_marker=include_marker)
        model_users = []
        for user in user_list:
            mysql_user = guest_models.MySQLUser()
            mysql_user.deserialize(user)
            if mysql_user.name in cfg.get_ignored_users():
                continue
            # TODO(hub-cap): databases are not being returned in the
            # reference agent
            dbs = []
            for db in mysql_user.databases:
                dbs.append({'name': db['_name']})
            model_users.append(User(mysql_user.name,
                                    mysql_user.host,
                                    mysql_user.password,
                                    dbs))
        return model_users, next_marker


class Schema(object):

    _data_fields = ['name', 'collate', 'character_set']

    def __init__(self, name, collate, character_set):
        self.name = name
        self.collate = collate
        self.character_set = character_set

    @classmethod
    def create(cls, context, instance_id, schemas):
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        for schema in schemas:
            schema_name = schema['_name']
            existing_schema, _nadda = Schemas.load_with_client(
                client,
                limit=1,
                marker=schema_name,
                include_marker=True)
            if (len(existing_schema) > 0 and
                    str(existing_schema[0].name) == str(schema_name)):
                raise exception.DatabaseAlreadyExists(name=schema_name)
        return client.create_database(schemas)

    @classmethod
    def delete(cls, context, instance_id, schema):
        load_and_verify(context, instance_id)
        create_guest_client(context, instance_id).delete_database(schema)


class Schemas(object):

    DEFAULT_LIMIT = CONF.databases_page_size

    @classmethod
    def load(cls, context, instance_id):
        return load_via_context(cls, context, instance_id)

    @classmethod
    def load_with_client(cls, client, limit, marker, include_marker):
        schemas, next_marker = client.list_databases(
            limit=limit,
            marker=marker,
            include_marker=include_marker)
        model_schemas = []
        for schema in schemas:
            mysql_schema = guest_models.MySQLDatabase()
            mysql_schema.deserialize(schema)
            if mysql_schema.name in cfg.get_ignored_dbs():
                continue
            model_schemas.append(Schema(mysql_schema.name,
                                        mysql_schema.collate,
                                        mysql_schema.character_set))
        return model_schemas, next_marker
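# Editor's note: Users.load and Schemas.load above both delegate to
# load_via_context(), so guest-side listings are marker-paginated and the
# marker row itself is never included (include_marker=False). Paging forward
# means feeding next_marker back through the request context:
#
#     users, next_marker = Users.load(context, instance_id)
#     # a follow-up request with context.marker = next_marker resumes
#     # strictly after the last "user@host" already returned.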
trove-5.0.0/trove/dns/__init__.py

trove-5.0.0/trove/dns/manager.py

# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Dns manager.
"""

from oslo_log import log as logging

from trove.common import cfg
from trove.common import utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class DnsManager(object):
    """Handles associating DNS to and from IPs."""

    def __init__(self, dns_driver=None, dns_instance_entry_factory=None,
                 *args, **kwargs):
        if not dns_driver:
            dns_driver = CONF.dns_driver
        dns_driver = utils.import_class(dns_driver)
        self.driver = dns_driver()

        if not dns_instance_entry_factory:
            dns_instance_entry_factory = CONF.dns_instance_entry_factory
        entry_factory = utils.import_class(dns_instance_entry_factory)
        self.entry_factory = entry_factory()

    def create_instance_entry(self, instance_id, content):
        """Connects a new instance with a DNS entry.

        :param instance_id: The trove instance_id to associate.
        :param content: The IP content attached to the instance.
        """
        entry = self.entry_factory.create_entry(instance_id)
        if entry:
            LOG.debug("Creating entry address %s." % str(entry))
            self.driver.create_entry(entry, content)
        else:
            LOG.debug("Entry address not found for instance %s"
                      % instance_id)

    def delete_instance_entry(self, instance_id, content=None):
        """Removes a DNS entry associated to an instance.

        :param instance_id: The trove instance id to associate.
        :param content: The IP content attached to the instance.
        """
        entry = self.entry_factory.create_entry(instance_id)
        LOG.debug("Deleting instance entry with %s" % str(entry))
        if entry:
            self.driver.delete_entry(entry.name, entry.type)

    def determine_hostname(self, instance_id):
        """
        Create the hostname field based on the instance id.
        Use instance by default.
        """
        entry = self.entry_factory.create_entry(instance_id)
        if entry:
            return entry.name
        else:
            return None
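# Editor's sketch (assumed configuration): DnsManager above resolves both of
# its collaborators from config, so a Designate-backed deployment typically
# sets:
#
#     dns_driver = trove.dns.designate.driver.DesignateDriver
#     dns_instance_entry_factory = \
#         trove.dns.designate.driver.DesignateInstanceEntryFactory
#
# create_instance_entry(instance_id, ip) then asks the factory for an entry
# named after the instance and hands the IP to the driver as record content.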
trove-5.0.0/trove/dns/designate/__init__.py

trove-5.0.0/trove/dns/designate/driver.py

# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Dns Driver that uses Designate DNSaaS.
"""

import base64
import hashlib

from designateclient.v1 import Client
from designateclient.v1.records import Record
from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.dns import driver

CONF = cfg.CONF

DNS_TENANT_ID = CONF.dns_account_id
DNS_AUTH_URL = CONF.dns_auth_url
DNS_ENDPOINT_URL = CONF.dns_endpoint_url
DNS_SERVICE_TYPE = CONF.dns_service_type
DNS_REGION = CONF.dns_region
DNS_USERNAME = CONF.dns_username
DNS_PASSKEY = CONF.dns_passkey
DNS_TTL = CONF.dns_ttl
DNS_DOMAIN_ID = CONF.dns_domain_id
DNS_DOMAIN_NAME = CONF.dns_domain_name

LOG = logging.getLogger(__name__)


class DesignateObjectConverter(object):

    def domain_to_zone(self, domain):
        return DesignateDnsZone(id=domain.id, name=domain.name)

    def record_to_entry(self, record, dns_zone):
        return driver.DnsEntry(name=record.name, content=record.data,
                               type=record.type, ttl=record.ttl,
                               priority=record.priority, dns_zone=dns_zone)


def create_designate_client():
    """Creates a Designate DNSaaS client."""
    client = Client(auth_url=DNS_AUTH_URL,
                    username=DNS_USERNAME,
                    password=DNS_PASSKEY,
                    tenant_id=DNS_TENANT_ID,
                    endpoint=DNS_ENDPOINT_URL,
                    service_type=DNS_SERVICE_TYPE,
                    region_name=DNS_REGION)
    return client


class DesignateDriver(driver.DnsDriver):

    def __init__(self):
        self.dns_client = create_designate_client()
        self.converter = DesignateObjectConverter()
        self.default_dns_zone = DesignateDnsZone(id=DNS_DOMAIN_ID,
                                                 name=DNS_DOMAIN_NAME)

    def create_entry(self, entry, content):
        """Creates the entry in the driver at the given dns zone."""
        dns_zone = entry.dns_zone or self.default_dns_zone
        if not dns_zone.id:
            raise TypeError("The entry's dns_zone must have an ID specified.")
        name = entry.name
        LOG.debug("Creating DNS entry %s." % name)
        client = self.dns_client
        # Record name has to end with a '.' by dns standard
        record = Record(name=entry.name + '.',
                        type=entry.type,
                        data=content,
                        ttl=entry.ttl,
                        priority=entry.priority)
        client.records.create(dns_zone.id, record)

    def delete_entry(self, name, type, dns_zone=None):
        """Deletes an entry with the given name and type from a dns zone."""
        dns_zone = dns_zone or self.default_dns_zone
        records = self._get_records(dns_zone)
        matching_record = [rec for rec in records
                           if rec.name == name + '.' and rec.type == type]
        if not matching_record:
            raise exception.DnsRecordNotFound(name)
        LOG.debug("Deleting DNS entry %s." % name)
        self.dns_client.records.delete(dns_zone.id, matching_record[0].id)
% name) self.dns_client.records.delete(dns_zone.id, matching_record[0].id) def get_entries_by_content(self, content, dns_zone=None): """Retrieves all entries in a DNS zone with matching content field.""" records = self._get_records(dns_zone) return [self.converter.record_to_entry(record, dns_zone) for record in records if record.data == content] def get_entries_by_name(self, name, dns_zone): records = self._get_records(dns_zone) return [self.converter.record_to_entry(record, dns_zone) for record in records if record.name == name] def get_dns_zones(self, name=None): """Returns all dns zones (optionally filtered by the name argument).""" domains = self.dns_client.domains.list() return [self.converter.domain_to_zone(domain) for domain in domains if not name or domain.name == name] def modify_content(self, name, content, dns_zone): # We don't need this in trove for now raise NotImplementedError("Not implemented for Designate DNS.") def rename_entry(self, content, name, dns_zone): # We don't need this in trove for now raise NotImplementedError("Not implemented for Designate DNS.") def _get_records(self, dns_zone): dns_zone = dns_zone or self.default_dns_zone if not dns_zone: raise TypeError('DNS domain must be specified') return self.dns_client.records.list(dns_zone.id) class DesignateInstanceEntryFactory(driver.DnsInstanceEntryFactory): """Defines how instance DNS entries are created for instances.""" def create_entry(self, instance_id): zone = DesignateDnsZone(id=DNS_DOMAIN_ID, name=DNS_DOMAIN_NAME) # Constructing the hostname by hashing the instance ID. name = base64.b32encode(hashlib.md5(instance_id).digest())[:11].lower() hostname = ("%s.%s" % (name, zone.name)) # Removing the trailing dot if present if hostname.endswith('.'): hostname = hostname[:-1] return driver.DnsEntry(name=hostname, content=None, type="A", ttl=DNS_TTL, dns_zone=zone) class DesignateDnsZone(driver.DnsZone): def __init__(self, id, name): self._name = name self._id = id @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def id(self): return self._id @id.setter def id(self, value): self._id = value def __eq__(self, other): return (isinstance(other, DesignateDnsZone) and self.name == other.name and self.id == other.id) def __str__(self): return "%s:%s" % (self.id, self.name) trove-5.0.0/trove/dns/driver.py0000664000567000056710000000730012701410316017577 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
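DesignateInstanceEntryFactory above turns an instance ID into a fixed-width, DNS-safe label: an MD5 digest, base32-encoded, truncated to 11 characters (55 bits of the hash). A standalone sketch of the same transformation; the UUID and zone are hypothetical, and the encode()/decode() calls are only needed on Python 3 (the file itself is Python 2 era):

import base64
import hashlib

instance_id = '44b937cc-9e7b-4096-9b8d-a22a3bb4e1a6'  # hypothetical ID
digest = hashlib.md5(instance_id.encode()).digest()   # 16 raw bytes
label = base64.b32encode(digest)[:11].lower()         # 11 base32 chars
hostname = '%s.%s' % (label.decode(), 'trove.example.com')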
""" Dns Driver base class that all DNS drivers should inherit from """ class DnsDriver(object): """The base class that all Dns drivers should inherit from.""" def __init__(self): pass def create_entry(self, entry): """Creates the entry in the driver at the given dns zone.""" pass def delete_entry(self, name, type, dns_zone=None): """Deletes an entry with the given name and type from a dns zone.""" pass def get_entries_by_content(self, content, dns_zone=None): """Retrieves all entries in a DNS zone with matching content field.""" pass def get_entries_by_name(self, name, dns_zone=None): """Retrieves all entries in a dns zone with the given name field.""" pass def get_dns_zones(self, name=None): """Returns all dns zones (optionally filtered by the name argument.""" pass def modify_content(self, name, content, dns_zone): # TODO(tim.simpson) I've found no use for this in RS impl of DNS w/ # instances. Check to see its really needed. pass def rename_entry(self, content, name, dns_zone): # TODO(tim.simpson) I've found no use for this in RS impl of DNS w/ # instances. Check to see its really needed. pass class DnsInstanceEntryFactory(object): """Defines how instance DNS entries are created for instances. By default, the DNS entry returns None meaning instances do not get entries associated with them. Override the create_entry method to change this behavior. """ def create_entry(self, instance): return None class DnsSimpleInstanceEntryFactory(object): """Creates a CNAME with the name being the instance name.""" def create_entry(self, instance): return DnsEntry(name=instance.name, content=None, type="CNAME") class DnsEntry(object): """Simple representation of a DNS record.""" def __init__(self, name, content, type, ttl=None, priority=None, dns_zone=None): self.content = content self.name = name self.type = type self.priority = priority self.dns_zone = dns_zone self.ttl = ttl def __repr__(self): msg = ('DnsEntry(name="%s", content="%s", type="%s", ' 'ttl=%s, priority=%s, dns_zone=%s)') params = (self.name, self.content, self.type, self.ttl, self.priority, self.dns_zone) return msg % params def __str__(self): return "{ name:%s, content:%s, type:%s, zone:%s }" % \ (self.name, self.content, self.type, self.dns_zone) class DnsZone(object): """Represents a DNS Zone. For some APIs it is inefficient to simply represent a zone as a string because this would necessitate a look up on every call. So this opaque object can contain additional data needed by the DNS driver. The only constant is it must contain the domain name of the zone. """ @property def name(self): return "" def __str__(self): return self.name trove-5.0.0/trove/dns/models.py0000664000567000056710000000461512701410316017575 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Model classes that map instance Ip to dns record. 
""" from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common.models import ModelBase from trove.db import get_db_api LOG = logging.getLogger(__name__) def persisted_models(): return { 'dns_records': DnsRecord, } class DnsRecord(ModelBase): _data_fields = ['name', 'record_id'] _table_name = 'dns_records' def __init__(self, name, record_id): self.name = name self.record_id = record_id @classmethod def create(cls, **values): record = cls(**values).save() if not record.is_valid(): raise exception.InvalidModelError(errors=record.errors) return record def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) LOG.debug("Saving %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().save(self) def delete(self): LOG.debug("Deleting %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().delete(self) @classmethod def find_by(cls, **conditions): model = cls.get_by(**conditions) if model is None: raise exception.ModelNotFoundError(_("%s Not Found") % cls.__name__) return model @classmethod def get_by(cls, **kwargs): return get_db_api().find_by(cls, **cls._process_conditions(kwargs)) @classmethod def _process_conditions(cls, raw_conditions): """Override in inheritors to format/modify any conditions.""" return raw_conditions trove-5.0.0/trove/limits/0000775000567000056710000000000012701410521016445 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/limits/__init__.py0000664000567000056710000000000012701410316020546 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/limits/service.py0000664000567000056710000000244712701410316020470 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import wsgi from trove.limits import views from trove.quota.quota import QUOTAS class LimitsController(wsgi.Controller): """ Controller for accessing limits in the OpenStack API. """ def index(self, req, tenant_id): """ Return all absolute and rate limit information. """ quotas = QUOTAS.get_all_quotas_by_tenant(tenant_id) abs_limits = {k: v['hard_limit'] for k, v in quotas.items()} rate_limits = req.environ.get("trove.limits", []) return wsgi.Result(views.LimitViews(abs_limits, rate_limits).data(), 200) trove-5.0.0/trove/limits/views.py0000664000567000056710000000355012701410316020161 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_utils import timeutils class LimitView(object): def __init__(self, rate_limit): self.rate_limit = rate_limit def data(self): get_utc = datetime.datetime.utcfromtimestamp next_avail = get_utc(self.rate_limit.get("resetTime", 0)) return {"limit": { "nextAvailable": timeutils.isotime(at=next_avail), "remaining": self.rate_limit.get("remaining", 0), "unit": self.rate_limit.get("unit", ""), "value": self.rate_limit.get("value", ""), "verb": self.rate_limit.get("verb", ""), "uri": self.rate_limit.get("URI", ""), "regex": self.rate_limit.get("regex", "") } } class LimitViews(object): def __init__(self, abs_limits, rate_limits): self.abs_limits = abs_limits self.rate_limits = rate_limits def data(self): data = [] abs_view = dict() abs_view["verb"] = "ABSOLUTE" for resource_name, abs_limit in self.abs_limits.items(): abs_view["max_" + resource_name] = abs_limit data.append(abs_view) for l in self.rate_limits: data.append(LimitView(l).data()["limit"]) return {"limits": data} trove-5.0.0/trove/common/0000775000567000056710000000000012701410521016434 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/limits.py0000664000567000056710000003232512701410316020316 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy import httplib import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils import webob.dec import webob.exc from trove.common import base_wsgi from trove.common import cfg from trove.common.i18n import _ from trove.common import wsgi CONF = cfg.CONF # Convenience constants for the limits dictionary passed to Limiter(). PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 class Limit(object): """ Stores information about a limit for HTTP requests. """ UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """ Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) 
@param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = _("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") self.error_message = msg % self.__dict__ def __call__(self, verb, url): """ Represents a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", CONF.http_post_rate, PER_MINUTE), Limit("PUT", "*", ".*", CONF.http_put_rate, PER_MINUTE), Limit("DELETE", "*", ".*", CONF.http_delete_rate, PER_MINUTE), Limit("GET", "*", ".*", CONF.http_get_rate, PER_MINUTE), Limit("POST", "*/mgmt", "^/mgmt", CONF.http_mgmt_post_rate, PER_MINUTE), ] class RateLimitingMiddleware(wsgi.TroveMiddleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): """ Represents a single call through this middleware. 
We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get(wsgi.CONTEXT_KEY) tenant_id = None if context: tenant_id = context.tenant delay, error = self._limiter.check_for_delay(verb, url, tenant_id) if delay and self.enabled(): msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["trove.limits"] = self._limiter.get_limits(tenant_id) return self.application def enabled(self): return True class Limiter(object): """ Rate-limit checking class which handles limits in memory. """ def __init__(self, limits, **kwargs): """ Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith('user:'): username = key[5:] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """ Return the limits for a given user. """ return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """ Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # This was ported from nova. # Keeping it as a static method for the sake of consistency # # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """ Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """ Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. 
To use, POST ``/`` with JSON data such as:: { "verb" : "GET", "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """ Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, request): """ Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """ Rate-limit requests based on answers from a remote source. """ def __init__(self, limiter_address): """ Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dumps({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = httplib.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if 200 <= resp.status < 300: return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # This was ported from nova. # Keeping it as a static method for the sake of consistency # # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """ Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. """ return [] trove-5.0.0/trove/common/auth.py0000664000567000056710000000646012701410316017757 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
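Stepping back to the Limit class above: __call__ implements a leaky bucket in which capacity is the unit's length in seconds and every request pours in capacity/value seconds of water. A worked sketch for a "10 POSTs per MINUTE" rule, assuming the calls arrive at effectively the same instant so nothing leaks out between them:

limit = Limit("POST", "*", ".*", 10, PER_MINUTE)  # capacity 60, request_value 6.0
for _ in range(10):
    assert limit("POST", "/instances") is None    # water level: 6, 12, ... 60
delay = limit("POST", "/instances")               # the 11th request overflows
# delay == 6.0: water_level (66) minus capacity (60) -> retry in 6 seconds

The same rule could equally come from configuration as the string "(POST, *, .*, 10, MINUTE)", which Limiter.parse_limits converts back into Limit instances.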
import re from oslo_log import log as logging from oslo_utils import strutils import webob.exc from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi LOG = logging.getLogger(__name__) class AuthorizationMiddleware(wsgi.Middleware): def __init__(self, application, auth_providers, **local_config): self.auth_providers = auth_providers LOG.debug("Auth middleware providers: %s" % auth_providers) super(AuthorizationMiddleware, self).__init__(application, **local_config) def process_request(self, request): roles = request.headers.get('X_ROLE', '').split(',') LOG.debug("Processing auth request with roles: %s" % roles) tenant_id = request.headers.get('X-Tenant-Id', None) LOG.debug("Processing auth request with tenant_id: %s" % tenant_id) for provider in self.auth_providers: provider.authorize(request, tenant_id, roles) @classmethod def factory(cls, global_config, **local_config): def _factory(app): LOG.debug("Created auth middleware with config: %s" % local_config) return cls(app, [TenantBasedAuth()], **local_config) return _factory class TenantBasedAuth(object): # The paths differ from melange, so the regex must differ as well, # trove starts with a tenant_id tenant_scoped_url = re.compile("/(?P<tenant_id>.*?)/.*") def authorize(self, request, tenant_id, roles): match_for_tenant = self.tenant_scoped_url.match(request.path_info) if (match_for_tenant and tenant_id == match_for_tenant.group('tenant_id')): LOG.debug(strutils.mask_password( _("Authorized tenant '%(tenant_id)s' request: " "%(request)s") % {'tenant_id': tenant_id, 'request': request})) return True msg = _( "User with tenant id %s cannot access this resource.") % tenant_id LOG.error(msg) raise webob.exc.HTTPForbidden(msg) def admin_context(f): """ Verify that the current context has administrative access, or throw an exception. Trove API functions typically take the form function(self, req), or function(self, req, id). """ def wrapper(*args, **kwargs): try: req = args[1] context = req.environ.get('trove.context') except Exception: raise exception.TroveError("Cannot load request context.") if not context.is_admin: raise exception.Forbidden("User does not have admin privileges.") return f(*args, **kwargs) return wrapper trove-5.0.0/trove/common/schemas/0000775000567000056710000000000012701410521020057 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/schemas/atom.rng0000664000567000056710000003624212701410316021540 0ustar jenkinsjenkins00000000000000[RELAX NG schema for Atom feeds; the XML markup was lost in extraction. The surviving rule text: an atom:feed must have an atom:author unless all of its atom:entry children have an atom:author; an atom:entry must have at least one atom:link element with a rel attribute of 'alternate' or an atom:content; an atom:entry must have an atom:author if its feed does not.]trove-5.0.0/trove/common/schemas/atom-link.rng0000664000567000056710000000700112701410316022462 0ustar jenkinsjenkins00000000000000[RELAX NG schema for atom:link elements; the XML markup was lost in extraction and nothing substantive survives.]trove-5.0.0/trove/common/schemas/v1.1/0000775000567000056710000000000012701410521020544 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/schemas/v1.1/limits.rng0000664000567000056710000000172312701410316022562 0ustar jenkinsjenkins00000000000000[RELAX NG schema for the v1.1 limits response; the XML markup was lost in extraction.]trove-5.0.0/trove/common/pastedeploy.py0000664000567000056710000001152112701410316021341 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from paste import deploy from trove.common import local class BasePasteFactory(object): """A base class for paste app and filter factories. Sub-classes must override the KEY class attribute and provide a __call__ method. """ KEY = None def __init__(self, data): self.data = data def _import_factory(self, local_conf): """Import an app/filter class. Look up the KEY from the PasteDeploy local conf and import the class named there. This class can then be used as an app or filter factory. Note we support the <module>:<class> format. Note also that if you do e.g. `key =` with the value on the following line, then ConfigParser returns a value with a leading newline, so we strip() the value before using it. """ mod_str, _sep, class_str = local_conf[self.KEY].strip().rpartition(':') del local_conf[self.KEY] __import__(mod_str) return getattr(sys.modules[mod_str], class_str) class AppFactory(BasePasteFactory): """A Generic paste.deploy app factory. This requires openstack.app_factory to be set to a callable which returns a WSGI app when invoked. The format of the name is <module>:<class> e.g. [app:myfooapp] paste.app_factory = trove.common.pastedeploy:app_factory openstack.app_factory = myapp:Foo The WSGI app constructor must accept a data object and a local config dict as its two arguments. """ KEY = 'openstack.app_factory' def __call__(self, global_conf, **local_conf): """The actual paste.app_factory protocol method.""" factory = self._import_factory(local_conf) return factory(self.data, **local_conf) class FilterFactory(AppFactory): """A Generic paste.deploy filter factory. This requires openstack.filter_factory to be set to a callable which returns a WSGI filter when invoked. The format is <module>:<class> e.g. [filter:myfoofilter] paste.filter_factory = trove.common.pastedeploy:filter_factory openstack.filter_factory = myfilter:Foo The WSGI filter constructor must accept a WSGI app, a data object and a local config dict as its three arguments. """ KEY = 'openstack.filter_factory' def __call__(self, global_conf, **local_conf): """The actual paste.filter_factory protocol method.""" factory = self._import_factory(local_conf) def filter(app): return factory(app, self.data, **local_conf) return filter def app_factory(global_conf, **local_conf): """A paste app factory used with paste_deploy_app().""" return local.store.app_factory(global_conf, **local_conf) def filter_factory(global_conf, **local_conf): """A paste filter factory used with paste_deploy_app().""" return local.store.filter_factory(global_conf, **local_conf) def paste_deploy_app(paste_config_file, app_name, data): """Load a WSGI app from a PasteDeploy configuration. Use deploy.loadapp() to load the app from the PasteDeploy configuration, ensuring that the supplied data object is passed to the app and filter factories defined in this module. To use these factories and the data object, the configuration should look like this: [app:myapp] paste.app_factory = trove.common.pastedeploy:app_factory openstack.app_factory = myapp:App ...
[filter:myfilter] paste.filter_factory = trove.common.pastedeploy:filter_factory openstack.filter_factory = myapp:Filter and then: myapp.py: class App(object): def __init__(self, data): ... class Filter(object): def __init__(self, app, data): ... :param paste_config_file: a PasteDeploy config file :param app_name: the name of the app/pipeline to load from the file :param data: a data object to supply to the app and its filters :returns: the WSGI app """ (af, ff) = (AppFactory(data), FilterFactory(data)) local.store.app_factory = af local.store.filter_factory = ff try: return deploy.loadapp("config:%s" % paste_config_file, name=app_name) finally: del local.store.app_factory del local.store.filter_factory trove-5.0.0/trove/common/pagination.py0000664000567000056710000001050112701410316021136 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import bisect import collections import six.moves.urllib.parse as urllib_parse def url_quote(s): if s is None: return s return urllib_parse.quote(str(s)) def paginate_list(li, limit=None, marker=None, include_marker=False): """Sort the given list and return a sublist containing a page of items. :param list li: The list to be paginated. :param int limit: Maximum number of iterms to be returned. :param marker: Key of the first item to appear on the sublist. :param bool include_marker: Include the marker value itself in the sublist. :return: """ li.sort() if include_marker: pos = bisect.bisect_left(li, marker) else: pos = bisect.bisect(li, marker) if limit and pos + limit < len(li): page = li[pos:pos + limit] return page, page[-1] else: return li[pos:], None class PaginatedDataView(object): def __init__(self, collection_type, collection, current_page_url, next_page_marker=None): self.collection_type = collection_type self.collection = collection self.current_page_url = current_page_url self.next_page_marker = url_quote(next_page_marker) def data(self): return {self.collection_type: self.collection, 'links': self._links, } def _links(self): if not self.next_page_marker: return [] app_url = AppUrl(self.current_page_url) next_url = app_url.change_query_params(marker=self.next_page_marker) next_link = { 'rel': 'next', 'href': str(next_url), } return [next_link] class SimplePaginatedDataView(object): # In some cases, we can't create a PaginatedDataView because # we don't have a collection query object to create a view on. # In that case, we have to supply the URL and collection manually. 
def __init__(self, url, name, view, marker): self.url = url self.name = name self.view = view self.marker = url_quote(marker) def data(self): if not self.marker: return self.view.data() app_url = AppUrl(self.url) next_url = str(app_url.change_query_params(marker=self.marker)) next_link = {'rel': 'next', 'href': next_url} view_data = {self.name: self.view.data()[self.name], 'links': [next_link]} return view_data class AppUrl(object): def __init__(self, url): self.url = url def __str__(self): return self.url def change_query_params(self, **kwargs): # Seeks out the query params in a URL and changes/appends to them # from the kwargs given. So change_query_params(foo='bar') # would remove from the URL any old instance of foo=something and # then add &foo=bar to the URL. parsed_url = urllib_parse.urlparse(self.url) # Build a dictionary out of the query parameters in the URL # with an OrderedDict to preserve the order of the URL. query_params = collections.OrderedDict( urllib_parse.parse_qsl(parsed_url.query)) # Use kwargs to change or update any values in the query dict. query_params.update(kwargs) # Build a new query based on the updated query dict. new_query_params = urllib_parse.urlencode(query_params) return self.__class__( # Force HTTPS. urllib_parse.ParseResult('https', parsed_url.netloc, parsed_url.path, parsed_url.params, new_query_params, parsed_url.fragment).geturl()) trove-5.0.0/trove/common/apischema.py0000664000567000056710000005240312701410316020746 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
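Because paginate_list above uses bisect, the marker does not have to be present in the list; iteration simply resumes from wherever the marker would sort. A quick sketch with toy values:

page, marker = paginate_list(['a', 'b', 'c', 'd', 'e'], limit=2, marker='b')
# bisect.bisect(li, 'b') == 2, so page == ['c', 'd'] and marker == 'd'
page, marker = paginate_list(['a', 'b', 'c', 'd', 'e'], limit=2, marker='d')
# fewer than limit items remain, so page == ['e'] and marker is None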
# from trove.common import cfg CONF = cfg.CONF url_ref = { "type": "string", "minLength": 8, "pattern": 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]' '|(?:%[0-9a-fA-F][0-9a-fA-F]))+' } boolean_string = { "type": "integer", "minimum": 0, "maximum": 1 } non_empty_string = { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^.*[0-9a-zA-Z]+.*$" } configuration_data_types = { "type": "string", "minLength": 1, "pattern": "integer|string" } configuration_integer_size = { "type": "string", "maxLength": 40, "pattern": "[0-9]+" } configuration_positive_integer = { "type": "string", "maxLength": 40, "minLength": 1, "pattern": "^[0-9]+$" } configuration_non_empty_string = { "type": "string", "minLength": 1, "maxLength": 128, "pattern": "^.*[0-9a-zA-Z]+.*$" } flavorref = { 'oneOf': [ non_empty_string, { "type": "integer" }] } volume_size = { "oneOf": [ { "type": "integer", "minimum": 0 }, configuration_positive_integer] } host_string = { "type": "string", "minLength": 1, "pattern": "^[%]?[\w(-).]*[%]?$" } name_string = { "type": "string", "minLength": 1, "pattern": "^.*[0-9a-zA-Z]+.*$" } uuid = { "type": "string", "minLength": 1, "maxLength": 64, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}" "-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$" } volume = { "type": "object", "required": ["size"], "properties": { "size": volume_size, "required": True } } nics = { "type": "array", "items": { "type": "object", } } databases_ref_list = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string } } } databases_ref_list_required = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string } } } databases_ref = { "type": "object", "required": ["databases"], "additionalProperties": True, "properties": { "databases": databases_ref_list_required } } databases_def = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string, "character_set": non_empty_string, "collate": non_empty_string } } } user_attributes = { "type": "object", "additionalProperties": True, "minProperties": 1, "properties": { "name": name_string, "password": non_empty_string, "host": host_string } } users_list = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["name", "password"], "additionalProperties": True, "properties": { "name": name_string, "password": non_empty_string, "host": host_string, "databases": databases_ref_list } } } null_configuration_id = { "type": "null" } configuration_id = { 'oneOf': [ uuid, null_configuration_id ] } module_list = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["id"], "additionalProperties": True, "properties": { "id": uuid, } } } cluster = { "create": { "type": "object", "required": ["cluster"], "additionalProperties": True, "properties": { "cluster": { "type": "object", "required": ["name", "datastore", "instances"], "additionalProperties": True, "properties": { "name": non_empty_string, "datastore": { "type": "object", "required": ["type", "version"], "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "instances": { "type": "array", "items": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "flavorRef": 
flavorref, "volume": volume, "nics": nics, "availability_zone": non_empty_string, "modules": module_list, } } } } } } }, "add_shard": { "type": "object", "required": ["add_shard"], "additionalProperties": True, "properties": { "add_shard": { "type": "object" } } }, "grow": { "type": "object", "required": ["grow"], "additionalProperties": True, "properties": { "grow": { "type": "array", "items": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "name": non_empty_string, "flavorRef": flavorref, "volume": volume, "nics": nics, "availability_zone": non_empty_string, "related_to": non_empty_string, "type": non_empty_string } } } } }, "shrink": { "type": "object", "required": ["shrink"], "additionalProperties": True, "properties": { "shrink": { "type": "array", "items": { "type": "object", "required": ["id"], "additionalProperties": True, "properties": { "id": uuid } } } } } } instance = { "create": { "type": "object", "required": ["instance"], "additionalProperties": True, "properties": { "instance": { "type": "object", "required": ["name", "flavorRef"], "additionalProperties": True, "properties": { "name": non_empty_string, "configuration_id": configuration_id, "flavorRef": flavorref, "volume": volume, "databases": databases_def, "users": users_list, "restorePoint": { "type": "object", "required": ["backupRef"], "additionalProperties": True, "properties": { "backupRef": uuid } }, "availability_zone": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "nics": nics, "modules": module_list } } } }, "edit": { "name": "instance:edit", "type": "object", "required": ["instance"], "properties": { "instance": { "type": "object", "required": [], "additionalProperties": False, "properties": { "slave_of": {}, "replica_of": {}, "name": non_empty_string, "configuration": configuration_id, } } } }, "action": { "resize": { "volume": { "type": "object", "required": ["resize"], "additionalProperties": True, "properties": { "resize": { "type": "object", "required": ["volume"], "additionalProperties": True, "properties": { "volume": volume } } } }, 'flavorRef': { "type": "object", "required": ["resize"], "additionalProperties": True, "properties": { "resize": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "flavorRef": flavorref } } } } }, "restart": { "type": "object", "required": ["restart"], "additionalProperties": True, "properties": { "restart": { "type": "object" } } } } } mgmt_cluster = { "action": { 'reset-task': { "type": "object", "required": ["reset-task"], "additionalProperties": True, "properties": { "reset-task": { "type": "object" } } } } } mgmt_instance = { "action": { 'migrate': { "type": "object", "required": ["migrate"], "additionalProperties": True, "properties": { "migrate": { "type": "object" } } }, "reboot": { "type": "object", "required": ["reboot"], "additionalProperties": True, "properties": { "reboot": { "type": "object" } } }, "stop": { "type": "object", "required": ["stop"], "additionalProperties": True, "properties": { "stop": { "type": "object" } } } } } user = { "create": { "name": "users:create", "type": "object", "required": ["users"], "properties": { "users": users_list } }, "update_all": { "users": { "type": "object", "required": ["users"], "additionalProperties": True, "properties": { "users": users_list } }, "databases": databases_ref }, "update": { "type": "object", "required": 
["user"], "additionalProperties": True, "properties": { "user": user_attributes } } } dbschema = { "create": { "type": "object", "required": ["databases"], "additionalProperties": True, "properties": { "databases": databases_def } } } backup = { "create": { "name": "backup:create", "type": "object", "required": ["backup"], "properties": { "backup": { "type": "object", "required": ["instance", "name"], "properties": { "description": non_empty_string, "instance": uuid, "name": non_empty_string, "parent_id": uuid } } } } } guest_log = { "action": { "name": "guest_log:action", "type": "object", "required": ["name"], "properties": { "name": non_empty_string, "enable": boolean_string, "disable": boolean_string, "publish": boolean_string, "discard": boolean_string } } } module_contents = { "type": "string", "minLength": 1, "maxLength": 16777215, "pattern": "^.*.+.*$" } module = { "create": { "name": "module:create", "type": "object", "required": ["module"], "properties": { "module": { "type": "object", "required": ["name", "module_type", "contents"], "additionalProperties": True, "properties": { "name": non_empty_string, "module_type": non_empty_string, "contents": module_contents, "description": non_empty_string, "datastore": { "type": "object", "properties": { "type": non_empty_string, "version": non_empty_string } }, "auto_apply": boolean_string, "all_tenants": boolean_string, "visible": boolean_string, "live_update": boolean_string, } } } }, "update": { "name": "module:update", "type": "object", "required": ["module"], "properties": { "module": { "type": "object", "required": [], "additionalProperties": True, "properties": { "name": non_empty_string, "type": non_empty_string, "contents": module_contents, "description": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "auto_apply": boolean_string, "all_tenants": boolean_string, "visible": boolean_string, "live_update": boolean_string, } } } }, "apply": { "name": "module:apply", "type": "object", "required": ["modules"], "properties": { "modules": module_list, } }, "list": { "name": "module:list", "type": "object", "required": [], "properties": { "module": uuid, "from_guest": boolean_string, "include_contents": boolean_string } }, } configuration = { "create": { "name": "configuration:create", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": ["values", "name"], "properties": { "description": non_empty_string, "values": { "type": "object", }, "name": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } } } } } }, "update": { "name": "configuration:update", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": [], "properties": { "description": non_empty_string, "values": { "type": "object", }, "name": non_empty_string } } } }, "edit": { "name": "configuration:edit", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": [], "properties": { "values": { "type": "object", } } } } } } mgmt_configuration = { "create": { "name": "configuration_parameter:create", "type": "object", "required": ["configuration-parameter"], "properties": { "configuration-parameter": { "type": "object", "required": ["name", "restart_required", "data_type"], "properties": { "name": 
configuration_non_empty_string, "data_type": configuration_data_types, "restart_required": boolean_string, "max": configuration_integer_size, "min": configuration_integer_size, } } } }, "update": { "name": "configuration_parameter:update", "type": "object", "required": ["configuration-parameter"], "properties": { "configuration-parameter": { "type": "object", "required": ["name", "restart_required", "data_type"], "properties": { "name": configuration_non_empty_string, "data_type": configuration_data_types, "restart_required": boolean_string, "max": configuration_integer_size, "min": configuration_integer_size, } } } }, } account = { 'create': { "type": "object", "name": "users", "required": ["users"], "additionalProperties": True, "properties": { "users": users_list } } } upgrade = { "create": { "type": "object", "required": ["upgrade"], "additionalProperties": True, "properties": { "upgrade": { "type": "object", "required": [], "additionalProperties": True, "properties": { "instance_version": non_empty_string, "location": non_empty_string, "metadata": {} } } } } } package_list = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^.*[0-9a-zA-Z]+.*$" } } mgmt_datastore_version = { "create": { "name": "mgmt_datastore_version:create", "type": "object", "required": ["version"], "properties": { "version": { "type": "object", "required": ["name", "datastore_name", "image", "active"], "additionalProperties": True, "properties": { "name": non_empty_string, "datastore_name": non_empty_string, "datastore_manager": non_empty_string, "packages": package_list, "image": uuid, "active": {"enum": [True, False]}, "default": {"enum": [True, False]} } } } }, "edit": { "name": "mgmt_datastore_version:edit", "type": "object", "required": [], "additionalProperties": True, "properties": { "datastore_manager": non_empty_string, "packages": package_list, "image": uuid, "active": {"enum": [True, False]}, "default": {"enum": [True, False]}, } } } trove-5.0.0/trove/common/base_wsgi.py0000664000567000056710000006631412701410320020760 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
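The dictionaries in apischema.py above are ordinary JSON Schema documents, which Trove feeds to the jsonschema library when validating request bodies. A standalone sketch of how such validation behaves (the body values are hypothetical):

import jsonschema

body = {"instance": {"name": "db1", "flavorRef": "7"}}  # volume is optional here
jsonschema.validate(body, instance['create'])           # passes silently

bad = {"instance": {"flavorRef": "7"}}                  # missing required "name"
for error in jsonschema.Draft4Validator(instance['create']).iter_errors(bad):
    print(error.message)                                # "'name' is a required property"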
"""Utility methods for working with WSGI servers.""" from __future__ import print_function import eventlet eventlet.patcher.monkey_patch(all=False, socket=True) import datetime import errno import socket import sys import time import eventlet.wsgi from oslo_config import cfg from oslo_log import log as logging from oslo_log import loggers from oslo_serialization import jsonutils from oslo_service import service from oslo_service import sslutils import routes import routes.middleware import webob.dec import webob.exc from xml.dom import minidom from xml.parsers import expat from trove.common import base_exception from trove.common.i18n import _ from trove.common import xmlutils socket_opts = [ cfg.IntOpt('backlog', default=4096, help="Number of backlog requests to configure the socket with"), cfg.IntOpt('tcp_keepidle', default=600, help="Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. Not supported on OS X."), ] CONF = cfg.CONF CONF.register_opts(socket_opts) LOG = logging.getLogger(__name__) def run_server(application, port, **kwargs): """Run a WSGI server with the given application.""" sock = eventlet.listen(('0.0.0.0', port)) eventlet.wsgi.server(sock, application, **kwargs) class Service(service.Service): """ Provides a Service API for wsgi servers. This gives us the ability to launch wsgi servers with the Launcher classes in oslo_service.service.py. """ def __init__(self, application, port, host='0.0.0.0', backlog=4096, threads=1000): self.application = application self._port = port self._host = host self._backlog = backlog if backlog else CONF.backlog self._socket = self._get_socket(host, port, self._backlog) super(Service, self).__init__(threads) def _get_socket(self, host, port, backlog): # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). We need to get around this in the # future or monitor upstream for a fix info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] sock = None retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=backlog, family=family) if sslutils.is_enabled(CONF): sock = sslutils.wrap(CONF, sock) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for 30 seconds") % {'host': host, 'port': port}) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) return sock def start(self): """Start serving this service using the provided server instance. :returns: None """ super(Service, self).start() self.tg.add_thread(self._run, self.application, self._socket) @property def backlog(self): return self._backlog @property def host(self): return self._socket.getsockname()[0] if self._socket else self._host @property def port(self): return self._socket.getsockname()[1] if self._socket else self._port def stop(self): """Stop serving this API. 
:returns: None """ super(Service, self).stop() def _run(self, application, socket): """Start a WSGI server in a new green thread.""" logger = logging.getLogger('eventlet.wsgi') eventlet.wsgi.server(socket, application, custom_pool=self.tg.pool, log=loggers.WritableLogger(logger)) class Middleware(object): """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ def __init__(self, application): self.application = application def process_request(self, req): """ Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) class Debug(Middleware): """ Helper class that can be inserted into any WSGI application chain to get information about the request and response. """ @webob.dec.wsgify def __call__(self, req): print(("*" * 40) + " REQUEST ENVIRON") for key, value in req.environ.items(): print(key, "=", value) print() resp = req.get_response(self.application) print(("*" * 40) + " RESPONSE HEADERS") for (key, value) in resp.headers.iteritems(): print(key, "=", value) print() resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """ Iterator that prints the contents of a wrapper string iterator when iterated. """ print(("*" * 40) + " BODY") for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part print() class Router(object): """ WSGI middleware that maps incoming requests to WSGI apps. """ def __init__(self, mapper): """ Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be a wsgi.Controller, who will route the request to the action method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, "/svrlist", controller=sc, action="list") # Actions are all implicitly defined mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify def __call__(self, req): """ Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. 
""" match = req.environ['wsgiorg.routing_args'][1] if not match: return webob.exc.HTTPNotFound() app = match['controller'] return app class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" default_request_content_types = ('application/json', 'application/xml') default_accept_types = ('application/json', 'application/xml') default_accept_type = 'application/json' def best_match_content_type(self, supported_content_types=None): """Determine the requested response content-type. Based on the query extension then the Accept header. Defaults to default_accept_type if we don't find a preference """ supported_content_types = (supported_content_types or self.default_accept_types) parts = self.path.rsplit('.', 1) if len(parts) > 1: ctype = 'application/{0}'.format(parts[1]) if ctype in supported_content_types: return ctype bm = self.accept.best_match(supported_content_types) return bm or self.default_accept_type def get_content_type(self, allowed_content_types=None): """Determine content type of the request body. Does not do any body introspection, only checks header """ if "Content-Type" not in self.headers: return None content_type = self.content_type allowed_content_types = (allowed_content_types or self.default_request_content_types) if content_type not in allowed_content_types: raise base_exception.InvalidContentType(content_type=content_type) return content_type class Resource(object): """ WSGI app that handles (de)serialization and controller dispatch. Reads routing information supplied by RoutesMiddleware and calls the requested action method upon its deserializer, controller, and serializer. Those three objects may implement any of the basic controller action methods (create, update, show, index, delete) along with any that may be specified in the api router. A 'default' method may also be implemented to be used in place of any non-implemented actions. Deserializer methods must accept a request argument and return a dictionary. Controller methods must accept a request argument. Additionally, they must also accept keyword arguments that represent the keys returned by the Deserializer. They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. 
""" def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib :param deserializer: object that supports webob request deserialization through controller-like actions :param serializer: object that supports webob response serialization through controller-like actions """ self.controller = controller self.serializer = serializer or ResponseSerializer() self.deserializer = deserializer or RequestDeserializer() @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" try: action, action_args, accept = self.deserialize_request(request) except base_exception.InvalidContentType: msg = _("Unsupported Content-Type") return webob.exc.HTTPUnsupportedMediaType(explanation=msg) except base_exception.MalformedRequestBody: msg = _("Malformed request body") return webob.exc.HTTPBadRequest(explanation=msg) action_result = self.execute_action(action, request, **action_args) try: return self.serialize_response(action, action_result, accept) # return unserializable result (typically a webob exc) except Exception: return action_result def deserialize_request(self, request): return self.deserializer.deserialize(request) def serialize_response(self, action, action_result, accept): return self.serializer.serialize(action_result, accept, action) def execute_action(self, action, request, **action_args): return self.dispatch(self.controller, action, request, **action_args) def dispatch(self, obj, action, *args, **kwargs): """Find action-specific method on self and call it.""" try: method = getattr(obj, action) except AttributeError: method = getattr(obj, 'default') return method(*args, **kwargs) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): def sanitizer(obj): if isinstance(obj, datetime.datetime): _dtime = obj - datetime.timedelta(microseconds=obj.microsecond) return _dtime.isoformat() return obj # return six.text_type(obj) return jsonutils.dumps(data, default=sanitizer) class XMLDictSerializer(DictSerializer): def __init__(self, metadata=None, xmlns=None): """ :param metadata: information needed to deserialize xml into a dictionary. :param xmlns: XML namespace to include with serialized xml """ super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} self.xmlns = xmlns def default(self, data): # We expect data to contain a single key which is the XML root. 
root_key = list(data.keys())[0] doc = minidom.Document() node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) return self.to_xml_string(node) def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) return node.toprettyxml(indent=' ', encoding='UTF-8') # NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking def _add_xmlns(self, node, has_atom=False): if self.xmlns is not None: node.setAttribute('xmlns', self.xmlns) if has_atom: node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") def _to_xml_node(self, doc, metadata, nodename, data): """Recursive method to convert data members to XML nodes.""" result = doc.createElement(nodename) # Set the xml namespace if one is specified # TODO(justinsb): We could also use prefixes on the keys xmlns = metadata.get('xmlns', None) if xmlns: result.setAttribute('xmlns', xmlns) # TODO(bcwaldon): accomplish this without a type-check if type(data) is list: collections = metadata.get('list_collections', {}) if nodename in collections: metadata = collections[nodename] for item in data: node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(item)) result.appendChild(node) return result singular = metadata.get('plurals', {}).get(nodename, None) if singular is None: if nodename.endswith('s'): singular = nodename[:-1] else: singular = 'item' for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) # TODO(bcwaldon): accomplish this without a type-check elif type(data) is dict: collections = metadata.get('dict_collections', {}) if nodename in collections: metadata = collections[nodename] for k, v in data.items(): node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(k)) text = doc.createTextNode(str(v)) node.appendChild(text) result.appendChild(node) return result attrs = metadata.get('attributes', {}).get(nodename, {}) for k, v in data.items(): if k in attrs: result.setAttribute(k, str(v)) else: node = self._to_xml_node(doc, metadata, k, v) result.appendChild(node) else: # Type is atom node = doc.createTextNode(str(data)) result.appendChild(node) return result def _create_link_nodes(self, xml_doc, links): link_nodes = [] for link in links: link_node = xml_doc.createElement('atom:link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) if 'type' in link: link_node.setAttribute('type', link['type']) link_nodes.append(link_node) return link_nodes class ResponseHeadersSerializer(ActionDispatcher): """Default response headers serialization.""" def serialize(self, response, data, action): self.dispatch(response, data, action=action) def default(self, response, data): response.status_int = 200 class ResponseSerializer(object): """Encode the necessary pieces into a response object.""" def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/xml': XMLDictSerializer(), 'application/json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) self.headers_serializer = (headers_serializer or ResponseHeadersSerializer()) def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a wsgi.Request object. 
:param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body """ response = webob.Response() self.serialize_headers(response, response_data, action) self.serialize_body(response, response_data, content_type, action) return response def serialize_headers(self, response, data, action): self.headers_serializer.serialize(response, data, action) def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: return self.body_serializers[content_type] except (KeyError, TypeError): raise base_exception.InvalidContentType(content_type=content_type) class RequestHeadersDeserializer(ActionDispatcher): """Default request headers deserializer""" def deserialize(self, request, action): return self.dispatch(request, action=action) def default(self, request): return {} class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" def __init__(self, body_deserializers=None, headers_deserializer=None, supported_content_types=None): self.supported_content_types = supported_content_types self.body_deserializers = { 'application/xml': XMLDeserializer(), 'application/json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) self.headers_deserializer = (headers_deserializer or RequestHeadersDeserializer()) def deserialize(self, request): """Extract necessary pieces of the request. :param request: Request object :returns: tuple of (expected controller action name, dictionary of keyword arguments to pass to the controller, the expected content type of the response) """ action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) action_args.update(self.deserialize_headers(request, action)) action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) def deserialize_headers(self, request, action): return self.headers_deserializer.deserialize(request, action) def deserialize_body(self, request, action): if not len(request.body) > 0: LOG.debug("Empty body provided in request") return {} try: content_type = request.get_content_type() except base_exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") raise if content_type is None: LOG.debug("No Content-Type provided in request") return {} try: deserializer = self.get_body_deserializer(content_type) except base_exception.InvalidContentType: LOG.debug("Unable to deserialize body as provided Content-Type") raise return deserializer.deserialize(request.body, action) def get_body_deserializer(self, content_type): try: return self.body_deserializers[content_type] except (KeyError, TypeError): raise base_exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): return request.best_match_content_type(self.supported_content_types) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, 
action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise base_exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class XMLDeserializer(TextDeserializer): def __init__(self, metadata=None): """ :param metadata: information needed to deserialize xml into a dictionary. """ super(XMLDeserializer, self).__init__() self.metadata = metadata or {} def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) try: node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} except expat.ExpatError: msg = _("cannot understand XML") raise base_exception.MalformedRequestBody(reason=msg) def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. :param listnames: list of XML node names whose subnodes should be considered list items. """ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: return node.childNodes[0].nodeValue elif node.nodeName in listnames: return [self._from_xml_node(n, listnames) for n in node.childNodes] else: result = dict() for attr in node.attributes.keys(): result[attr] = node.attributes[attr].nodeValue for child in node.childNodes: if child.nodeType != node.TEXT_NODE: result[child.nodeName] = self._from_xml_node(child, listnames) return result def find_first_child_named(self, parent, name): """Search a nodes children for the first child with a given name.""" for node in parent.childNodes: if node.nodeName == name: return node return None def find_children_named(self, parent, name): """Return all of a nodes children who have the given name.""" for node in parent.childNodes: if node.nodeName == name: yield node def extract_text(self, node): """Get the text field contained by the given node.""" if len(node.childNodes) == 1: child = node.childNodes[0] if child.nodeType == child.TEXT_NODE: return child.nodeValue return "" def default(self, datastring): return {'body': self._from_xml(datastring)} trove-5.0.0/trove/common/crypto_utils.py0000664000567000056710000000355712701410320021555 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
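# NOTE(editor): illustrative addition, not part of the original module. A minimal round-trip sketch for the encrypt_data()/decrypt_data() helpers defined below, assuming PyCrypto is installed and byte-string keys and payloads (Python 2, as in the rest of this module). encrypt_data() prepends its 16-byte IV to the ciphertext, so decrypt_data() expects that prefix.
def _example_encrypt_round_trip():
    secret = 'my-secret-key'  # hypothetical key
    plaintext = 'instance root password'  # hypothetical payload
    # encrypt_data() returns iv + ciphertext; base64-encode it for transport.
    encrypted = encode_data(encrypt_data(plaintext, secret))
    # Reverse the steps: decode, then decrypt (the IV is read off the prefix).
    decrypted = decrypt_data(decode_data(encrypted), secret)
    assert decrypted == plaintext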
# # Encryption/decryption handling from Crypto.Cipher import AES from Crypto import Random import hashlib from trove.common import stream_codecs IV_BIT_COUNT = 16 def encode_data(data): return stream_codecs.Base64Codec().serialize(data) def decode_data(data): return stream_codecs.Base64Codec().deserialize(data) # Pad the data string to a multiple of pad_size def pad_for_encryption(data, pad_size=IV_BIT_COUNT): pad_count = pad_size - (len(data) % pad_size) return data + chr(pad_count) * pad_count # Unpad the data string by stripping off excess characters def unpad_after_decryption(data): return data[:len(data) - ord(data[-1])] def encrypt_data(data, key, iv_bit_count=IV_BIT_COUNT): md5_key = hashlib.md5(key).hexdigest() iv = encode_data(Random.new().read(iv_bit_count))[:iv_bit_count] aes = AES.new(md5_key, AES.MODE_CBC, iv) data = pad_for_encryption(data, iv_bit_count) encrypted = aes.encrypt(data) return iv + encrypted def decrypt_data(data, key, iv_bit_count=IV_BIT_COUNT): md5_key = hashlib.md5(key).hexdigest() iv = data[:iv_bit_count] aes = AES.new(md5_key, AES.MODE_CBC, bytes(iv)) decrypted = aes.decrypt(bytes(data[iv_bit_count:])) return unpad_after_decryption(decrypted) trove-5.0.0/trove/common/profile.py0000664000567000056710000000345612701410316020460 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_context import context from oslo_log import log as logging import oslo_messaging as messaging from osprofiler import notifier from osprofiler import web from trove.common import cfg from trove.common import i18n from trove import rpc _LW = i18n._LW LOG = logging.getLogger(__name__) CONF = cfg.CONF def setup_profiler(binary, host): if CONF.profiler.enabled: _notifier = notifier.create( "Messaging", messaging, context.get_admin_context().to_dict(), rpc.TRANSPORT, "trove", binary, host) notifier.set(_notifier) web.enable(CONF.profiler.hmac_keys) LOG.warning(_LW("The OpenStack Profiler is enabled. Using one" " of the hmac_keys specified in the trove.conf file " "(typically in /etc/trove), a trace can be made of " "all requests. Only an admin user can retrieve " "the trace information, however.\n" "To disable the profiler, add the following to the " "configuration file:\n" "[profiler]\n" "enabled=false")) else: web.disable() trove-5.0.0/trove/common/utils.py0000664000567000056710000002336612701410316020162 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """I totally stole most of this from melange, thx guys!!!""" import collections import datetime import inspect import os import shutil import time import types import uuid from eventlet.timeout import Timeout import jinja2 from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from passlib import utils as passlib_utils import six.moves.urllib.parse as urlparse from trove.common import cfg from trove.common import exception from trove.common.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) import_class = importutils.import_class import_object = importutils.import_object import_module = importutils.import_module bool_from_string = strutils.bool_from_string execute = processutils.execute isotime = timeutils.isotime def build_jinja_environment(): env = jinja2.Environment(loader=jinja2.ChoiceLoader([ jinja2.FileSystemLoader(CONF.template_path), jinja2.PackageLoader("trove", "templates") ])) # Add some basic operations that are not built-in. env.globals['max'] = max env.globals['min'] = min return env ENV = build_jinja_environment() def pagination_limit(limit, default_limit): limit = int(limit or default_limit) return min(limit, default_limit) def create_method_args_string(*args, **kwargs): """Returns a string representation of args and keyword args. I.e. for args=1,2,3 and kwargs={'a':4, 'b':5} you'd get: "1,2,3,a=4,b=5" """ # While %s turns a var into a string, in some rare cases explicit # repr() is less likely to raise an exception. arg_strs = [repr(arg) for arg in args] arg_strs += ['%s=%s' % (repr(key), repr(value)) for (key, value) in kwargs.items()] return ', '.join(arg_strs) def stringify_keys(dictionary): if dictionary is None: return None return {str(key): value for key, value in dictionary.iteritems()} def exclude(key_values, *exclude_keys): if key_values is None: return None return {key: value for key, value in key_values.iteritems() if key not in exclude_keys} def generate_uuid(): return str(uuid.uuid4()) def utcnow(): return datetime.datetime.utcnow() def raise_if_process_errored(process, exception): try: err = process.stderr.read() if err: raise exception(err) except OSError: pass def clean_out(folder): for root, dirs, files in os.walk(folder): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) class cached_property(object): """A decorator that converts a function into a lazy property.
Taken from: https://github.com/nshah/python-memoize The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value: class Foo(object): @cached_property def bar(self): # calculate something important here return 42 """ def __init__(self, func, name=None, doc=None): self.func = func self.__name__ = name or func.__name__ self.__doc__ = doc or func.__doc__ def __get__(self, obj, owner): if obj is None: return self value = self.func(obj) setattr(obj, self.__name__, value) return value class MethodInspector(object): def __init__(self, func): self._func = func @cached_property def required_args(self): return self.args[0:self.required_args_count] @cached_property def optional_args(self): keys = self.args[self.required_args_count: len(self.args)] return zip(keys, self.defaults) @cached_property def defaults(self): return self.argspec.defaults or () @cached_property def required_args_count(self): return len(self.args) - len(self.defaults) @cached_property def args(self): args = self.argspec.args if inspect.ismethod(self._func): args.pop(0) return args @cached_property def argspec(self): return inspect.getargspec(self._func) def __str__(self): optionals = ["[{0}=<{0}>]".format(k) for k, v in self.optional_args] required = ["{0}=<{0}>".format(arg) for arg in self.required_args] args_str = ' '.join(required + optionals) return "%s %s" % (self._func.__name__, args_str) def build_polling_task(retriever, condition=lambda value: value, sleep_time=1, time_out=None): start_time = time.time() def poll_and_check(): obj = retriever() if condition(obj): raise loopingcall.LoopingCallDone(retvalue=obj) if time_out is not None and time.time() - start_time > time_out: raise exception.PollTimeOut return loopingcall.FixedIntervalLoopingCall( f=poll_and_check).start(sleep_time, initial_delay=False) def poll_until(retriever, condition=lambda value: value, sleep_time=1, time_out=None): """Retrieves object until it passes condition, then returns it. If time_out is passed in, PollTimeOut will be raised once that amount of time has elapsed. """ return build_polling_task(retriever, condition=condition, sleep_time=sleep_time, time_out=time_out).wait() # Copied from nova.api.openstack.common in the old code. def get_id_from_href(href): """Return the id or uuid portion of a url. Given: 'http://www.foo.com/bar/123?q=4' Returns: '123' Given: 'http://www.foo.com/bar/abc123?q=4' Returns: 'abc123' """ return urlparse.urlsplit("%s" % href).path.split('/')[-1] def execute_with_timeout(*args, **kwargs): time = kwargs.pop('timeout', 30) log_output_on_error = kwargs.pop('log_output_on_error', False) timeout = Timeout(time) try: return execute(*args, **kwargs) except exception.ProcessExecutionError as e: if log_output_on_error: LOG.error( _("Command '%(cmd)s' failed.
%(description)s " "Exit code: %(exit_code)s\nstderr: %(stderr)s\n" "stdout: %(stdout)s") % {'cmd': e.cmd, 'description': e.description or '', 'exit_code': e.exit_code, 'stderr': e.stderr, 'stdout': e.stdout}) raise except Timeout as t: if t is not timeout: LOG.error(_("Got a timeout but not the one expected.")) raise else: msg = (_("Time out after waiting " "%(time)s seconds when running proc: %(args)s" " %(kwargs)s.") % {'time': time, 'args': args, 'kwargs': kwargs}) LOG.error(msg) raise exception.ProcessExecutionError(msg) finally: timeout.cancel() def correct_id_with_req(id, request): # Due to a shortcoming with the way Trove uses routes.mapper, # URL entities right of the last slash that contain at least # one . are routed to our service without that suffix, as # it was interpreted as a filetype. This method looks at the # request, and if applicable, reattaches the suffix to the id. routing_args = request.environ.get('wsgiorg.routing_args', []) for routing_arg in routing_args: try: found = routing_arg.get('format', '') if found and found not in CONF.expected_filetype_suffixes: return "%s.%s" % (id, found) except (AttributeError, KeyError): # Not the relevant routing_args entry. pass return id def generate_random_password(password_length=CONF.default_password_length): return passlib_utils.generate_password(size=password_length) def try_recover(func): def _decorator(*args, **kwargs): recover_func = kwargs.pop("recover_func", None) try: func(*args, **kwargs) except Exception: if recover_func is not None: recover_func(func) else: LOG.debug("No recovery method defined for %(func)s" % { 'func': func.__name__}) raise return _decorator def gen_ports(portstr): from_port, sep, to_port = portstr.partition('-') if not (to_port and from_port): if not sep: to_port = from_port if int(from_port) > int(to_port): raise ValueError return from_port, to_port def unpack_singleton(container): """Unpack singleton collections. Check whether a given collection is a singleton (has exactly one element) and unpack it if that is the case. Return the original collection otherwise. """ if is_collection(container) and len(container) == 1: return unpack_singleton(container[0]) return container def is_collection(item): """Return True if a given item is an iterable collection, but not a string. """ return (isinstance(item, collections.Iterable) and not isinstance(item, types.StringTypes)) trove-5.0.0/trove/common/__init__.py0000664000567000056710000000000012701410316020535 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/stream_codecs.py0000664000567000056710000003137312701410316021632 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import ast import base64 import csv import json import re import six import yaml from ConfigParser import SafeConfigParser from trove.common import utils as trove_utils class StringConverter(object): """A passthrough string-to-object converter.
""" def __init__(self, object_mappings): """ :param object_mappings: string-to-object mappings :type object_mappings: dict """ self._object_mappings = object_mappings def to_strings(self, items): """Recursively convert collection items to strings. :returns: Copy of the input collection with all items converted. """ if trove_utils.is_collection(items): return map(self.to_strings, items) return self._to_string(items) def to_objects(self, items): """Recursively convert collection string to objects. :returns: Copy of the input collection with all items converted. """ if trove_utils.is_collection(items): return map(self.to_objects, items) return self._to_object(items) def _to_string(self, value): for k, v in self._object_mappings.items(): if v is value: return k return str(value) def _to_object(self, value): # Return known mappings and quoted strings right away. if value in self._object_mappings: return self._object_mappings[value] elif (isinstance(value, basestring) and re.match("^'(.*)'|\"(.*)\"$", value)): return value try: return ast.literal_eval(value) except Exception: return value @six.add_metaclass(abc.ABCMeta) class StreamCodec(object): @abc.abstractmethod def serialize(self, data): """Serialize a Python object into a stream. """ @abc.abstractmethod def deserialize(self, stream): """Deserialize stream data into a Python structure. """ class IdentityCodec(StreamCodec): """ A basic passthrough codec. Does not modify the data in any way. """ def serialize(self, data): return data def deserialize(self, stream): return stream class YamlCodec(StreamCodec): """ Read/write data from/into a YAML config file. a: 1 b: {c: 3, d: 4} ... The above file content (flow-style) would be represented as: {'a': 1, 'b': {'c': 3, 'd': 4,} ... } """ def __init__(self, default_flow_style=False): """ :param default_flow_style: Use flow-style (inline) formatting of nested collections. :type default_flow_style: boolean """ self._default_flow_style = default_flow_style def serialize(self, dict_data): return yaml.dump(dict_data, Dumper=self.dumper, default_flow_style=self._default_flow_style) def deserialize(self, stream): return yaml.load(stream, Loader=self.loader) @property def loader(self): return yaml.loader.Loader @property def dumper(self): return yaml.dumper.Dumper class SafeYamlCodec(YamlCodec): """ Same as YamlCodec except that it uses safe Loader and Dumper which encode Unicode strings and produce only basic YAML tags. """ def __init__(self, default_flow_style=False): super(SafeYamlCodec, self).__init__( default_flow_style=default_flow_style) @property def loader(self): return yaml.loader.SafeLoader @property def dumper(self): return yaml.dumper.SafeDumper class IniCodec(StreamCodec): """ Read/write data from/into an ini-style config file. [section_1] key = value key = value ... [section_2] key = value key = value ... The above file content would be represented as: {'section_1': {'key': 'value', 'key': 'value', ...}, 'section_2': {'key': 'value', 'key': 'value', ...} ... } """ def __init__(self, default_value=None, comment_markers=('#', ';')): """ :param default_value: Default value for keys with no value. If set, all keys are written as 'key = value'. The key is written without trailing '=' if None. 
:type default_value: string """ self._value_converter = StringConverter({default_value: None}) self._default_value = default_value self._comment_markers = comment_markers def serialize(self, dict_data): parser = self._init_config_parser(dict_data) output = six.StringIO() parser.write(output) return output.getvalue() def deserialize(self, stream): parser = self._init_config_parser() parser.readfp(self._pre_parse(stream)) return {s: {k: self._value_converter.to_strings(v) for k, v in parser.items(s, raw=True)} for s in parser.sections()} def _pre_parse(self, stream): buf = six.StringIO() for line in six.StringIO(stream): # Ignore commented lines. if not line.startswith(self._comment_markers): # Strip leading and trailing whitespaces from each line. buf.write(line.strip() + '\n') # Rewind the output buffer. buf.flush() buf.seek(0) return buf def _init_config_parser(self, sections=None): parser = SafeConfigParser(allow_no_value=True) if sections: for section in sections: parser.add_section(section) for key, value in sections[section].items(): parser.set(section, key, self._value_converter.to_strings(value)) return parser class PropertiesCodec(StreamCodec): """ Read/write data from/into a property-style config file. key1 k1arg1 k1arg2 ... k1argN key2 k2arg1 k2arg2 ... k2argN key3 k3arg1 k3arg2 ... key3 k3arg3 k3arg4 ... ... The above file content would be represented as: {'key1': [k1arg1, k1arg2 ... k1argN], 'key2': [k2arg1, k2arg2 ... k2argN] 'key3': [[k3arg1, k3arg2, ...], [k3arg3, k3arg4, ...]] ... } """ QUOTING_MODE = csv.QUOTE_MINIMAL STRICT_MODE = False SKIP_INIT_SPACE = True def __init__(self, delimiter=' ', comment_markers=('#'), unpack_singletons=True, string_mappings=None): """ :param delimiter: A one-character string used to separate fields. :type delimiter: string :param comment_markers: List of comment markers. :type comment_markers: list :param unpack_singletons: Whether to unpack singleton collections (collections with only a single value). :type unpack_singletons: boolean :param string_mappings: User-defined string representations of Python objects. :type string_mappings: dict """ self._delimiter = delimiter self._comment_markers = comment_markers self._string_converter = StringConverter(string_mappings or {}) self._unpack_singletons = unpack_singletons def serialize(self, dict_data): output = six.StringIO() writer = csv.writer(output, delimiter=self._delimiter, quoting=self.QUOTING_MODE, strict=self.STRICT_MODE, skipinitialspace=self.SKIP_INIT_SPACE) for key, value in dict_data.items(): writer.writerows(self._to_rows(key, value)) return output.getvalue() def deserialize(self, stream): reader = csv.reader(six.StringIO(stream), delimiter=self._delimiter, quoting=self.QUOTING_MODE, strict=self.STRICT_MODE, skipinitialspace=self.SKIP_INIT_SPACE) return self._to_dict(reader) def _to_dict(self, reader): data_dict = {} for row in reader: if row: key = row[0].strip() # Ignore comment lines. if not key.strip().startswith(self._comment_markers): items = self._string_converter.to_objects( [v if v else None for v in map(self._strip_comments, row[1:])]) current = data_dict.get(key) if current is not None: current.append(trove_utils.unpack_singleton(items) if self._unpack_singletons else items) else: data_dict.update({key: [items]}) if self._unpack_singletons: # Unpack singleton values.
for k, v in data_dict.items(): data_dict.update({k: trove_utils.unpack_singleton(v)}) return data_dict def _strip_comments(self, value): # Strip in-line comments. for marker in self._comment_markers: value = value.split(marker)[0] return value.strip() def _to_rows(self, header, items): rows = [] if trove_utils.is_collection(items): if any(trove_utils.is_collection(item) for item in items): # This is multi-row property. for item in items: rows.extend(self._to_rows(header, item)) else: # This is a single-row property with multiple arguments. rows.append(self._to_list( header, self._string_converter.to_strings(items))) else: # This is a single-row property with only one argument. rows.append( self._string_converter.to_strings( self._to_list(header, items))) return rows def _to_list(self, *items): container = [] for item in items: if trove_utils.is_collection(item): # This item is a nested collection - unpack it. container.extend(self._to_list(*item)) else: # This item is not a collection - append it to the list. container.append(item) return container class KeyValueCodec(PropertiesCodec): """ Read/write data from/into a simple key=value file. key1=value1 key2=value2 key3=value3 ... The above file content would be represented as: {'key1': 'value1', 'key2': 'value2', 'key3': 'value3', ... } """ def __init__(self, delimiter='=', comment_markers=('#'), unpack_singletons=True, string_mappings=None): super(KeyValueCodec, self).__init__( delimiter=delimiter, comment_markers=comment_markers, unpack_singletons=unpack_singletons, string_mappings=string_mappings) class JsonCodec(StreamCodec): def serialize(self, dict_data): return json.dumps(dict_data) def deserialize(self, stream): return json.load(six.StringIO(stream)) class Base64Codec(StreamCodec): """Serialize (encode) and deserialize (decode) using the base64 codec. To read binary data from a file and b64encode it, use the decode=False flag on operating_system's read calls. Use encode=False to decode binary data before writing to a file as well. """ def serialize(self, data): try: # py27str - if we've got text data, this should encode it # py27aa/py34aa - if we've got a bytearray, this should work too encoded = str(base64.b64encode(data).decode('utf-8')) except TypeError: # py34str - convert to bytes first, then we can encode data_bytes = bytes([ord(item) for item in data]) encoded = base64.b64encode(data_bytes).decode('utf-8') return encoded def deserialize(self, stream): # py27 & py34 seem to understand bytearray the same return bytearray([item for item in base64.b64decode(stream)]) trove-5.0.0/trove/common/wsgi.py0000664000567000056710000005554512701410316017773 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
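# NOTE(editor): illustrative addition, not part of the original source. A short usage sketch for the stream codecs defined above: pick the codec matching the config-file format and round-trip a Python dict through serialize()/deserialize(). The section and option names below are hypothetical.
def _example_stream_codec_round_trip():
    from trove.common.stream_codecs import IniCodec, PropertiesCodec
    ini = IniCodec()
    text = ini.serialize({'mysqld': {'port': 3306}})
    # Values always come back as strings after a round trip.
    assert ini.deserialize(text)['mysqld']['port'] == '3306'
    # PropertiesCodec splits each line into a key and its argument list.
    props = PropertiesCodec()
    assert props.deserialize('key arg1 arg2')['key'] == ['arg1', 'arg2']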
"""Wsgi helper utilities for trove""" import math import re import time import traceback import uuid import eventlet.wsgi import jsonschema from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import service import paste.urlmap import webob import webob.dec import webob.exc from trove.common import base_wsgi from trove.common import cfg from trove.common import context as rd_context from trove.common import exception from trove.common.i18n import _ from trove.common import pastedeploy from trove.common import utils CONTEXT_KEY = 'trove.context' Router = base_wsgi.Router Debug = base_wsgi.Debug Middleware = base_wsgi.Middleware JSONDictSerializer = base_wsgi.JSONDictSerializer RequestDeserializer = base_wsgi.RequestDeserializer CONF = cfg.CONF # Raise the default from 8192 to accommodate large tokens eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line eventlet.patcher.monkey_patch(all=False, socket=True) LOG = logging.getLogger('trove.common.wsgi') def versioned_urlmap(*args, **kwargs): urlmap = paste.urlmap.urlmap_factory(*args, **kwargs) return VersionedURLMap(urlmap) def launch(app_name, port, paste_config_file, data={}, host='0.0.0.0', backlog=128, threads=1000, workers=None): """Launches a wsgi server based on the passed in paste_config_file. Launch provides a easy way to create a paste app from the config file and launch it via the service launcher. It takes care of all of the plumbing. The only caveat is that the paste_config_file must be a file that paste.deploy can find and handle. There is a helper method in cfg.py that finds files. Example: conf_file = CONF.find_file(CONF.api_paste_config) launcher = wsgi.launch('myapp', CONF.bind_port, conf_file) launcher.wait() """ LOG.debug("Trove started on %s", host) app = pastedeploy.paste_deploy_app(paste_config_file, app_name, data) server = base_wsgi.Service(app, port, host=host, backlog=backlog, threads=threads) return service.launch(CONF, server, workers) # Note: taken from Nova def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator class TroveMiddleware(Middleware): # Note: taken from nova @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = nova.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import nova.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" def _factory(app): return cls(app, **local_config) return _factory class VersionedURLMap(object): def __init__(self, urlmap): self.urlmap = urlmap def __call__(self, environ, start_response): req = Request(environ) if req.url_version is None and req.accept_version is not None: version = "/v" + req.accept_version http_exc = webob.exc.HTTPNotAcceptable(_("version not supported")) app = self.urlmap.get(version, Fault(http_exc)) else: app = self.urlmap return app(environ, start_response) class Router(base_wsgi.Router): # Original router did not allow for serialization of the 404 error. # To fix this the _dispatch was modified to use Fault() objects. @staticmethod @webob.dec.wsgify def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return Fault(webob.exc.HTTPNotFound()) app = match['controller'] return app class Request(base_wsgi.Request): @property def params(self): return utils.stringify_keys(super(Request, self).params) def best_match_content_type(self, supported_content_types=None): """Determine the most acceptable content-type. Based on the query extension then the Accept header. """ parts = self.path.rsplit('.', 1) if len(parts) > 1: format = parts[1] if format in ['json']: return 'application/{0}'.format(parts[1]) ctypes = { 'application/vnd.openstack.trove+json': "application/json", 'application/json': "application/json", } bm = self.accept.best_match(ctypes.keys()) return ctypes.get(bm, 'application/json') @utils.cached_property def accept_version(self): accept_header = self.headers.get('ACCEPT', "") accept_version_re = re.compile(".*?application/vnd.openstack.trove" "(\+.+?)?;" "version=(?P\d+\.?\d*)") match = accept_version_re.search(accept_header) return match.group("version_no") if match else None @utils.cached_property def url_version(self): versioned_url_re = re.compile("/v(?P\d+\.?\d*)") match = versioned_url_re.search(self.path) return match.group("version_no") if match else None class Result(object): """A result whose serialization is compatible with JSON.""" def __init__(self, data, status=200): self._data = data self.status = status def data(self, serialization_type): """Return an appropriate serialized type for the body. serialization_type is not used presently, but may be in the future, so it stays. 
""" if hasattr(self._data, "data_for_json"): return self._data.data_for_json() return self._data class Resource(base_wsgi.Resource): def __init__(self, controller, deserializer, serializer, exception_map=None): exception_map = exception_map or {} self.model_exception_map = self._invert_dict_list(exception_map) super(Resource, self).__init__(controller, deserializer, serializer) @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): return super(Resource, self).__call__(request) def execute_action(self, action, request, **action_args): if getattr(self.controller, action, None) is None: return Fault(webob.exc.HTTPNotFound()) try: self.controller.validate_request(action, action_args) result = super(Resource, self).execute_action( action, request, **action_args) if type(result) is dict: result = Result(result) return result except exception.TroveError as trove_error: LOG.debug(traceback.format_exc()) LOG.debug("Caught Trove Error %s", trove_error) httpError = self._get_http_error(trove_error) LOG.debug("Mapped Error to %s", httpError) return Fault(httpError(str(trove_error), request=request)) except webob.exc.HTTPError as http_error: LOG.debug(traceback.format_exc()) return Fault(http_error) except Exception as error: exception_uuid = str(uuid.uuid4()) LOG.exception(exception_uuid + ": " + str(error)) return Fault(webob.exc.HTTPInternalServerError( "Internal Server Error. Please keep this ID to help us " "figure out what went wrong: (%s)." % exception_uuid, request=request)) def _get_http_error(self, error): return self.model_exception_map.get(type(error), webob.exc.HTTPBadRequest) def _invert_dict_list(self, exception_dict): """Flattens values of keys and inverts keys and values. Example: {'x': [1, 2, 3], 'y': [4, 5, 6]} converted to {1: 'x', 2: 'x', 3: 'x', 4: 'y', 5: 'y', 6: 'y'} """ inverted_dict = {} for key, value_list in exception_dict.items(): for value in value_list: inverted_dict[value] = key return inverted_dict def serialize_response(self, action, action_result, accept): # If an exception is raised here in the base class, it is swallowed, # and the action_result is returned as-is. For us, that's bad news - # we never want that to happen except in the case of webob types. # So we override the behavior here so we can at least log it. try: return super(Resource, self).serialize_response( action, action_result, accept) except Exception: # execute_action either returns the results or a Fault object. # If action_result is not a Fault then there really was a # serialization error which we log. Otherwise return the Fault. 
if not isinstance(action_result, Fault): LOG.exception(_("Unserializable result detected.")) raise return action_result class Controller(object): """Base controller that creates a Resource with default serializers.""" exception_map = { webob.exc.HTTPUnprocessableEntity: [ exception.UnprocessableEntity, ], webob.exc.HTTPUnauthorized: [ exception.Forbidden, exception.SwiftAuthError, ], webob.exc.HTTPForbidden: [ exception.ReplicaSourceDeleteForbidden, exception.BackupTooLarge, exception.ModuleAccessForbidden, exception.ModuleAppliedToInstance, ], webob.exc.HTTPBadRequest: [ exception.InvalidModelError, exception.BadRequest, exception.CannotResizeToSameSize, exception.BadValue, exception.DatabaseAlreadyExists, exception.UserAlreadyExists, exception.LocalStorageNotSpecified, exception.ModuleAlreadyExists, ], webob.exc.HTTPNotFound: [ exception.NotFound, exception.ComputeInstanceNotFound, exception.ModelNotFoundError, exception.UserNotFound, exception.DatabaseNotFound, exception.QuotaResourceUnknown, exception.BackupFileNotFound, exception.ClusterNotFound, exception.DatastoreNotFound, exception.SwiftNotFound, exception.ModuleTypeNotFound, ], webob.exc.HTTPConflict: [ exception.BackupNotCompleteError, exception.RestoreBackupIntegrityError, ], webob.exc.HTTPRequestEntityTooLarge: [ exception.OverLimit, exception.QuotaExceeded, exception.VolumeQuotaExceeded, ], webob.exc.HTTPServerError: [ exception.VolumeCreationFailure, exception.UpdateGuestError, ], webob.exc.HTTPNotImplemented: [ exception.VolumeNotSupported, exception.LocalStorageNotSupported, exception.DatastoreOperationNotSupported, exception.ClusterInstanceOperationNotSupported, exception.ClusterDatastoreNotSupported ], } schemas = {} @classmethod def get_schema(cls, action, body): LOG.debug("Getting schema for %s:%s" % (cls.__class__.__name__, action)) if cls.schemas: matching_schema = cls.schemas.get(action, {}) if matching_schema: LOG.debug( "Found Schema: %s" % matching_schema.get("name", matching_schema)) return matching_schema @staticmethod def format_validation_msg(errors): # format path like object['field1'][i]['subfield2'] messages = [] for error in errors: path = list(error.path) f_path = "%s%s" % (path[0], ''.join(['[%r]' % i for i in path[1:]])) messages.append("%s %s" % (f_path, error.message)) for suberror in sorted(error.context, key=lambda e: e.schema_path): messages.append(suberror.message) error_msg = "; ".join(messages) return "Validation error: %s" % error_msg def validate_request(self, action, action_args): body = action_args.get('body', {}) schema = self.get_schema(action, body) if schema: validator = jsonschema.Draft4Validator(schema) if not validator.is_valid(body): errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_msg = self.format_validation_msg(errors) LOG.info(error_msg) raise exception.BadRequest(message=error_msg) def create_resource(self): return Resource( self, RequestDeserializer(), TroveResponseSerializer(), self.exception_map) def _extract_limits(self, params): return {key: params[key] for key in params.keys() if key in ["limit", "marker"]} class TroveResponseSerializer(base_wsgi.ResponseSerializer): def serialize_body(self, response, data, content_type, action): """Overrides body serialization in base_wsgi.ResponseSerializer. If the "data" argument is the Result class, its data method is called and *that* is passed to the superclass implementation instead of the actual data. 
""" if isinstance(data, Result): data = data.data(content_type) super(TroveResponseSerializer, self).serialize_body( response, data, content_type, action) def serialize_headers(self, response, data, action): super(TroveResponseSerializer, self).serialize_headers( response, data, action) if isinstance(data, Result): response.status = data.status class Fault(webob.exc.HTTPException): """Error codes for API faults.""" code_wrapper = { 400: webob.exc.HTTPBadRequest, 401: webob.exc.HTTPUnauthorized, 403: webob.exc.HTTPUnauthorized, 404: webob.exc.HTTPNotFound, } resp_codes = [int(code) for code in code_wrapper.keys()] def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception @staticmethod def _get_error_name(exc): # Displays a Red Dwarf specific error name instead of a webob exc name. named_exceptions = { 'HTTPBadRequest': 'badRequest', 'HTTPUnauthorized': 'unauthorized', 'HTTPForbidden': 'forbidden', 'HTTPNotFound': 'itemNotFound', 'HTTPMethodNotAllowed': 'badMethod', 'HTTPRequestEntityTooLarge': 'overLimit', 'HTTPUnsupportedMediaType': 'badMediaType', 'HTTPInternalServerError': 'instanceFault', 'HTTPNotImplemented': 'notImplemented', 'HTTPServiceUnavailable': 'serviceUnavailable', } name = exc.__class__.__name__ if name in named_exceptions: return named_exceptions[name] # If the exception isn't in our list, at least strip off the # HTTP from the name, and then drop the case on the first letter. name = name.split("HTTP").pop() name = name[:1].lower() + name[1:] return name @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. fault_name = Fault._get_error_name(self.wrapped_exc) fault_data = { fault_name: { 'code': self.wrapped_exc.status_int, } } if self.wrapped_exc.detail: fault_data[fault_name]['message'] = self.wrapped_exc.detail else: fault_data[fault_name]['message'] = self.wrapped_exc.explanation content_type = req.best_match_content_type() serializer = { 'application/json': base_wsgi.JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data, content_type) self.wrapped_exc.content_type = content_type return self.wrapped_exc class ContextMiddleware(base_wsgi.Middleware): def __init__(self, application): self.admin_roles = CONF.admin_roles super(ContextMiddleware, self).__init__(application) def _extract_limits(self, params): return {key: params[key] for key in params.keys() if key in ["limit", "marker"]} def process_request(self, request): service_catalog = None catalog_header = request.headers.get('X-Service-Catalog', None) if catalog_header: try: service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) tenant_id = request.headers.get('X-Tenant-Id', None) auth_token = request.headers["X-Auth-Token"] user_id = request.headers.get('X-User-ID', None) roles = request.headers.get('X-Role', '').split(',') is_admin = False for role in roles: if role.lower() in self.admin_roles: is_admin = True break limits = self._extract_limits(request.params) context = rd_context.TroveContext(auth_token=auth_token, tenant=tenant_id, user=user_id, is_admin=is_admin, limit=limits.get('limit'), marker=limits.get('marker'), service_catalog=service_catalog) request.environ[CONTEXT_KEY] = context @classmethod def factory(cls, global_config, **local_config): def _factory(app): LOG.debug("Created context middleware with 
config: %s" % local_config) return cls(app) return _factory class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): try: resp = req.get_response(self.application) if resp.status_int in Fault.resp_codes: for (header, value) in resp._headerlist: if header == "Content-Type" and \ value == "text/plain; charset=UTF-8": return Fault(Fault.code_wrapper[resp.status_int]()) return resp return resp except Exception as ex: LOG.exception(_("Caught error: %s."), unicode(ex)) exc = webob.exc.HTTPInternalServerError() return Fault(exc) @classmethod def factory(cls, global_config, **local_config): def _factory(app): return cls(app) return _factory # ported from Nova class OverLimitFault(webob.exc.HTTPException): """ Rate-limited request response. """ def __init__(self, message, details, retry_time): """ Initialize new `OverLimitFault` with relevant information. """ hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = {"overLimit": {"code": self.wrapped_exc.status_int, "message": message, "details": details, "retryAfter": hdrs['Retry-After'], }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%d' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """ Return the wrapped exception with a serialized body conforming to our error format. """ content_type = request.best_match_content_type() serializer = {'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content self.wrapped_exc.content_type = content_type return self.wrapped_exc class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): return jsonutils.dumps(data) trove-5.0.0/trove/common/debug_utils.py0000664000567000056710000001265112701410316021323 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
# """Help utilities for debugging""" import sys from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) CONF = cfg.CONF __debug_state = None pydev_debug_opts = [ cfg.StrOpt("pydev_debug", choices=("disabled", "enabled", "auto"), default="disabled", help="Enable or disable pydev remote debugging. " "If value is 'auto' tries to connect to remote " "debugger server, but in case of error " "continues running with debugging disabled."), cfg.StrOpt("pydev_debug_host", help="Pydev debug server host (localhost by default)."), cfg.PortOpt("pydev_debug_port", default=5678, help="Pydev debug server port (5678 by default)."), cfg.StrOpt("pydev_path", help="Set path to pydevd library, used if pydevd is " "not found in python sys.path.") ] CONF.register_opts(pydev_debug_opts) def setup(): """ Analyze configuration for pydev remote debugging and establish connection to remote debugger service if needed @return: True if remote debugging was enabled successfully, otherwise - False """ global __debug_state if CONF.pydev_debug == "enabled": __debug_state = __setup_remote_pydev_debug( pydev_debug_host=CONF.pydev_debug_host, pydev_debug_port=CONF.pydev_debug_port, pydev_path=CONF.pydev_path) elif CONF.pydev_debug == "auto": __debug_state = __setup_remote_pydev_debug_safe( pydev_debug_host=CONF.pydev_debug_host, pydev_debug_port=CONF.pydev_debug_port, pydev_path=CONF.pydev_path) else: __debug_state = False def enabled(): """ @return: True if connection to remote debugger established, otherwise False """ assert __debug_state is not None, ("debug_utils are not initialized. " "Please call setup() method first") # if __debug_state is set and we have monkey patched # eventlet.thread, issue a warning. # You can't safely use eventlet.is_monkey_patched() on the # threading module so you have to do this little dance. # Discovered after much head scratching, see also # # http://stackoverflow.com/questions/32452110/ # does-eventlet-do-monkey-patch-for-threading-module # # note multi-line URL if __debug_state: import threading if threading.current_thread.__module__ == 'eventlet.green.threading': LOG.warning(_("Enabling debugging with eventlet monkey" " patched produce unexpected behavior.")) return __debug_state def __setup_remote_pydev_debug_safe(pydev_debug_host=None, pydev_debug_port=5678, pydev_path=None): """ Safe version of __setup_remote_pydev_debug method. In error case returns False as result instead of Exception raising @see: __setup_remote_pydev_debug """ try: return __setup_remote_pydev_debug( pydev_debug_host=pydev_debug_host, pydev_debug_port=pydev_debug_port, pydev_path=pydev_path) except Exception as e: LOG.warning(_("Can't connect to remote debug server." " Continuing to work in standard mode." " Error: %s."), e) return False def __setup_remote_pydev_debug(pydev_debug_host=None, pydev_debug_port=None, pydev_path=None): """ Method connects to remote debug server, and attach current thread trace to debugger. 
Also thread.start_new_thread and thread.start_new are patched to enable debugging of new threads @param pydev_debug_host: remote debug server host name, 'localhost' if not specified or None @param pydev_debug_port: remote debug server port, 5678 if not specified or None @param pydev_path: optional path to pydevd library, used if pydevd is not found in python sys.path @return: True if debugging initialized, otherwise an exception is raised """ try: import pydevd LOG.debug("pydevd module was imported from system path") except ImportError: LOG.debug("Can't load pydevd module from system path. Trying to load it " "from pydev_path: %s", pydev_path) assert pydev_path, "pydev_path is not set" if pydev_path not in sys.path: sys.path.append(pydev_path) import pydevd LOG.debug("pydevd module was imported from pydev_path: %s", pydev_path) pydevd.settrace( host=pydev_debug_host, port=pydev_debug_port, stdoutToServer=True, stderrToServer=True, trace_only_current_thread=False, suspend=False, ) return True trove-5.0.0/trove/common/local.py0000664000567000056710000000321512701410316020103 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Local storage of variables using weak references""" import threading import weakref class WeakLocal(threading.local): def __getattribute__(self, attr): rval = super(WeakLocal, self).__getattribute__(attr) if rval: # NOTE(mikal): this bit is confusing. What is stored is a weak # reference, not the value itself. We therefore need to look up # the weak reference and return the inner value here. rval = rval() return rval def __setattr__(self, attr, value): value = weakref.ref(value) return super(WeakLocal, self).__setattr__(attr, value) # NOTE(mikal): the name "store" should be deprecated in the future store = WeakLocal() # A "weak" store uses weak references and allows an object to fall out of scope # when it falls out of scope in the code that uses the thread local storage. A # "strong" store will hold a reference to the object so that it never falls out # of scope. weak_store = WeakLocal() strong_store = threading.local() trove-5.0.0/trove/common/rpc/0000775000567000056710000000000012701410521017220 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/rpc/__init__.py0000664000567000056710000000000012701410316021321 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/rpc/service.py0000664000567000056710000000570712701410316021245 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import inspect import os from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import loopingcall from oslo_service import service from oslo_utils import importutils from osprofiler import profiler from trove.common import cfg from trove.common.i18n import _ from trove.common import profile from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class RpcService(service.Service): def __init__(self, host=None, binary=None, topic=None, manager=None, rpc_api_version=None): super(RpcService, self).__init__() self.host = host or CONF.host self.binary = binary or os.path.basename(inspect.stack()[-1][1]) self.topic = topic or self.binary.rpartition('trove-')[2] _manager = importutils.import_object(manager) self.manager_impl = profiler.trace_cls("rpc")(_manager) self.rpc_api_version = rpc_api_version or \ self.manager_impl.RPC_API_VERSION profile.setup_profiler(self.binary, self.host) def start(self): LOG.debug("Creating RPC server for service %s", self.topic) target = messaging.Target(topic=self.topic, server=self.host, version=self.rpc_api_version) if not hasattr(self.manager_impl, 'target'): self.manager_impl.target = target endpoints = [self.manager_impl] self.rpcserver = rpc.get_server(target, endpoints) self.rpcserver.start() # TODO(hub-cap): Currently the context is none... do we _need_ it here? report_interval = CONF.report_interval if report_interval > 0: pulse = loopingcall.FixedIntervalLoopingCall( self.manager_impl.run_periodic_tasks, context=None) pulse.start(interval=report_interval, initial_delay=report_interval) pulse.wait() def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.rpcserver.stop() except Exception: LOG.info(_("Failed to stop RPC server before shutdown. ")) pass super(RpcService, self).stop() trove-5.0.0/trove/common/rpc/version.py0000664000567000056710000000153012701410316021260 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # based on configured release version RPC_API_VERSION = "1.0" # API version history: # # 1.0 - Initial version. (We started keeping track at icehouse-3) # 1.1 - # 1.2 - ... VERSION_ALIASES = { 'icehouse': '1.0' } trove-5.0.0/trove/common/base_exception.py0000664000567000056710000000633212701410316022004 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. 
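# Illustrative sketch (not part of the original source): typical wiring of the
# RpcService class from rpc/service.py above. The host and manager path are
# hypothetical; start() imports the manager, wraps it with the osprofiler
# tracer, builds a messaging.Target(topic=..., server=host, version=...) and
# runs an oslo.messaging RPC server, pulsing run_periodic_tasks when
# report_interval > 0.
#
#     from trove.common.rpc.service import RpcService
#
#     svc = RpcService(host='trove-host-1',
#                      binary='trove-taskmanager',
#                      topic='taskmanager',
#                      manager='trove.taskmanager.manager.Manager')
#     svc.start()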
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exceptions common to OpenStack projects """ from oslo_log import log as logging from trove.common.i18n import _ _FATAL_EXCEPTION_FORMAT_ERRORS = False LOG = logging.getLogger(__name__) class Error(Exception): def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code super(ApiError, self).__init__('%s: %s' % (code, message)) class NotFound(Error): pass class UnknownScheme(Error): msg = "Unknown scheme '%s' found in URI" def __init__(self, scheme): msg = self.__class__.msg % scheme super(UnknownScheme, self).__init__(msg) class BadStoreUri(Error): msg = "The Store URI %s was malformed. Reason: %s" def __init__(self, uri, reason): msg = self.__class__.msg % (uri, reason) super(BadStoreUri, self).__init__(msg) class Duplicate(Error): pass class NotAuthorized(Error): pass class NotEmpty(Error): pass class Invalid(Error): pass class BadInputError(Exception): """Error resulting from a client sending bad input to a server""" pass class MissingArgumentError(Error): pass class DatabaseMigrationError(Error): pass class ClientConnectionError(Exception): """Error resulting from a client connecting to a server""" pass def wrap_exception(f): def _wrap(*args, **kw): try: return f(*args, **kw) except Exception as e: if not isinstance(e, Error): LOG.exception(_('Uncaught exception')) raise Error(str(e)) raise _wrap.func_name = f.func_name return _wrap class OpenstackException(Exception): """ Base Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = "An unknown exception occurred" def __init__(self, **kwargs): try: self._error_string = self.message % kwargs except Exception as e: if _FATAL_EXCEPTION_FORMAT_ERRORS: raise e else: # at least get the core message out if something happened self._error_string = self.message def __str__(self): return self._error_string class MalformedRequestBody(OpenstackException): message = "Malformed message body: %(reason)s" class InvalidContentType(OpenstackException): message = "Invalid content type %(content_type)s" trove-5.0.0/trove/common/i18n.py0000664000567000056710000000266412701410316017577 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. 
See http://docs.openstack.org/developer/oslo.i18n/usage.html """ import oslo_i18n # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the # application name when this module is synced into the separate # repository. It is OK to have more than one translation function # using the same domain, since there will still only be one message # catalog. _translators = oslo_i18n.TranslatorFactory(domain='trove') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical trove-5.0.0/trove/common/configurations.py0000664000567000056710000000435412701410316022050 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import stream_codecs class RedisConfParser(object): CODEC = stream_codecs.PropertiesCodec() def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class MySQLConfParser(object): SERVER_CONF_SECTION = 'mysqld' CODEC = stream_codecs.IniCodec( default_value='1', comment_markers=('#', ';', '!')) def __init__(self, config): self.config = config def parse(self): config_dict = self.CODEC.deserialize(self.config) mysqld_section_dict = config_dict[self.SERVER_CONF_SECTION] return mysqld_section_dict.items() class MongoDBConfParser(object): CODEC = stream_codecs.SafeYamlCodec(default_flow_style=False) def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class PostgresqlConfParser(object): CODEC = stream_codecs.PropertiesCodec(delimiter='=') def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class CassandraConfParser(object): CODEC = stream_codecs.SafeYamlCodec(default_flow_style=False) def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class VerticaConfParser(object): CODEC = stream_codecs.PropertiesCodec(delimiter='=') def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() trove-5.0.0/trove/common/context.py0000664000567000056710000000516712701410316020505 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Simple class that stores security context information in the web request. Projects should subclass this class if they wish to enhance the request context or provide additional information in their specific WSGI pipeline. """ from oslo_context import context from oslo_log import log as logging from trove.common import local from trove.common.serializable_notification import SerializableNotification LOG = logging.getLogger(__name__) class TroveContext(context.RequestContext): """ Stores information about the security context under which the user accesses the system, as well as additional request information. """ def __init__(self, **kwargs): self.limit = kwargs.pop('limit', None) self.marker = kwargs.pop('marker', None) self.service_catalog = kwargs.pop('service_catalog', None) self.user_identity = kwargs.pop('user_identity', None) # TODO(esp): not sure we need this self.timeout = kwargs.pop('timeout', None) super(TroveContext, self).__init__(**kwargs) if not hasattr(local.store, 'context'): self.update_store() def to_dict(self): parent_dict = super(TroveContext, self).to_dict() parent_dict.update({'limit': self.limit, 'marker': self.marker, 'service_catalog': self.service_catalog }) if hasattr(self, 'notification'): serialized = SerializableNotification.serialize(self, self.notification) parent_dict['trove_notification'] = serialized return parent_dict def update_store(self): local.store.context = self @classmethod def from_dict(cls, values): n_values = values.pop('trove_notification', None) context = cls(**values) if n_values: context.notification = SerializableNotification.deserialize( context, n_values) return context trove-5.0.0/trove/common/extensions.py0000664000567000056710000005040712701410316021215 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from lxml import etree from oslo_log import log as logging import routes import six import stevedore import webob.dec import webob.exc from trove.common import base_exception as exception from trove.common import base_wsgi from trove.common import cfg from trove.common.i18n import _ from trove.common import wsgi LOG = logging.getLogger(__name__) CONF = cfg.CONF DEFAULT_XMLNS = "http://docs.openstack.org/trove" XMLNS_ATOM = "http://www.w3.org/2005/Atom" @six.add_metaclass(abc.ABCMeta) class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. 
""" @abc.abstractmethod def get_name(self): """The name of the extension. e.g. 'Fox In Socks' """ pass @abc.abstractmethod def get_alias(self): """The alias for the extension. e.g. 'FOXNSOX' """ pass @abc.abstractmethod def get_description(self): """Friendly description for the extension. e.g. 'The Fox In Socks Extension' """ pass @abc.abstractmethod def get_namespace(self): """The XML namespace for the extension. e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0' """ pass @abc.abstractmethod def get_updated(self): """The timestamp when the extension was last updated. e.g. '2011-01-22T13:25:27-06:00' """ pass def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_actions(self): """List of extensions.ActionExtension extension objects. Actions are verbs callable from the API. """ actions = [] return actions def get_request_extensions(self): """List of extensions.RequestException extension objects. Request extensions are used to handle custom request data. """ request_exts = [] return request_exts class ActionExtensionController(object): def __init__(self, application): self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler def action(self, req, id, body): for action_name, handler in self.action_handlers.iteritems(): if action_name in body: return handler(body, req, id) # no action handler found (bump to downstream application) res = self.application return res class ActionExtensionResource(wsgi.Resource): def __init__(self, application): controller = ActionExtensionController(application) wsgi.Resource.__init__(self, controller) def add_action(self, action_name, handler): self.controller.add_action(action_name, handler) class RequestExtensionController(object): def __init__(self, application): self.application = application self.handlers = [] def add_handler(self, handler): self.handlers.append(handler) def process(self, req, *args, **kwargs): res = req.get_response(self.application) # currently request handlers are un-ordered for handler in self.handlers: res = handler(req, res) return res class RequestExtensionResource(wsgi.Resource): def __init__(self, application): controller = RequestExtensionController(application) wsgi.Resource.__init__(self, controller) def add_handler(self, handler): self.controller.add_handler(handler) class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager body_serializers = {'application/xml': ExtensionsXMLSerializer()} serializer = base_wsgi.ResponseSerializer( body_serializers=body_serializers) super(ExtensionsResource, self).__init__(self, None, serializer) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.get_name() ext_data['alias'] = ext.get_alias() ext_data['description'] = ext.get_description() ext_data['namespace'] = ext.get_namespace() ext_data['updated'] = ext.get_updated() ext_data['links'] = [] return ext_data def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.iteritems(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, req, id): # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions.get(id, None) if not ext: raise webob.exc.HTTPNotFound( _("Extension with alias %s does not exist") % id) return 
dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionMiddleware(wsgi.Middleware): """Extensions middleware for WSGI.""" @classmethod def factory(cls, global_config, **local_config): """Paste factory.""" def _factory(app): return cls(app, global_config, **local_config) return _factory def _action_ext_resources(self, application, ext_mgr, mapper): """Return a dict of ActionExtensionResource-s by collection.""" action_resources = {} for action in ext_mgr.get_actions(): if action.collection not in action_resources.keys(): resource = ActionExtensionResource(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', controller=resource, conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', controller=resource, conditions=dict(method=['POST'])) action_resources[action.collection] = resource return action_resources def _request_ext_resources(self, application, ext_mgr, mapper): """Returns a dict of RequestExtensionResource-s by collection.""" request_ext_resources = {} for req_ext in ext_mgr.get_request_extensions(): if req_ext.key not in request_ext_resources.keys(): resource = RequestExtensionResource(application) mapper.connect(req_ext.url_route + '.:(format)', action='process', controller=resource, conditions=req_ext.conditions) mapper.connect(req_ext.url_route, action='process', controller=resource, conditions=req_ext.conditions) request_ext_resources[req_ext.key] = resource return request_ext_resources def __init__(self, application, config, ext_mgr=None): ext_mgr = (ext_mgr or ExtensionManager()) mapper = routes.Mapper() # extended resources for resource_ext in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource_ext.collection) controller_resource = wsgi.Resource(resource_ext.controller, resource_ext.deserializer, resource_ext.serializer) self._map_custom_collection_actions(resource_ext, mapper, controller_resource) kargs = dict(controller=controller_resource, collection=resource_ext.collection_actions, member=resource_ext.member_actions) if resource_ext.parent: kargs['parent_resource'] = resource_ext.parent mapper.resource(resource_ext.collection, resource_ext.collection, **kargs) # extended actions action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug('Extended action: %s', action.action_name) resource = action_resources[action.collection] resource.add_action(action.action_name, action.handler) # extended requests req_controllers = self._request_ext_resources(application, ext_mgr, mapper) for request_ext in ext_mgr.get_request_extensions(): LOG.debug('Extended request: %s', request_ext.key) controller = req_controllers[request_ext.key] controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) super(ExtensionMiddleware, self).__init__(application) def _map_custom_collection_actions(self, resource_ext, mapper, controller_resource): for action, method in resource_ext.collection_actions.iteritems(): parent = resource_ext.parent conditions = dict(method=[method]) path = "/%s/%s" % (resource_ext.collection, action) path_prefix = "" if parent: path_prefix = "/%s/{%s_id}" % (parent["collection_name"], parent["member_name"]) with mapper.submapper(controller=controller_resource, action=action, path_prefix=path_prefix, conditions=conditions) as submap: 
submap.connect(path_prefix + path, path) submap.connect(path_prefix + path + "_format", "%s.:(format)" % path) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Route the incoming request with router.""" req.environ['extended.app'] = self.application return self._router @staticmethod @webob.dec.wsgify(RequestClass=wsgi.Request) def _dispatch(req): """Dispatch the request. Returns the routed WSGI app's response or defers to the extended application. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return req.environ['extended.app'] app = match['controller'] return app class ExtensionManager(object): EXT_NAMESPACE = 'trove.api.extensions' def __init__(self): LOG.debug('Initializing extension manager.') self.extensions = {} self._load_all_extensions() def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] extension_resource = ExtensionsResource(self) res_ext = ResourceExtension('extensions', extension_resource, serializer=extension_resource.serializer) resources.append(res_ext) for alias, ext in self.extensions.iteritems(): try: resources.extend(ext.get_resources()) except AttributeError: pass return resources def get_actions(self): """Returns a list of ActionExtension objects.""" actions = [] for alias, ext in self.extensions.iteritems(): try: actions.extend(ext.get_actions()) except AttributeError: pass return actions def get_request_extensions(self): """Returns a list of RequestExtension objects.""" request_exts = [] for alias, ext in self.extensions.iteritems(): try: request_exts.extend(ext.get_request_extensions()) except AttributeError: pass return request_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.get_name()) LOG.debug('Ext alias: %s', extension.get_alias()) LOG.debug('Ext description: %s', extension.get_description()) LOG.debug('Ext namespace: %s', extension.get_namespace()) LOG.debug('Ext updated: %s', extension.get_updated()) except AttributeError as ex: LOG.exception(_("Exception loading extension: %s"), unicode(ex)) return False return True def _check_load_extension(self, ext): LOG.debug('Ext: %s', ext.obj) return isinstance(ext.obj, ExtensionDescriptor) def _load_all_extensions(self): self.api_extension_manager = stevedore.enabled.EnabledExtensionManager( namespace=self.EXT_NAMESPACE, check_func=self._check_load_extension, invoke_on_load=True, invoke_kwds={}) self.api_extension_manager.map(self.add_extension) def add_extension(self, ext): ext = ext.obj # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.get_alias() LOG.debug('Loaded extension: %s', alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext class RequestExtension(object): def __init__(self, method, url_route, handler): self.url_route = url_route self.handler = handler self.conditions = dict(method=[method]) self.key = "%s-%s" % (method, url_route) class ActionExtension(object): def __init__(self, collection, action_name, handler): self.collection = collection self.action_name = action_name self.handler = handler class BaseResourceExtension(object): def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, deserializer=None, serializer=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = 
controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.deserializer = deserializer self.serializer = serializer class ExtensionsXMLSerializer(base_wsgi.XMLDictSerializer): def __init__(self): self.nsmap = {None: DEFAULT_XMLNS, 'atom': XMLNS_ATOM} def show(self, ext_dict): ext = etree.Element('extension', nsmap=self.nsmap) self._populate_ext(ext, ext_dict['extension']) return self._to_xml(ext) def index(self, exts_dict): exts = etree.Element('extensions', nsmap=self.nsmap) for ext_dict in exts_dict['extensions']: ext = etree.SubElement(exts, 'extension') self._populate_ext(ext, ext_dict) return self._to_xml(exts) def _populate_ext(self, ext_elem, ext_dict): """Populate an extension xml element from a dict.""" ext_elem.set('name', ext_dict['name']) ext_elem.set('namespace', ext_dict['namespace']) ext_elem.set('alias', ext_dict['alias']) ext_elem.set('updated', ext_dict['updated']) desc = etree.Element('description') desc.text = ext_dict['description'] ext_elem.append(desc) for link in ext_dict.get('links', []): elem = etree.SubElement(ext_elem, '{%s}link' % XMLNS_ATOM) elem.set('rel', link['rel']) elem.set('href', link['href']) elem.set('type', link['type']) return ext_elem def _to_xml(self, root): """Convert the xml object to an xml string.""" return etree.tostring(root, encoding='UTF-8') class ResourceExtension(BaseResourceExtension): def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, deserializer=None, serializer=None): super(ResourceExtension, self).__init__( collection, controller, parent=parent, collection_actions=collection_actions, member_actions=member_actions, deserializer=wsgi.RequestDeserializer(), serializer=wsgi.TroveResponseSerializer()) class TroveExtensionMiddleware(ExtensionMiddleware): def __init__(self, application, ext_mgr=None): ext_mgr = (ext_mgr or ExtensionManager()) mapper = routes.Mapper() # extended resources for resource_ext in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource_ext.collection) # The only difference here is that we are using our common # wsgi.Resource instead of the openstack common wsgi.Resource exception_map = None if hasattr(resource_ext.controller, 'exception_map'): exception_map = resource_ext.controller.exception_map controller_resource = wsgi.Resource(resource_ext.controller, resource_ext.deserializer, resource_ext.serializer, exception_map) self._map_custom_collection_actions(resource_ext, mapper, controller_resource) kargs = dict(controller=controller_resource, collection=resource_ext.collection_actions, member=resource_ext.member_actions) if resource_ext.parent: kargs['parent_resource'] = resource_ext.parent mapper.resource(resource_ext.collection, resource_ext.collection, **kargs) mapper.connect(("/%s/{id}" % resource_ext.collection), controller=controller_resource, action='edit', conditions={'method': ['PATCH']}) # extended actions action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug('Extended action: %s', action.action_name) resource = action_resources[action.collection] resource.add_action(action.action_name, action.handler) # extended requests req_controllers = self._request_ext_resources(application, ext_mgr, mapper) for request_ext in ext_mgr.get_request_extensions(): LOG.debug('Extended request: %s', request_ext.key) controller = req_controllers[request_ext.key] controller.add_handler(request_ext.handler) self._router = 
routes.middleware.RoutesMiddleware(self._dispatch, mapper) super(ExtensionMiddleware, self).__init__(application) def factory(global_config, **local_config): """Paste factory.""" def _factory(app): ext_mgr = ExtensionManager() return TroveExtensionMiddleware(app, ext_mgr) return _factory trove-5.0.0/trove/common/single_tenant_remote.py0000664000567000056710000000777212701410316023232 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from trove.common import cfg from trove.common.remote import normalize_url import trove.openstack.common.log as logging from cinderclient.v2 import client as CinderClient from neutronclient.v2_0 import client as NeutronClient from novaclient.v1_1.client import Client as NovaClient CONF = cfg.CONF """ trove.conf ... The following should be set in the trove CONF file for this single_tenant_remote config to work correctly. nova_proxy_admin_user = nova_proxy_admin_pass = nova_proxy_admin_tenant_name = trove_auth_url = nova_compute_service_type = nova_compute_url = cinder_service_type = os_region_name = remote_nova_client = \ trove.common.single_tenant_remote.nova_client_trove_admin remote_cinder_client = \ trove.common.single_tenant_remote.cinder_client_trove_admin remote_neutron_client = \ trove.common.single_tenant_remote.neutron_client_trove_admin ... 
""" PROXY_AUTH_URL = CONF.trove_auth_url LOG = logging.getLogger(__name__) def nova_client_trove_admin(context=None): """ Returns a nova client object with the trove admin credentials :param context: original context from user request :type context: trove.common.context.TroveContext :return novaclient: novaclient with trove admin credentials :rtype: novaclient.v1_1.client.Client """ client = NovaClient(CONF.nova_proxy_admin_user, CONF.nova_proxy_admin_pass, CONF.nova_proxy_admin_tenant_name, auth_url=PROXY_AUTH_URL, service_type=CONF.nova_compute_service_type, region_name=CONF.os_region_name) if CONF.nova_compute_url and CONF.nova_proxy_admin_tenant_id: client.client.management_url = "%s/%s/" % ( normalize_url(CONF.nova_compute_url), CONF.nova_proxy_admin_tenant_id) return client def cinder_client_trove_admin(context=None): """ Returns a cinder client object with the trove admin credentials :param context: original context from user request :type context: trove.common.context.TroveContext :return cinderclient: cinderclient with trove admin credentials """ client = CinderClient.Client(CONF.nova_proxy_admin_user, CONF.nova_proxy_admin_pass, project_id=CONF.nova_proxy_admin_tenant_name, auth_url=PROXY_AUTH_URL, service_type=CONF.cinder_service_type, region_name=CONF.os_region_name) if CONF.cinder_url and CONF.nova_proxy_admin_tenant_id: client.client.management_url = "%s/%s/" % ( normalize_url(CONF.cinder_url), CONF.nova_proxy_admin_tenant_id) return client def neutron_client_trove_admin(context=None): """ Returns a neutron client object with the trove admin credentials :param context: original context from user request :type context: trove.common.context.TroveContext :return neutronclient: neutronclient with trove admin credentials """ client = NeutronClient.Client( username=CONF.nova_proxy_admin_user, password=CONF.nova_proxy_admin_pass, tenant_name=CONF.nova_proxy_admin_tenant_name, auth_url=PROXY_AUTH_URL, service_type=CONF.neutron_service_type, region_name=CONF.os_region_name) if CONF.neutron_url: client.management_url = CONF.neutron_url return client trove-5.0.0/trove/common/xmlutils.py0000664000567000056710000000544012701410316020674 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from xml.dom import minidom from xml.parsers import expat from xml import sax from xml.sax import expatreader class ProtectedExpatParser(expatreader.ExpatParser): """An expat parser which disables DTD's and entities by default.""" def __init__(self, forbid_dtd=True, forbid_entities=True, *args, **kwargs): # Python 2.x old style class expatreader.ExpatParser.__init__(self, *args, **kwargs) self.forbid_dtd = forbid_dtd self.forbid_entities = forbid_entities def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise ValueError("Inline DTD forbidden") def entity_decl(self, entityName, is_parameter_entity, value, base, systemId, publicId, notationName): raise ValueError(" entity declaration forbidden") def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise ValueError(" unparsed entity forbidden") def external_entity_ref(self, context, base, systemId, publicId): raise ValueError(" external entity forbidden") def notation_decl(self, name, base, sysid, pubid): raise ValueError(" notation forbidden") def reset(self): expatreader.ExpatParser.reset(self) if self.forbid_dtd: self._parser.StartDoctypeDeclHandler = self.start_doctype_decl self._parser.EndDoctypeDeclHandler = None if self.forbid_entities: self._parser.EntityDeclHandler = self.entity_decl self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl self._parser.ExternalEntityRefHandler = self.external_entity_ref self._parser.NotationDeclHandler = self.notation_decl try: self._parser.SkippedEntityHandler = None except AttributeError: # some pyexpat versions do not support SkippedEntity pass def safe_minidom_parse_string(xml_string): """Parse an XML string using minidom safely. """ try: return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except sax.SAXParseException: raise expat.ExpatError() trove-5.0.0/trove/common/exception.py0000664000567000056710000004073612701410316021020 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
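# Illustrative sketch (not part of the original source): using the hardened
# parser from xmlutils.py above. Plain documents parse normally, while a DTD,
# entity declaration or external entity reference raises ValueError before any
# expansion can happen, and SAX parse failures surface as
# xml.parsers.expat.ExpatError.
#
#     from trove.common.xmlutils import safe_minidom_parse_string
#
#     doc = safe_minidom_parse_string("<instance><name>db1</name></instance>")
#     doc.documentElement.tagName   # -> 'instance'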
"""I totally stole most of this from melange, thx guys!!!""" import re from oslo_concurrency import processutils from oslo_log import log as logging from trove.common import base_exception as openstack_exception from trove.common.i18n import _ ClientConnectionError = openstack_exception.ClientConnectionError ProcessExecutionError = processutils.ProcessExecutionError DatabaseMigrationError = openstack_exception.DatabaseMigrationError LOG = logging.getLogger(__name__) wrap_exception = openstack_exception.wrap_exception def safe_fmt_string(text): return re.sub(r'%([0-9]+)', r'\1', text) class TroveError(openstack_exception.OpenstackException): """Base exception that all custom trove app exceptions inherit from.""" internal_message = None def __init__(self, message=None, **kwargs): if message is not None: self.message = message if self.internal_message is not None: try: LOG.error(safe_fmt_string(self.internal_message) % kwargs) except Exception: LOG.error(self.internal_message) self.message = safe_fmt_string(self.message) super(TroveError, self).__init__(**kwargs) class DBConstraintError(TroveError): message = _("Failed to save %(model_name)s because: %(error)s.") class InvalidRPCConnectionReuse(TroveError): message = _("Invalid RPC Connection Reuse.") class NotFound(TroveError): message = _("Resource %(uuid)s cannot be found.") class CapabilityNotFound(NotFound): message = _("Capability '%(capability)s' cannot be found.") class CapabilityDisabled(TroveError): message = _("Capability '%(capability)s' is disabled.") class FlavorNotFound(TroveError): message = _("Resource %(uuid)s cannot be found.") class UserNotFound(NotFound): message = _("User %(uuid)s cannot be found on the instance.") class DatabaseNotFound(NotFound): message = _("Database %(uuid)s cannot be found on the instance.") class ComputeInstanceNotFound(NotFound): internal_message = _("Cannot find compute instance %(server_id)s for " "instance %(instance_id)s.") message = _("Resource %(instance_id)s can not be retrieved.") class DnsRecordNotFound(NotFound): message = _("DnsRecord with name= %(name)s not found.") class DatastoreNotFound(NotFound): message = _("Datastore '%(datastore)s' cannot be found.") class DatastoreVersionNotFound(NotFound): message = _("Datastore version '%(version)s' cannot be found.") class DatastoresNotFound(NotFound): message = _("Datastores cannot be found.") class DatastoreFlavorAssociationNotFound(NotFound): message = _("Flavor %(flavor_id)s is not supported for datastore " "%(datastore)s version %(datastore_version)s") class DatastoreFlavorAssociationAlreadyExists(TroveError): message = _("Flavor %(flavor_id)s is already associated with " "datastore %(datastore)s version %(datastore_version)s") class DatastoreNoVersion(TroveError): message = _("Datastore '%(datastore)s' has no version '%(version)s'.") class DatastoreVersionInactive(TroveError): message = _("Datastore version '%(version)s' is not active.") class DatastoreDefaultDatastoreNotFound(TroveError): message = _("Please specify datastore. 
Default datastore " "cannot be found.") class DatastoreDefaultVersionNotFound(TroveError): message = _("Default version for datastore '%(datastore)s' not found.") class InvalidDatastoreManager(TroveError): message = _("Datastore manager %(datastore_manager)s cannot be found.") class DatastoreOperationNotSupported(TroveError): message = _("The '%(operation)s' operation is not supported for " "the '%(datastore)s' datastore.") class NoUniqueMatch(TroveError): message = _("Multiple matches found for '%(name)s', " "use an UUID to be more specific.") class OverLimit(TroveError): internal_message = _("The server rejected the request due to its size or " "rate.") class QuotaExceeded(TroveError): message = _("Quota exceeded for resources: %(overs)s.") class VolumeQuotaExceeded(QuotaExceeded): message = _("Instance volume quota exceeded.") class GuestError(TroveError): message = _("An error occurred communicating with the guest: " "%(original_message)s.") class GuestTimeout(TroveError): message = _("Timeout trying to connect to the Guest Agent.") class BadRequest(TroveError): message = _("The server could not comply with the request since it is " "either malformed or otherwise incorrect.") class MissingKey(BadRequest): message = _("Required element/key - %(key)s was not specified.") class DatabaseAlreadyExists(BadRequest): message = _('A database with the name "%(name)s" already exists.') class UserAlreadyExists(BadRequest): message = _('A user with the name "%(name)s" already exists.') class InstanceAssignedToConfiguration(BadRequest): message = _('A configuration group cannot be deleted if it is ' 'associated with one or more non-terminated instances. ' 'Detach the configuration group from all non-terminated ' 'instances and please try again.') class UnprocessableEntity(TroveError): message = _("Unable to process the contained request.") class UnauthorizedRequest(TroveError): message = _("Unauthorized request.") class CannotResizeToSameSize(TroveError): message = _("No change was requested in the size of the instance.") class VolumeAttachmentsNotFound(NotFound): message = _("Cannot find the volumes attached to compute " "instance %(server_id)s.") class VolumeCreationFailure(TroveError): message = _("Failed to create a volume in Nova.") class VolumeSizeNotSpecified(BadRequest): message = _("Volume size was not specified.") class LocalStorageNotSpecified(BadRequest): message = _("Local storage not specified in flavor ID: %(flavor)s.") class LocalStorageNotSupported(TroveError): message = _("Local storage support is not enabled.") class VolumeNotSupported(TroveError): message = _("Volume support is not enabled.") class ReplicationNotSupported(TroveError): message = _("Replication is not supported for " "the '%(datastore)s' datastore.") class ReplicationSlaveAttachError(TroveError): message = _("Exception encountered attaching slave to new replica source.") class TaskManagerError(TroveError): message = _("An error occurred communicating with the task manager: " "%(original_message)s.") class BadValue(TroveError): message = _("Value could not be converted: %(msg)s.") class PollTimeOut(TroveError): message = _("Polling request timed out.") class Forbidden(TroveError): message = _("User does not have admin privileges.") class InvalidModelError(TroveError): message = _("The following values are invalid: %(errors)s.") class ModelNotFoundError(NotFound): message = _("Not Found.") class UpdateGuestError(TroveError): message = _("Failed to update instances.") class ConfigNotFound(NotFound): message = 
_("Config file not found.") class PasteAppNotFound(NotFound): message = _("Paste app not found.") class QuotaNotFound(NotFound): message = _("Quota could not be found.") class TenantQuotaNotFound(QuotaNotFound): message = _("Quota for tenant %(tenant_id)s could not be found.") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class BackupUploadError(TroveError): message = _("Unable to upload Backup to swift.") class BackupDownloadError(TroveError): message = _("Unable to download Backup from swift") class BackupCreationError(TroveError): message = _("Unable to create Backup.") class BackupUpdateError(TroveError): message = _("Unable to update Backup table in database.") class SecurityGroupCreationError(TroveError): message = _("Failed to create Security Group.") class SecurityGroupDeletionError(TroveError): message = _("Failed to delete Security Group.") class SecurityGroupRuleCreationError(TroveError): message = _("Failed to create Security Group Rule.") class SecurityGroupRuleDeletionError(TroveError): message = _("Failed to delete Security Group Rule.") class MalformedSecurityGroupRuleError(TroveError): message = _("Error creating security group rules." " Malformed port(s). Port must be an integer." " FromPort = %(from)s greater than ToPort = %(to)s.") class BackupNotCompleteError(TroveError): message = _("Unable to create instance because backup %(backup_id)s is " "not completed. Actual state: %(state)s.") class BackupFileNotFound(NotFound): message = _("Backup file in %(location)s was not found in the object " "storage.") class BackupDatastoreMismatchError(TroveError): message = _("The datastore from which the backup was taken, " "%(datastore1)s, does not match the destination" " datastore of %(datastore2)s.") class SwiftAuthError(TroveError): message = _("Swift account not accessible for tenant %(tenant_id)s.") class SwiftNotFound(TroveError): message = _("Swift is disabled for tenant %(tenant_id)s.") class DatabaseForUserNotInDatabaseListError(TroveError): message = _("The request indicates that user %(user)s should have access " "to database %(database)s, but database %(database)s is not " "included in the initial databases list.") class DatabaseInitialDatabaseDuplicateError(TroveError): message = _("Two or more databases share the same name in the initial " "databases list. Please correct the names or remove the " "duplicate entries.") class DatabaseInitialUserDuplicateError(TroveError): message = _("Two or more users share the same name and host in the " "initial users list. 
Please correct the names or remove the " "duplicate entries.") class RestoreBackupIntegrityError(TroveError): message = _("Current Swift object checksum does not match original " "checksum for backup %(backup_id)s.") class ConfigKeyNotFound(NotFound): message = _("%(key)s is not a supported configuration parameter.") class NoConfigParserFound(NotFound): message = _("No configuration parser found for datastore " "%(datastore_manager)s.") class ConfigurationDatastoreNotMatchInstance(TroveError): message = _("Datastore Version on Configuration " "%(config_datastore_version)s does not " "match the Datastore Version on the instance " "%(instance_datastore_version)s.") class ConfigurationParameterDeleted(TroveError): message = _("%(parameter_name)s parameter can no longer be " "set as of %(parameter_deleted_at)s.") class ConfigurationParameterAlreadyExists(TroveError): message = _("%(parameter_name)s parameter already exists " "for datastore version %(datastore_version)s.") class ConfigurationAlreadyAttached(TroveError): message = _("Instance %(instance_id)s already has a " "Configuration Group attached: %(configuration_id)s.") class InvalidInstanceState(TroveError): message = _("The operation you have requested cannot be executed because " "the instance status is currently: %(status)s.") class NoServiceEndpoint(TroveError): """Could not find requested endpoint in Service Catalog.""" message = _("Endpoint not found for service_type=%(service_type)s, " "endpoint_type=%(endpoint_type)s, " "endpoint_region=%(endpoint_region)s.") class EmptyCatalog(NoServiceEndpoint): """The service catalog is empty.""" message = _("Empty catalog.") class IncompatibleReplicationStrategy(TroveError): message = _("Instance with replication strategy %(guest_strategy)s " "cannot replicate from instance with replication strategy " "%(replication_strategy)s.") class InsufficientSpaceForReplica(TroveError): message = _("The target instance has only %(slave_volume_size)sG free, " "but the replication snapshot contains %(dataset_size)sG " "of data.") class ReplicaSourceDeleteForbidden(Forbidden): message = _("The replica source cannot be deleted without detaching the " "replicas.") class ModuleTypeNotFound(NotFound): message = _("Module type '%(module_type)s' was not found.") class ModuleAppliedToInstance(BadRequest): message = _("A module cannot be deleted or its contents modified if it " "has been applied to a non-terminated instance, unless the " "module has been marked as 'live_update.' " "Please remove the module from all non-terminated " "instances and try again.") class ModuleAlreadyExists(BadRequest): message = _("A module with the name '%(name)s' already exists for " "datastore '%(datastore)s' and datastore version " "'%(ds_version)s'") class ModuleAccessForbidden(Forbidden): message = _("You must be admin to %(action)s a module with these " "options. 
%(options)s") class ClusterNotFound(NotFound): message = _("Cluster '%(cluster)s' cannot be found.") class ClusterFlavorsNotEqual(TroveError): message = _("The flavor for each instance in a cluster must be the same.") class ClusterNetworksNotEqual(TroveError): message = _("The network for each instance in a cluster must be the same.") class NetworkNotFound(TroveError): message = _("Network Resource %(uuid)s cannot be found.") class ClusterVolumeSizeRequired(TroveError): message = _("A volume size is required for each instance in the cluster.") class ClusterVolumeSizesNotEqual(TroveError): message = _("The volume size for each instance in a cluster must be " "the same.") class ClusterNumInstancesNotSupported(TroveError): message = _("The number of instances for your initial cluster must " "be %(num_instances)s.") class ClusterNumInstancesNotLargeEnough(TroveError): message = _("The number of instances for your initial cluster must " "be at least %(num_instances)s.") class ClusterNumInstancesBelowSafetyThreshold(TroveError): message = _("The number of instances in your cluster cannot " "safely be lowered below the current level based" "on your current fault-tolerance settings.") class ClusterShrinkMustNotLeaveClusterEmpty(TroveError): message = _("Must leave at least one instance in the cluster when " "shrinking.") class ClusterShrinkInstanceInUse(TroveError): message = _("Instance(s) %(id)s currently in use and cannot be deleted. " "Details: %(reason)s") class ClusterInstanceOperationNotSupported(TroveError): message = _("Operation not supported for instances that are part of a " "cluster.") class ClusterOperationNotSupported(TroveError): message = _("The '%(operation)s' operation is not supported for cluster.") class TroveOperationAuthError(TroveError): message = _("Operation not allowed for tenant %(tenant_id)s.") class ClusterDatastoreNotSupported(TroveError): message = _("Clusters not supported for " "%(datastore)s-%(datastore_version)s.") class BackupTooLarge(TroveError): message = _("Backup is too large for given flavor or volume. " "Backup size: %(backup_size)s GBs. " "Available size: %(disk_size)s GBs.") class ImageNotFound(NotFound): message = _("Image %(uuid)s cannot be found.") class DatastoreVersionAlreadyExists(BadRequest): message = _("A datastore version with the name '%(name)s' already exists.") trove-5.0.0/trove/common/views.py0000664000567000056710000000251512701410316020150 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from trove.common import wsgi def create_links(resource_path, request, id): """Creates the links dictionary in the format typical of most resources.""" context = request.environ[wsgi.CONTEXT_KEY] link_info = { 'host': request.host, 'version': request.url_version, 'tenant_id': context.tenant, 'resource_path': resource_path, 'id': id, } return [ { "href": "https://%(host)s/v%(version)s/%(tenant_id)s" "/%(resource_path)s/%(id)s" % link_info, "rel": "self" }, { "href": "https://%(host)s/%(resource_path)s/%(id)s" % link_info, "rel": "bookmark" } ] trove-5.0.0/trove/common/models.py0000664000567000056710000000751112701410316020277 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes that form the core of instances functionality.""" from oslo_utils.importutils import import_class from trove.common import cfg from trove.common import remote CONF = cfg.CONF class ModelBase(object): """ An object which can be stored in the database. """ _data_fields = [] _auto_generated_attrs = [] def _validate(self, errors): """Subclasses override this to offer additional validation. For each validation error a key with the field name and an error message is added to the dict. """ pass def data(self, **options): """Called to serialize object to a dictionary.""" data_fields = self._data_fields + self._auto_generated_attrs return {field: self[field] for field in data_fields} def is_valid(self): """Called when persisting data to ensure the format is correct.""" self.errors = {} self._validate(self.errors) # self._validate_columns_type() # self._before_validate() # self._validate() return self.errors == {} def __setitem__(self, key, value): """Overloaded to cause this object to look like a data entity.""" setattr(self, key, value) def __getitem__(self, key): """Overloaded to cause this object to look like a data entity.""" return getattr(self, key) def __eq__(self, other): """Overloaded to cause this object to look like a data entity.""" if not hasattr(other, 'id'): return False return type(other) == type(self) and other.id == self.id def __ne__(self, other): """Overloaded to cause this object to look like a data entity.""" return not self == other def __hash__(self): """Overloaded to cause this object to look like a data entity.""" return self.id.__hash__() class RemoteModelBase(ModelBase): # This should be set by the remote model during init time # The data() method will be using this _data_object = None def _data_item(self, data_object): data_fields = self._data_fields + self._auto_generated_attrs return {field: getattr(data_object, field) for field in data_fields} # data magic that will allow for a list of _data_object or a single item # if the object is a list, it will turn it into a list of hash's again def data(self, **options): if self._data_object is None: raise LookupError("data object is None") if isinstance(self._data_object, list): return [self._data_item(item) for item in self._data_object] else: return 
self._data_item(self._data_object) class NetworkRemoteModelBase(RemoteModelBase): network_driver = None @classmethod def get_driver(cls, context): if not cls.network_driver: cls.network_driver = import_class(CONF.network_driver) return cls.network_driver(context) class NovaRemoteModelBase(RemoteModelBase): @classmethod def get_client(cls, context): return remote.create_nova_client(context) class SwiftRemoteModelBase(RemoteModelBase): @classmethod def get_client(cls, context): return remote.create_swift_client(context) trove-5.0.0/trove/common/strategies/0000775000567000056710000000000012701410521020606 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/0000775000567000056710000000000012701410521022267 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/__init__.py0000664000567000056710000000000012701410316024370 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/base.py0000664000567000056710000000256012701410316023560 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class BaseAPIStrategy(object): @property def cluster_class(self): raise NotImplementedError() @property def cluster_controller_actions(self): raise NotImplementedError() @property def cluster_view_class(self): raise NotImplementedError() @property def mgmt_cluster_view_class(self): raise NotImplementedError() class BaseTaskManagerStrategy(object): @property def task_manager_api_class(self, context): raise NotImplementedError() @property def task_manager_cluster_tasks_class(self, context): raise NotImplementedError() class BaseGuestAgentStrategy(object): @property def guest_client_class(self): raise NotImplementedError() trove-5.0.0/trove/common/strategies/cluster/experimental/0000775000567000056710000000000012701410521024764 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/mongodb/0000775000567000056710000000000012701410521026411 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/mongodb/guestagent.py0000664000567000056710000001036512701410316031140 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
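# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): the guest API below
# extends trove.guestagent.api.API; every public method is a thin wrapper
# that forwards its keyword arguments to the guest agent over RPC via
# self._call().  A minimal sketch of the pattern, using a hypothetical
# "do_thing" agent method:
#
#     class MyGuestAgentAPI(guest_api.API):
#         def do_thing(self, arg):
#             # blocks until the guest replies or the timeout expires
#             return self._call("do_thing", guest_api.AGENT_LOW_TIMEOUT,
#                               self.version_cap, arg=arg)
# ---------------------------------------------------------------------------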
from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) CONF = cfg.CONF ADD_MEMBERS_TIMEOUT = CONF.mongodb.add_members_timeout class MongoDbGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return MongoDbGuestAgentAPI class MongoDbGuestAgentAPI(guest_api.API): def add_shard(self, replica_set_name, replica_set_member): LOG.debug("Adding shard with replSet %(replica_set_name)s and member " "%(replica_set_member)s for instance " "%(id)s" % {'replica_set_name': replica_set_name, 'replica_set_member': replica_set_member, 'id': self.id}) return self._call("add_shard", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, replica_set_name=replica_set_name, replica_set_member=replica_set_member) def add_members(self, members): LOG.debug("Adding members %(members)s on instance %(id)s" % { 'members': members, 'id': self.id}) return self._call("add_members", ADD_MEMBERS_TIMEOUT, self.version_cap, members=members) def add_config_servers(self, config_servers): LOG.debug("Adding config servers %(config_servers)s for instance " "%(id)s" % {'config_servers': config_servers, 'id': self.id}) return self._call("add_config_servers", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, config_servers=config_servers) def cluster_complete(self): LOG.debug("Notify regarding cluster install completion") return self._call("cluster_complete", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def get_key(self): LOG.debug("Requesting cluster key from guest") return self._call("get_key", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def prep_primary(self): LOG.debug("Preparing member to be primary member.") return self._call("prep_primary", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) def create_admin_user(self, password): LOG.debug("Creating admin user") return self._call("create_admin_user", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, password=password) def store_admin_password(self, password): LOG.debug("Storing admin password") return self._call("store_admin_password", guest_api.AGENT_LOW_TIMEOUT, self.version_cap, password=password) def get_replica_set_name(self): LOG.debug("Querying member for its replica set name") return self._call("get_replica_set_name", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) def get_admin_password(self): LOG.debug("Querying instance for its admin password") return self._call("get_admin_password", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def is_shard_active(self, replica_set_name): LOG.debug("Checking if replica set %s is active" % replica_set_name) return self._call("is_shard_active", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, replica_set_name=replica_set_name) trove-5.0.0/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py0000664000567000056710000004064512701410316031273 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.exception import PollTimeOut from trove.common.i18n import _ from trove.common.instance import ServiceStatuses from trove.common.strategies.cluster import base from trove.common import utils from trove.instance import models from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds. class MongoDbTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return MongoDbTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return MongoDbClusterTasks @property def task_manager_manager_actions(self): return {'add_shard_cluster': self._manager_add_shard} def _manager_add_shard(self, context, cluster_id, shard_id, replica_set_name): cluster_tasks = task_models.ClusterTasks.load( context, cluster_id, MongoDbClusterTasks) cluster_tasks.add_shard_cluster(context, cluster_id, shard_id, replica_set_name) class MongoDbClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("begin create_cluster for id: %s" % cluster_id) def _create_cluster(): # fetch instances by cluster_id against instances table db_instances = DBInstance.find_all(cluster_id=cluster_id).all() instance_ids = [db_instance.id for db_instance in db_instances] LOG.debug("instances in cluster %s: %s" % (cluster_id, instance_ids)) if not self._all_instances_ready(instance_ids, cluster_id): return LOG.debug("all instances in cluster %s ready." 
% cluster_id) instances = [Instance.load(context, instance_id) for instance_id in instance_ids] # filter query routers in instances into a new list: query_routers query_routers = [instance for instance in instances if instance.type == 'query_router'] LOG.debug("query routers: %s" % [instance.id for instance in query_routers]) # filter config servers in instances into new list: config_servers config_servers = [instance for instance in instances if instance.type == 'config_server'] LOG.debug("config servers: %s" % [instance.id for instance in config_servers]) # filter members (non router/configsvr) into a new list: members members = [instance for instance in instances if instance.type == 'member'] LOG.debug("members: %s" % [instance.id for instance in members]) # for config_server in config_servers, append ip/hostname to # "config_server_hosts", then # peel off the replica-set name and ip/hostname from 'x' config_server_ips = [self.get_ip(instance) for instance in config_servers] LOG.debug("config server ips: %s" % config_server_ips) if not self._add_query_routers(query_routers, config_server_ips): return if not self._create_shard(query_routers[0], members): return # call to start checking status for instance in instances: self.get_guest(instance).cluster_complete() cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("timeout for building cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("end create_cluster for id: %s" % cluster_id) def add_shard_cluster(self, context, cluster_id, shard_id, replica_set_name): LOG.debug("begin add_shard_cluster for cluster %s shard %s" % (cluster_id, shard_id)) def _add_shard_cluster(): db_instances = DBInstance.find_all(cluster_id=cluster_id, shard_id=shard_id).all() instance_ids = [db_instance.id for db_instance in db_instances] LOG.debug("instances in shard %s: %s" % (shard_id, instance_ids)) if not self._all_instances_ready(instance_ids, cluster_id, shard_id): return members = [Instance.load(context, instance_id) for instance_id in instance_ids] db_query_routers = DBInstance.find_all(cluster_id=cluster_id, type='query_router', deleted=False).all() query_routers = [Instance.load(context, db_query_router.id) for db_query_router in db_query_routers] if not self._create_shard(query_routers[0], members): return for member in members: self.get_guest(member).cluster_complete() cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _add_shard_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("timeout for building shard.")) self.update_statuses_on_failure(cluster_id, shard_id) finally: timeout.cancel() LOG.debug("end add_shard_cluster for cluster %s shard %s" % (cluster_id, shard_id)) def grow_cluster(self, context, cluster_id, instance_ids): LOG.debug("begin grow_cluster for MongoDB cluster %s" % cluster_id) def _grow_cluster(): new_instances = [db_instance for db_instance in self.db_instances if db_instance.id in instance_ids] new_members = [db_instance for db_instance in new_instances if db_instance.type == 'member'] new_query_routers = [db_instance for db_instance in new_instances if db_instance.type == 'query_router'] instances = [] if new_members: shard_ids = set([db_instance.shard_id for db_instance in new_members]) query_router_id = 
self._get_running_query_router_id() if not query_router_id: return for shard_id in shard_ids: LOG.debug('growing cluster by adding shard %s on query ' 'router %s' % (shard_id, query_router_id)) member_ids = [db_instance.id for db_instance in new_members if db_instance.shard_id == shard_id] if not self._all_instances_ready( member_ids, cluster_id, shard_id ): return members = [Instance.load(context, member_id) for member_id in member_ids] query_router = Instance.load(context, query_router_id) if not self._create_shard(query_router, members): return instances.extend(members) if new_query_routers: query_router_ids = [db_instance.id for db_instance in new_query_routers] config_servers_ids = [db_instance.id for db_instance in self.db_instances if db_instance.type == 'config_server'] LOG.debug('growing cluster by adding query routers %s, ' 'with config servers %s' % (query_router_ids, config_servers_ids)) if not self._all_instances_ready( query_router_ids, cluster_id ): return query_routers = [Instance.load(context, instance_id) for instance_id in query_router_ids] config_servers_ips = [ self.get_ip(Instance.load(context, config_server_id)) for config_server_id in config_servers_ids ] if not self._add_query_routers( query_routers, config_servers_ips, admin_password=self.get_cluster_admin_password(context) ): return instances.extend(query_routers) for instance in instances: self.get_guest(instance).cluster_complete() cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("timeout for growing cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("end grow_cluster for MongoDB cluster %s" % self.id) def shrink_cluster(self, context, cluster_id, instance_ids): LOG.debug("begin shrink_cluster for MongoDB cluster %s" % cluster_id) def _shrink_cluster(): def all_instances_marked_deleted(): non_deleted_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() non_deleted_ids = [db_instance.id for db_instance in non_deleted_instances] return not bool( set(instance_ids).intersection(set(non_deleted_ids)) ) try: utils.poll_until(all_instances_marked_deleted, sleep_time=2, time_out=CONF.cluster_delete_time_out) except PollTimeOut: LOG.error(_("timeout for instances to be marked as deleted.")) return cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _shrink_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("timeout for shrinking cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("end shrink_cluster for MongoDB cluster %s" % self.id) def get_cluster_admin_password(self, context): """The cluster admin's user credentials are stored on all query routers. Find one and get the guest to return the password. """ instance = Instance.load(context, self._get_running_query_router_id()) return self.get_guest(instance).get_admin_password() def _init_replica_set(self, primary_member, other_members): """Initialize the replica set by calling the primary member guest's add_members. 
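        Roughly (an illustrative sketch of the call order in the body
        below): each secondary is restarted and its IP collected, then
        prep_primary() and add_members(<secondary IPs>) are invoked on
        the primary member's guest.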
""" LOG.debug('initializing replica set on %s' % primary_member.id) other_members_ips = [] try: for member in other_members: other_members_ips.append(self.get_ip(member)) self.get_guest(member).restart() self.get_guest(primary_member).prep_primary() self.get_guest(primary_member).add_members(other_members_ips) except Exception: LOG.exception(_("error initializing replica set")) self.update_statuses_on_failure(self.id, shard_id=primary_member.shard_id) return False return True def _create_shard(self, query_router, members): """Create a replica set out of the given member instances and add it as a shard to the cluster. """ primary_member = members[0] other_members = members[1:] if not self._init_replica_set(primary_member, other_members): return False replica_set = self.get_guest(primary_member).get_replica_set_name() LOG.debug('adding replica set %s as shard %s to cluster %s' % (replica_set, primary_member.shard_id, self.id)) try: self.get_guest(query_router).add_shard( replica_set, self.get_ip(primary_member)) except Exception: LOG.exception(_("error adding shard")) self.update_statuses_on_failure(self.id, shard_id=primary_member.shard_id) return False return True def _get_running_query_router_id(self): """Get a query router in this cluster that is in the RUNNING state.""" for instance_id in [db_instance.id for db_instance in self.db_instances if db_instance.type == 'query_router']: status = models.InstanceServiceStatus.find_by( instance_id=instance_id).get_status() if status == ServiceStatuses.RUNNING: return instance_id LOG.exception(_("no query routers ready to accept requests")) self.update_statuses_on_failure(self.id) return False def _add_query_routers(self, query_routers, config_server_ips, admin_password=None): """Configure the given query routers for the cluster. If this is a new_cluster an admin user will be created with a randomly generated password, else the password needs to be retrieved from and existing query router. """ LOG.debug('adding new query router(s) %s with config server ' 'ips %s' % ([i.id for i in query_routers], config_server_ips)) for query_router in query_routers: try: LOG.debug("calling add_config_servers on query router %s" % query_router.id) guest = self.get_guest(query_router) guest.add_config_servers(config_server_ips) if not admin_password: LOG.debug("creating cluster admin user") admin_password = utils.generate_random_password() guest.create_admin_user(admin_password) else: guest.store_admin_password(admin_password) except Exception: LOG.exception(_("error adding config servers")) self.update_statuses_on_failure(self.id) return False return True class MongoDbTaskManagerAPI(task_api.API): def mongodb_add_shard_cluster(self, cluster_id, shard_id, replica_set_name): LOG.debug("Making async call to add shard cluster %s " % cluster_id) cctxt = self.client.prepare(version=self.version_cap) cctxt.cast(self.context, "add_shard_cluster", cluster_id=cluster_id, shard_id=shard_id, replica_set_name=replica_set_name) trove-5.0.0/trove/common/strategies/cluster/experimental/mongodb/__init__.py0000664000567000056710000000000012701410316030512 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/mongodb/api.py0000664000567000056710000006675012701410316027554 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.cluster import models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.notification import DBaaSClusterGrow from trove.common.notification import StartNotification from trove.common import remote from trove.common.strategies.cluster import base from trove.common import utils from trove.datastore import models as datastore_models from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class MongoDbAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return MongoDbCluster @property def cluster_view_class(self): return MongoDbClusterView @property def mgmt_cluster_view_class(self): return MongoDbMgmtClusterView class MongoDbCluster(models.Cluster): @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties): # TODO(amcreynolds): consider moving into CONF and even supporting # TODO(amcreynolds): an array of values, e.g. [3, 5, 7] # TODO(amcreynolds): or introduce a min/max num_instances and set # TODO(amcreynolds): both to 3 num_instances = len(instances) if num_instances != 3: raise exception.ClusterNumInstancesNotSupported(num_instances=3) flavor_ids = [instance['flavor_id'] for instance in instances] if len(set(flavor_ids)) != 1: raise exception.ClusterFlavorsNotEqual() flavor_id = flavor_ids[0] nova_client = remote.create_nova_client(context) try: flavor = nova_client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) mongo_conf = CONF.get(datastore_version.manager) num_configsvr = mongo_conf.num_config_servers_per_cluster num_mongos = mongo_conf.num_query_routers_per_cluster delta_instances = num_instances + num_configsvr + num_mongos deltas = {'instances': delta_instances} volume_sizes = [instance['volume_size'] for instance in instances if instance.get('volume_size', None)] volume_size = None if mongo_conf.volume_support: if len(volume_sizes) != num_instances: raise exception.ClusterVolumeSizeRequired() if len(set(volume_sizes)) != 1: raise exception.ClusterVolumeSizesNotEqual() volume_size = volume_sizes[0] models.validate_volume_size(volume_size) # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk deltas['volumes'] = volume_size * delta_instances else: # TODO(amcreynolds): is ephemeral possible for mongodb clusters? 
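            # (Clarifying note, not in the original: when volume support is
            # disabled, explicit volume sizes are rejected and the flavor
            # must provide local ephemeral storage instead, as checked
            # directly below.)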
if len(volume_sizes) > 0: raise exception.VolumeNotSupported() ephemeral_support = mongo_conf.device_path if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) check_quotas(context.tenant, deltas) nics = [instance.get('nics', None) for instance in instances] azs = [instance.get('availability_zone', None) for instance in instances] db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) replica_set_name = "rs1" member_config = {"id": db_info.id, "shard_id": utils.generate_uuid(), "instance_type": "member", "replica_set_name": replica_set_name} configsvr_config = {"id": db_info.id, "instance_type": "config_server"} mongos_config = {"id": db_info.id, "instance_type": "query_router"} if mongo_conf.cluster_secure: cluster_key = utils.generate_random_password() member_config['key'] = cluster_key configsvr_config['key'] = cluster_key mongos_config['key'] = cluster_key for i in range(0, num_instances): instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, availability_zone=azs[i], nics=nics[i], configuration_id=None, cluster_config=member_config) for i in range(1, num_configsvr + 1): instance_name = "%s-%s-%s" % (name, "configsvr", str(i)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, availability_zone=None, nics=None, configuration_id=None, cluster_config=configsvr_config) for i in range(1, num_mongos + 1): instance_name = "%s-%s-%s" % (name, "mongos", str(i)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, availability_zone=None, nics=None, configuration_id=None, cluster_config=mongos_config) task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return MongoDbCluster(context, db_info, datastore, datastore_version) def _parse_grow_item(self, item): used_keys = [] def _check_option(key, required=False, valid_values=None): if required and key not in item: raise exception.TroveError( _('An instance with the options %(given)s is missing ' 'the MongoDB required option %(expected)s.') % {'given': item.keys(), 'expected': key} ) value = item.get(key, None) if valid_values and value not in valid_values: raise exception.TroveError( _('The value %(value)s for key %(key)s is invalid. 
' 'Allowed values are %(valid)s.') % {'value': value, 'key': key, 'valid': valid_values} ) used_keys.append(key) return value flavor_id = utils.get_id_from_href(_check_option('flavorRef', required=True)) volume_size = int(_check_option('volume', required=True)['size']) instance_type = _check_option('type', required=True, valid_values=['replica', 'query_router']) name = _check_option('name') related_to = _check_option('related_to') unused_keys = list(set(item.keys()).difference(set(used_keys))) if unused_keys: raise exception.TroveError( _('The arguments %s are not supported by MongoDB.') % unused_keys ) instance = {'flavor_id': flavor_id, 'volume_size': volume_size, 'instance_type': instance_type} if name: instance['name'] = name if related_to: instance['related_to'] = related_to return instance def action(self, context, req, action, param): if action == 'grow': context.notification = DBaaSClusterGrow(context, request=req) with StartNotification(context, cluster_id=self.id): return self.grow([self._parse_grow_item(item) for item in param]) elif action == 'add_shard': context.notification = DBaaSClusterGrow(context, request=req) with StartNotification(context, cluster_id=self.id): return self.add_shard() else: super(MongoDbCluster, self).action(context, req, action, param) def add_shard(self): if self.db_info.task_status != ClusterTasks.NONE: current_task = self.db_info.task_status.name msg = _("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") % current_task LOG.error(msg) raise exception.UnprocessableEntity(msg) db_insts = inst_models.DBInstance.find_all(cluster_id=self.id, type='member').all() num_unique_shards = len(set([db_inst.shard_id for db_inst in db_insts])) if num_unique_shards == 0: msg = _("This action cannot be performed on the cluster as no " "reference shard exists.") LOG.error(msg) raise exception.UnprocessableEntity(msg) arbitrary_shard_id = db_insts[0].shard_id members_in_shard = [db_inst for db_inst in db_insts if db_inst.shard_id == arbitrary_shard_id] num_members_per_shard = len(members_in_shard) a_member = inst_models.load_any_instance(self.context, members_in_shard[0].id) deltas = {'instances': num_members_per_shard} volume_size = a_member.volume_size if volume_size: deltas['volumes'] = volume_size * num_members_per_shard check_quotas(self.context.tenant, deltas) new_replica_set_name = "rs" + str(num_unique_shards + 1) new_shard_id = utils.generate_uuid() dsv_manager = (datastore_models.DatastoreVersion. load_by_uuid(db_insts[0].datastore_version_id).manager) manager = task_api.load(self.context, dsv_manager) key = manager.get_key(a_member) member_config = {"id": self.id, "shard_id": new_shard_id, "instance_type": "member", "replica_set_name": new_replica_set_name, "key": key} for i in range(1, num_members_per_shard + 1): instance_name = "%s-%s-%s" % (self.name, new_replica_set_name, str(i)) inst_models.Instance.create(self.context, instance_name, a_member.flavor_id, a_member.datastore_version.image_id, [], [], a_member.datastore, a_member.datastore_version, volume_size, None, availability_zone=None, nics=None, configuration_id=None, cluster_config=member_config) self.update_db(task_status=ClusterTasks.ADDING_SHARD) manager.mongodb_add_shard_cluster( self.id, new_shard_id, new_replica_set_name) def grow(self, instances): """Extend a cluster by adding new instances. Currently only supports adding a replica set to the cluster. 
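        Each item is expected to be a dict as produced by
        _parse_grow_item(), e.g. (illustrative values only):

            {'flavor_id': '7', 'volume_size': 2,
             'instance_type': 'replica', 'name': 'rs2-1',
             'related_to': 'rs2-1'}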
""" if not len(instances) > 0: raise exception.TroveError( _('Not instances specified for grow operation.') ) self._prep_resize() self._check_quotas(self.context, instances) query_routers, shards = self._group_instances(instances) for shard in shards: self._check_instances( self.context, shard, self.datastore_version, allowed_instance_count=[3] ) if query_routers: self._check_instances(self.context, query_routers, self.datastore_version) # all checks are done before any instances are created instance_ids = [] for shard in shards: instance_ids.extend(self._create_shard_instances(shard)) if query_routers: instance_ids.extend( self._create_query_router_instances(query_routers) ) self.update_db(task_status=ClusterTasks.GROWING_CLUSTER) self.manager.grow_cluster(self.id, instance_ids) def shrink(self, instance_ids): """Removes instances from a cluster. Currently only supports removing entire replica sets from the cluster. """ if not len(instance_ids) > 0: raise exception.TroveError( _('Not instances specified for grow operation.') ) self._prep_resize() all_member_ids = set([member.id for member in self.members]) all_query_router_ids = set([query_router.id for query_router in self.query_routers]) target_ids = set(instance_ids) target_member_ids = target_ids.intersection(all_member_ids) target_query_router_ids = target_ids.intersection(all_query_router_ids) target_configsvr_ids = target_ids.difference( target_member_ids.union(target_query_router_ids) ) if target_configsvr_ids: raise exception.ClusterShrinkInstanceInUse( id=list(target_configsvr_ids), reason="Cannot remove config servers." ) remaining_query_router_ids = all_query_router_ids.difference( target_query_router_ids ) if len(remaining_query_router_ids) < 1: raise exception.ClusterShrinkInstanceInUse( id=list(target_query_router_ids), reason="Cannot remove all remaining query routers. At least " "one query router must be available in the cluster." ) if target_member_ids: target_members = [member for member in self.members if member.id in target_member_ids] target_shards = {} for member in target_members: if member.shard_id in target_shards: target_shards[member.shard_id].append(member.id) else: target_shards[member.shard_id] = [member.id] for target_shard_id in target_shards.keys(): # check the whole shard is being deleted target_shard_member_ids = [ member.id for member in target_members if member.shard_id == target_shard_id ] all_shard_member_ids = [ member.id for member in self.members if member.shard_id == target_shard_id ] if set(target_shard_member_ids) != set(all_shard_member_ids): raise exception.TroveError( _('MongoDB cluster shrink only supports removing an ' 'entire shard. 
Shard %(shard)s has members: ' '%(instances)s') % {'shard': target_shard_id, 'instances': all_shard_member_ids} ) self._check_shard_status(target_shard_member_ids[0]) # all checks are done by now self.update_db(task_status=ClusterTasks.SHRINKING_CLUSTER) for instance_id in instance_ids: instance = inst_models.load_any_instance(self.context, instance_id) instance.delete() self.manager.shrink_cluster(self.id, instance_ids) def _create_instances(self, instances, cluster_config, default_name_tag, key=None): """Loop through the instances and create them in this cluster.""" cluster_config['id'] = self.id if CONF.get(self.datastore_version.manager).cluster_secure: if not key: key = self.get_guest(self.arbitrary_query_router).get_key() cluster_config['key'] = key instance_ids = [] for i, instance in enumerate(instances): name = instance.get('name', '%s-%s-%s' % ( self.name, default_name_tag, i + 1)) new_instance = inst_models.Instance.create( self.context, name, instance['flavor_id'], self.datastore_version.image_id, [], [], self.datastore, self.datastore_version, instance['volume_size'], None, availability_zone=instance.get('availability_zone', None), nics=instance.get('nics', None), cluster_config=cluster_config ) instance_ids.append(new_instance.id) return instance_ids def _create_shard_instances(self, instances, replica_set_name=None, key=None): """Create the instances for a new shard in the cluster.""" shard_id = utils.generate_uuid() if not replica_set_name: replica_set_name = self._gen_replica_set_name() cluster_config = {'shard_id': shard_id, 'instance_type': 'member', 'replica_set_name': replica_set_name} return self._create_instances(instances, cluster_config, replica_set_name, key=key) def _create_query_router_instances(self, instances, key=None): """Create the instances for the new query router.""" cluster_config = {'instance_type': 'query_router'} return self._create_instances(instances, cluster_config, 'mongos', key=key) def _prep_resize(self): """Get information about the cluster's current state.""" if self.db_info.task_status != ClusterTasks.NONE: current_task = self.db_info.task_status.name msg = _("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") % current_task LOG.error(msg) raise exception.UnprocessableEntity(msg) def _instances_of_type(instance_type): return [db_inst for db_inst in self.db_instances if db_inst.type == instance_type] self.config_svrs = _instances_of_type('config_server') self.query_routers = _instances_of_type('query_router') self.members = _instances_of_type('member') self.shard_ids = set([member.shard_id for member in self.members]) self.arbitrary_query_router = inst_models.load_any_instance( self.context, self.query_routers[0].id ) self.manager = task_api.load(self.context, self.datastore_version.manager) def _group_instances(self, instances): """Group the instances into logical sets (type, shard, etc).""" replicas = [] query_routers = [] for item in instances: if item['instance_type'] == 'replica': replica_requirements = ['related_to', 'name'] if not all(key in item for key in replica_requirements): raise exception.TroveError( _('Replica instance does not have required field(s) ' '%s.') % replica_requirements ) replicas.append(item) elif item['instance_type'] == 'query_router': query_routers.append(item) else: raise exception.TroveError( _('Instance type %s not supported for MongoDB cluster ' 'grow.') % item['instance_type'] ) return query_routers, self._group_shard_instances(replicas) def 
_group_shard_instances(self, instances): """Group the replica instances into shards.""" # Create the sets. Dictionary keys correspond to instance names. # Dictionary values are the same if related. sets = {} specified_names = [] for instance in instances: name = instance['name'] specified_names.append(name) if name in sets: sets[name].append(instance) else: sets[name] = [instance] if 'related_to' in instance: if instance['related_to'] == instance['name']: continue relative = instance['related_to'] if relative in sets: if sets[relative] is not sets[name]: sets[relative].extend(sets[name]) sets[name] = sets[relative] else: sets[relative] = sets[name] specified_names_set = set(specified_names) if len(specified_names) != len(specified_names_set): raise exception.TroveError( _('Duplicate member names not allowed.') ) unknown_relations = set(sets.keys()).difference((specified_names_set)) if unknown_relations: raise exception.TroveError( _('related_to target(s) %(targets)s do not match any ' 'specified names.') % {'targets': list(unknown_relations)} ) # reduce the set to unique values shards = [] for key in sets.keys(): exists = False for item in shards: if item is sets[key]: exists = True break if exists: continue shards.append(sets[key]) for shard in shards: flavor = None size = None for member in shard: if ((flavor and member['flavor_id'] != flavor) or ( size and member['volume_size'] != size)): raise exception.TroveError( _('Members of the same shard have mismatching ' 'flavorRef and/or volume values.') ) flavor = member['flavor_id'] size = member['volume_size'] return shards def _gen_replica_set_name(self): """Check the replica set names of all shards in the cluster to determine the next available name. Names are in the form 'rsX' where X is an integer. """ used_names = [] for shard_id in self.shard_ids: # query the guest for the replica name on one member of each shard members = [mem for mem in self.members if mem.shard_id == shard_id] member = inst_models.load_any_instance(self.context, members[0].id) used_names.append(self.get_guest(member).get_replica_set_name()) # find the first unused name i = 0 while True: i += 1 name = 'rs%s' % i if name not in used_names: return name def _check_shard_status(self, member_id): member = inst_models.load_any_instance(self.context, member_id) guest = self.get_guest(member) rs_name = guest.get_replica_set_name() if self.get_guest( self.arbitrary_query_router).is_shard_active(rs_name): raise exception.TroveError( _('Shard with instance %s is still active. 
Please remove the ' 'shard from the MongoDB cluster before shrinking.') % member_id ) @staticmethod def _check_quotas(context, instances): deltas = {'instances': len(instances), 'volumes': sum([instance['volume_size'] for instance in instances])} check_quotas(context.tenant, deltas) @staticmethod def _check_instances(context, instances, datastore_version, allowed_instance_count=None): instance_count = len(instances) if allowed_instance_count: if instance_count not in allowed_instance_count: raise exception.ClusterNumInstancesNotSupported( num_instances=allowed_instance_count ) flavor_ids = [instance['flavor_id'] for instance in instances] if len(set(flavor_ids)) != 1: raise exception.ClusterFlavorsNotEqual() flavor_id = flavor_ids[0] nova_client = remote.create_nova_client(context) try: flavor = nova_client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) mongo_conf = CONF.get(datastore_version.manager) volume_sizes = [instance['volume_size'] for instance in instances if instance.get('volume_size', None)] if mongo_conf.volume_support: if len(volume_sizes) != instance_count: raise exception.ClusterVolumeSizeRequired() if len(set(volume_sizes)) != 1: raise exception.ClusterVolumeSizesNotEqual() volume_size = volume_sizes[0] models.validate_volume_size(volume_size) else: # TODO(amcreynolds): is ephemeral possible for mongodb clusters? if len(volume_sizes) > 0: raise exception.VolumeNotSupported() ephemeral_support = mongo_conf.device_path if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) class MongoDbClusterView(ClusterView): def build_instances(self): return self._build_instances(['query_router'], ['member']) class MongoDbMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['query_router'], ['config_server', 'member', 'query_router']) trove-5.0.0/trove/common/strategies/cluster/experimental/cassandra/0000775000567000056710000000000012701410521026723 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/cassandra/guestagent.py0000664000567000056710000000730612701410316031453 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
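# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): the API below mixes
# synchronous and asynchronous RPC.  Methods using self._call() block for a
# reply within AGENT_LOW_TIMEOUT/AGENT_HIGH_TIMEOUT, while long-running,
# fire-and-forget operations (node_cleanup, node_decommission) use
# self._cast().  For example, from the methods below:
#
#     self._call("get_rack", guest_api.AGENT_LOW_TIMEOUT, self.version_cap)
#     self._cast("node_decommission", self.version_cap)
# ---------------------------------------------------------------------------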
from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class CassandraGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return CassandraGuestAgentAPI class CassandraGuestAgentAPI(guest_api.API): def get_data_center(self): LOG.debug("Retrieving the data center for node: %s" % self.id) return self._call("get_data_center", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def get_rack(self): LOG.debug("Retrieving the rack for node: %s" % self.id) return self._call("get_rack", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def set_seeds(self, seeds): LOG.debug("Configuring the gossip seeds for node: %s" % self.id) return self._call("set_seeds", guest_api.AGENT_LOW_TIMEOUT, self.version_cap, seeds=seeds) def get_seeds(self): LOG.debug("Retrieving the gossip seeds for node: %s" % self.id) return self._call("get_seeds", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def set_auto_bootstrap(self, enabled): LOG.debug("Setting the auto-bootstrap to '%s' for node: %s" % (enabled, self.id)) return self._call("set_auto_bootstrap", guest_api.AGENT_LOW_TIMEOUT, self.version_cap, enabled=enabled) def cluster_complete(self): LOG.debug("Sending a setup completion notification for node: %s" % self.id) return self._call("cluster_complete", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def node_cleanup_begin(self): LOG.debug("Signaling the node to prepare for cleanup: %s" % self.id) return self._call("node_cleanup_begin", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def node_cleanup(self): LOG.debug("Running cleanup on node: %s" % self.id) return self._cast('node_cleanup', self.version_cap) def node_decommission(self): LOG.debug("Decommission node: %s" % self.id) return self._cast("node_decommission", self.version_cap) def cluster_secure(self, password): LOG.debug("Securing the cluster via node: %s" % self.id) return self._call( "cluster_secure", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, password=password) def get_admin_credentials(self): LOG.debug("Retrieving the admin credentials from node: %s" % self.id) return self._call("get_admin_credentials", guest_api.AGENT_LOW_TIMEOUT, self.version_cap) def store_admin_credentials(self, admin_credentials): LOG.debug("Storing the admin credentials on node: %s" % self.id) return self._call("store_admin_credentials", guest_api.AGENT_LOW_TIMEOUT, self.version_cap, admin_credentials=admin_credentials) trove-5.0.0/trove/common/strategies/cluster/experimental/cassandra/taskmanager.py0000664000567000056710000003465712701410316031613 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
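# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): choose_seed_nodes()
# below picks one node per (data center, rack) pair.  For a hypothetical
# cluster of four nodes:
#
#     10.0.0.1 dc1/rack1, 10.0.0.2 dc1/rack1, 10.0.0.3 dc1/rack2,
#     10.0.0.4 dc2/rack1
#
# _group_by_affinity() yields
#     {'dc1': {'rack1': ['10.0.0.1', '10.0.0.2'], 'rack2': ['10.0.0.3']},
#      'dc2': {'rack1': ['10.0.0.4']}}
# and the selected seeds would be {'10.0.0.1', '10.0.0.3', '10.0.0.4'}.
# ---------------------------------------------------------------------------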
from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.strategies.cluster import base from trove.common import utils from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds. class CassandraTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return CassandraTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return CassandraClusterTasks class CassandraClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s." % cluster_id) def _create_cluster(): cluster_node_ids = self.find_cluster_node_ids(cluster_id) # Wait for cluster nodes to get to cluster-ready status. LOG.debug("Waiting for all nodes to become ready.") if not self._all_instances_ready(cluster_node_ids, cluster_id): return cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids) LOG.debug("All nodes ready, proceeding with cluster setup.") seeds = self.choose_seed_nodes(cluster_nodes) # Configure each cluster node with the list of seeds. # Once all nodes are configured, start the seed nodes one at a time # followed by the rest of the nodes. try: LOG.debug("Selected seed nodes: %s" % seeds) for node in cluster_nodes: LOG.debug("Configuring node: %s." % node['id']) node['guest'].set_seeds(seeds) node['guest'].set_auto_bootstrap(False) LOG.debug("Starting seed nodes.") for node in cluster_nodes: if node['ip'] in seeds: node['guest'].restart() node['guest'].set_auto_bootstrap(True) LOG.debug("All seeds running, starting remaining nodes.") for node in cluster_nodes: if node['ip'] not in seeds: node['guest'].restart() node['guest'].set_auto_bootstrap(True) # Create the in-database user via the first node. The remaining # nodes will replicate in-database changes automatically. # Only update the local authentication file on the other nodes. LOG.debug("Securing the cluster.") key = utils.generate_random_password() admin_creds = None for node in cluster_nodes: if admin_creds is None: admin_creds = node['guest'].cluster_secure(key) else: node['guest'].store_admin_credentials(admin_creds) node['guest'].cluster_complete() LOG.debug("Cluster configuration finished successfully.") except Exception: LOG.exception(_("Error creating cluster.")) self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for building cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s." 
% cluster_id) @classmethod def find_cluster_node_ids(cls, cluster_id): db_instances = DBInstance.find_all(cluster_id=cluster_id).all() return [db_instance.id for db_instance in db_instances] @classmethod def load_cluster_nodes(cls, context, node_ids): return [cls.build_node_info(Instance.load(context, node_id)) for node_id in node_ids] @classmethod def build_node_info(cls, instance): guest = cls.get_guest(instance) return {'instance': instance, 'guest': guest, 'id': instance.id, 'ip': cls.get_ip(instance), 'dc': guest.get_data_center(), 'rack': guest.get_rack()} @classmethod def choose_seed_nodes(cls, node_info): """Select gossip seeds. The seeds are cluster nodes from which any new/other cluster nodes request information on the cluster geometry. They should include at least one node from each data center and rack. Gossip optimization is not critical, but it is recommended to use a small seed list. Select one (random) node from each dc and rack. :param node_info: List of cluster nodes. :type node_info: list of dicts """ ips_by_affinity = cls._group_by_affinity(node_info) return {ips_by_affinity[dc][rack][0] for dc in ips_by_affinity for rack in ips_by_affinity[dc]} @classmethod def _group_by_affinity(cls, node_info): """Group node IPs by affinity to data center and rack.""" ips_by_affinity = dict() for node in node_info: ip = node['ip'] dc = node['dc'] rack = node['rack'] if dc in ips_by_affinity: dc_nodes = ips_by_affinity[dc] if rack in dc_nodes: rack_nodes = dc_nodes[rack] rack_nodes.append(ip) else: dc_nodes.update({rack: [ip]}) else: ips_by_affinity.update({dc: {rack: [ip]}}) return ips_by_affinity def grow_cluster(self, context, cluster_id, new_instance_ids): LOG.debug("Begin grow_cluster for id: %s." % cluster_id) def _grow_cluster(): # Wait for new nodes to get to cluster-ready status. LOG.debug("Waiting for new nodes to become ready.") if not self._all_instances_ready(new_instance_ids, cluster_id): return new_instances = [Instance.load(context, instance_id) for instance_id in new_instance_ids] added_nodes = [self.build_node_info(instance) for instance in new_instances] LOG.debug("All nodes ready, proceeding with cluster setup.") cluster_node_ids = self.find_cluster_node_ids(cluster_id) cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids) # Recompute the seed nodes based on the updated cluster geometry. seeds = self.choose_seed_nodes(cluster_nodes) # Configure each cluster node with the updated list of seeds. # Since we are adding to an existing cluster, ensure that the # new nodes have auto-bootstrapping enabled. # Start the added nodes. try: LOG.debug("Selected seed nodes: %s" % seeds) # Update the seeds on all nodes. # Also retrieve the superuser password from one previously # existing node. admin_creds = None for node in cluster_nodes: LOG.debug("Configuring node: %s." % node['id']) node['guest'].set_seeds(seeds) if (admin_creds is None) and (node not in added_nodes): admin_creds = node['guest'].get_admin_credentials() # Start any seeds from the added nodes first. 
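                # (Clarifying note, not in the original: the seeds are
                # brought up before the other new nodes so that joining
                # nodes always have a live gossip contact point; the
                # non-seed nodes are started in the loop that follows.)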
LOG.debug("Starting new seed nodes.") for node in added_nodes: if node['ip'] in seeds: node['guest'].set_auto_bootstrap(True) node['guest'].store_admin_credentials(admin_creds) node['guest'].restart() node['guest'].cluster_complete() LOG.debug("All new seeds running, starting the remaining of " "added nodes.") for node in added_nodes: if node['ip'] not in seeds: node['guest'].set_auto_bootstrap(True) node['guest'].store_admin_credentials(admin_creds) node['guest'].restart() node['guest'].cluster_complete() # Run nodetool cleanup on each of the previously existing nodes # to remove the keys that no longer belong to those nodes. # Wait for cleanup to complete on one node before running # it on the next node. LOG.debug("Cleaning up orphan data on old cluster nodes.") for node in cluster_nodes: if node not in added_nodes: nid = node['id'] node['guest'].node_cleanup_begin() node['guest'].node_cleanup() LOG.debug("Waiting for node to finish its " "cleanup: %s" % nid) if not self._all_instances_running([nid], cluster_id): LOG.warning(_("Node did not complete cleanup " "successfully: %s") % nid) LOG.debug("Cluster configuration finished successfully.") except Exception: LOG.exception(_("Error growing cluster.")) self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for growing cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End grow_cluster for id: %s." % cluster_id) def shrink_cluster(self, context, cluster_id, removal_ids): LOG.debug("Begin shrink_cluster for id: %s." % cluster_id) def _shrink_cluster(): cluster_node_ids = self.find_cluster_node_ids(cluster_id) cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids) removed_nodes = CassandraClusterTasks.load_cluster_nodes( context, removal_ids) LOG.debug("All nodes ready, proceeding with cluster setup.") # Update the list of seeds on remaining nodes if necessary. # Once all nodes are configured, decommission the removed nodes. # Cassandra will stream data from decommissioned nodes to the # remaining ones. try: # All nodes should have the same seeds. # We retrieve current seeds from the first node. test_node = self.load_cluster_nodes( context, cluster_node_ids[:1])[0] current_seeds = test_node['guest'].get_seeds() # The seeds will have to be updated on all remaining instances # if any of the seed nodes is going to be removed. update_seeds = any(node['ip'] in current_seeds for node in removed_nodes) LOG.debug("Decommissioning removed nodes.") for node in removed_nodes: node['guest'].node_decommission() node['instance'].update_db(cluster_id=None) # Recompute the seed nodes based on the updated cluster # geometry if any of the existing seed nodes was removed. if update_seeds: LOG.debug("Updating seeds on the remaining nodes.") cluster_nodes = self.load_cluster_nodes( context, cluster_node_ids) remaining_nodes = [node for node in cluster_nodes if node not in removed_nodes] seeds = self.choose_seed_nodes(remaining_nodes) LOG.debug("Selected seed nodes: %s" % seeds) for node in remaining_nodes: LOG.debug("Configuring node: %s." % node['id']) node['guest'].set_seeds(seeds) # Wait for the removed nodes to go SHUTDOWN. LOG.debug("Waiting for all decommissioned nodes to shutdown.") if not self._all_instances_shutdown(removal_ids, cluster_id): # Now detached, failed nodes will stay available # in the list of standalone instances. 
return # Delete decommissioned instances only when the cluster is in a # consistent state. LOG.debug("Deleting decommissioned instances.") for node in removed_nodes: Instance.delete(node['instance']) LOG.debug("Cluster configuration finished successfully.") except Exception: LOG.exception(_("Error shrinking cluster.")) self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _shrink_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for shrinking cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End shrink_cluster for id: %s." % cluster_id) class CassandraTaskManagerAPI(task_api.API): pass trove-5.0.0/trove/common/strategies/cluster/experimental/cassandra/__init__.py0000664000567000056710000000000012701410316031024 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/cassandra/api.py0000664000567000056710000001644612701410316030063 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.cluster import models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common.strategies.cluster import base from trove.common.strategies.cluster.experimental.cassandra.taskmanager import( CassandraClusterTasks) from trove.common import utils from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class CassandraAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return CassandraCluster @property def cluster_controller_actions(self): return { 'grow': self._action_grow_cluster, 'shrink': self._action_shrink_cluster } def _action_grow_cluster(self, cluster, body): nodes = body['grow'] instances = [] for node in nodes: instance = { 'flavor_id': utils.get_id_from_href(node['flavorRef']) } if 'name' in node: instance['name'] = node['name'] if 'volume' in node: instance['volume_size'] = int(node['volume']['size']) instances.append(instance) return cluster.grow(instances) def _action_shrink_cluster(self, cluster, body): nodes = body['shrink'] instance_ids = [node['id'] for node in nodes] return cluster.shrink(instance_ids) @property def cluster_view_class(self): return CassandraClusterView @property def mgmt_cluster_view_class(self): return CassandraMgmtClusterView class CassandraCluster(models.Cluster): DEFAULT_DATA_CENTER = "dc1" DEFAULT_RACK = "rack1" @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Processing a request for creating a new cluster.") # Updating Cluster Task. 
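        # (Clarifying note, not in the original: the flow below persists the
        # cluster record in BUILDING_INITIAL state, creates the member
        # instances, and then hands off to the task manager, which finishes
        # the cluster configuration asynchronously via create_cluster().)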
db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, extended_properties) # Calling taskmanager to further proceed for cluster-configuration. task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return CassandraCluster(context, db_info, datastore, datastore_version) @classmethod def _create_cluster_instances( cls, context, cluster_id, cluster_name, datastore, datastore_version, instances, extended_properties=None): LOG.debug("Processing a request for new cluster instances.") cassandra_conf = CONF.get(datastore_version.manager) eph_enabled = cassandra_conf.device_path vol_enabled = cassandra_conf.volume_support # Validate instance flavors. models.get_flavors_from_instance_defs(context, instances, vol_enabled, eph_enabled) # Compute the total volume allocation. req_volume_size = models.get_required_volume_size(instances, vol_enabled) # Check requirements against quota. num_new_instances = len(instances) deltas = {'instances': num_new_instances, 'volumes': req_volume_size} check_quotas(context.tenant, deltas) # Creating member instances. num_instances = len( CassandraClusterTasks.find_cluster_node_ids(cluster_id)) new_instances = [] for instance_idx, instance in enumerate(instances, num_instances + 1): instance_az = instance.get('availability_zone', None) member_config = {"id": cluster_id, "instance_type": "member", "dc": cls.DEFAULT_DATA_CENTER, "rack": instance_az or cls.DEFAULT_RACK} instance_name = instance.get('name') if not instance_name: instance_name = cls._build_instance_name( cluster_name, member_config['dc'], member_config['rack'], instance_idx) new_instance = inst_models.Instance.create( context, instance_name, instance['flavor_id'], datastore_version.image_id, [], [], datastore, datastore_version, instance['volume_size'], None, nics=instance.get('nics', None), availability_zone=instance_az, configuration_id=None, cluster_config=member_config) new_instances.append(new_instance) return new_instances @classmethod def _build_instance_name(cls, cluster_name, dc, rack, instance_idx): return "%s-member-%s-%s-%d" % (cluster_name, dc, rack, instance_idx) def grow(self, instances): LOG.debug("Processing a request for growing cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return CassandraCluster(context, db_info, datastore, datastore_version) def shrink(self, removal_ids): LOG.debug("Processing a request for shrinking cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(context, datastore_version.manager).shrink_cluster( db_info.id, removal_ids) return CassandraCluster(context, db_info, datastore, datastore_version) class CassandraClusterView(ClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) class 
CassandraMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) trove-5.0.0/trove/common/strategies/cluster/experimental/__init__.py0000664000567000056710000000000012701410316027065 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/redis/0000775000567000056710000000000012701410521026072 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/redis/guestagent.py0000664000567000056710000000453012701410316030616 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class RedisGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return RedisGuestAgentAPI class RedisGuestAgentAPI(guest_api.API): def get_node_ip(self): LOG.debug("Retrieve ip info from node.") return self._call("get_node_ip", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) def get_node_id_for_removal(self): LOG.debug("Validating cluster node removal.") return self._call("get_node_id_for_removal", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) def remove_nodes(self, node_ids): LOG.debug("Removing nodes from cluster.") return self._call("remove_nodes", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, node_ids=node_ids) def cluster_meet(self, ip, port): LOG.debug("Joining node to cluster.") return self._call("cluster_meet", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, ip=ip, port=port) def cluster_addslots(self, first_slot, last_slot): LOG.debug("Adding slots %s-%s to cluster.", first_slot, last_slot) return self._call("cluster_addslots", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, first_slot=first_slot, last_slot=last_slot) def cluster_complete(self): LOG.debug("Notifying cluster install completion.") return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) trove-5.0.0/trove/common/strategies/cluster/experimental/redis/taskmanager.py0000664000567000056710000001334712701410316030753 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
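# Illustrative sketch (not part of the original module): create_cluster
# below partitions Redis's 16384 hash slots as evenly as possible across
# the cluster members. With 3 nodes, 16384 = 3 * 5461 + 1, so the nodes
# receive the ranges 0-5461 (5462 slots), 5462-10922 (5461 slots) and
# 10923-16383 (5461 slots). The helper below (hypothetical, unused)
# reproduces that arithmetic with floor division:
def _example_slot_ranges(num_nodes, total_slots=16384):
    """Return one (first_slot, last_slot) range per node."""
    ranges = []
    slots_per_node = total_slots // num_nodes
    leftover = total_slots % num_nodes
    first = 0
    for _ in range(num_nodes):
        # Nodes consume one leftover slot each until none remain.
        last = first + slots_per_node - (0 if leftover > 0 else 1)
        leftover = max(leftover - 1, 0)
        ranges.append((first, last))
        first = last + 1
    return ranges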
from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common.strategies.cluster import base from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds. class RedisTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return RedisTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return RedisClusterTasks class RedisClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s." % cluster_id) def _create_cluster(): # Fetch instances by cluster_id against instances table. db_instances = DBInstance.find_all(cluster_id=cluster_id).all() instance_ids = [db_instance.id for db_instance in db_instances] # Wait for cluster members to get to cluster-ready status. if not self._all_instances_ready(instance_ids, cluster_id): return LOG.debug("All members ready, proceeding for cluster setup.") instances = [Instance.load(context, instance_id) for instance_id in instance_ids] # Connect nodes to the first node guests = [self.get_guest(instance) for instance in instances] try: cluster_head = instances[0] cluster_head_port = '6379' cluster_head_ip = self.get_ip(cluster_head) for guest in guests[1:]: guest.cluster_meet(cluster_head_ip, cluster_head_port) num_nodes = len(instances) total_slots = 16384 slots_per_node = total_slots / num_nodes leftover_slots = total_slots % num_nodes first_slot = 0 for guest in guests: last_slot = first_slot + slots_per_node if leftover_slots > 0: leftover_slots -= 1 else: last_slot -= 1 guest.cluster_addslots(first_slot, last_slot) first_slot = last_slot + 1 for guest in guests: guest.cluster_complete() except Exception: LOG.exception(_("Error creating cluster.")) self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for building cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s." % cluster_id) def grow_cluster(self, context, cluster_id, new_instance_ids): LOG.debug("Begin grow_cluster for id: %s." % cluster_id) def _grow_cluster(): db_instances = DBInstance.find_all(cluster_id=cluster_id).all() cluster_head = next(Instance.load(context, db_inst.id) for db_inst in db_instances if db_inst.id not in new_instance_ids) if not cluster_head: raise TroveError("Unable to determine existing Redis cluster " "member") (cluster_head_ip, cluster_head_port) = ( self.get_guest(cluster_head).get_node_ip()) # Wait for cluster members to get to cluster-ready status. 
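# Illustrative note (not in the original source): get_node_ip() above
# returns an (ip, port) pair, e.g. ('10.0.0.2', '6379'); each new member
# below is joined to the cluster by pointing cluster_meet() at that
# address once it reports ready.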
if not self._all_instances_ready(new_instance_ids, cluster_id): return LOG.debug("All members ready, proceeding for cluster setup.") new_insts = [Instance.load(context, instance_id) for instance_id in new_instance_ids] new_guests = map(self.get_guest, new_insts) # Connect nodes to the cluster head for guest in new_guests: guest.cluster_meet(cluster_head_ip, cluster_head_port) for guest in new_guests: guest.cluster_complete() timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for growing cluster.")) self.update_statuses_on_failure(cluster_id) except Exception: LOG.exception(_("Error growing cluster %s.") % cluster_id) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End grow_cluster for id: %s." % cluster_id) class RedisTaskManagerAPI(task_api.API): pass trove-5.0.0/trove/common/strategies/cluster/experimental/redis/__init__.py0000664000567000056710000000000012701410316030173 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/redis/api.py0000664000567000056710000001745612701410316027234 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
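# Illustrative note (not in the original source): members created by
# RedisCluster._create_instances() below default to names of the form
# '<cluster name>-member-<n>', e.g. 'redis1-member-1' and
# 'redis1-member-2' for a cluster named 'redis1'.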
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.cluster import models from trove.cluster.models import Cluster from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common import remote from trove.common.strategies.cluster import base from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class RedisAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return RedisCluster @property def cluster_view_class(self): return RedisClusterView @property def mgmt_cluster_view_class(self): return RedisMgmtClusterView class RedisCluster(models.Cluster): @staticmethod def _create_instances(context, db_info, datastore, datastore_version, instances): Redis_conf = CONF.get(datastore_version.manager) num_instances = len(instances) total_volume_allocation = 0 # Validate and Cache flavors nova_client = remote.create_nova_client(context) unique_flavors = set(map(lambda i: i['flavor_id'], instances)) flavor_cache = {} for fid in unique_flavors: try: flavor_cache.update({fid: nova_client.flavors.get(fid)}) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=fid) # Checking volumes name_index = 1 for instance in instances: if not instance.get('name'): instance['name'] = "%s-member-%s" % (db_info.name, name_index) name_index += 1 volume_size = instance.get('volume_size') if Redis_conf.volume_support: models.validate_volume_size(volume_size) total_volume_allocation += volume_size else: if volume_size: raise exception.VolumeNotSupported() ephemeral_support = Redis_conf.device_path flavor_id = instance['flavor_id'] flavor = flavor_cache[flavor_id] if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) # Check quotas quota_request = {'instances': num_instances, 'volumes': total_volume_allocation} check_quotas(context.tenant, quota_request) # Creating member instances return map(lambda instance: inst_models.Instance.create(context, instance['name'], instance['flavor_id'], datastore_version.image_id, [], [], datastore, datastore_version, instance.get('volume_size'), None, instance.get( 'availability_zone', None), instance.get('nics', None), configuration_id=None, cluster_config={ "id": db_info.id, "instance_type": "member"} ), instances) @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Initiating cluster creation.") # Updating Cluster Task db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return RedisCluster(context, db_info, datastore, datastore_version) def grow(self, instances): LOG.debug("Growing cluster.") self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_instances(context, db_info, datastore, datastore_version, 
instances) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return RedisCluster(context, db_info, datastore, datastore_version) def shrink(self, removal_ids): LOG.debug("Shrinking cluster %s.", self.id) self.validate_cluster_available() cluster_info = self.db_info cluster_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) try: removal_insts = [inst_models.Instance.load(self.context, inst_id) for inst_id in removal_ids] node_ids = [] error_ids = [] for instance in removal_insts: node_id = Cluster.get_guest(instance).get_node_id_for_removal() if node_id: node_ids.append(node_id) else: error_ids.append(instance.id) if error_ids: raise exception.ClusterShrinkInstanceInUse( id=error_ids, reason="Nodes cannot be removed. Check slots." ) all_instances = ( inst_models.DBInstance.find_all(cluster_id=self.id, deleted=False).all()) remain_insts = [inst_models.Instance.load(self.context, inst.id) for inst in all_instances if inst.id not in removal_ids] map(lambda x: Cluster.get_guest(x).remove_nodes(node_ids), remain_insts) map(lambda x: x.update_db(cluster_id=None), removal_insts) map(inst_models.Instance.delete, removal_insts) return RedisCluster(self.context, cluster_info, self.ds, self.ds_version) finally: cluster_info.update(task_status=ClusterTasks.NONE) class RedisClusterView(ClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) class RedisMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) trove-5.0.0/trove/common/strategies/cluster/experimental/vertica/0000775000567000056710000000000012701410521026421 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/vertica/guestagent.py0000664000567000056710000000524612701410316031152 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class VerticaGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return VerticaGuestAgentAPI class VerticaGuestAgentAPI(guest_api.API): def get_public_keys(self, user): LOG.debug("Getting public keys for user: %s." % user) return self._call("get_public_keys", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, user=user) def authorize_public_keys(self, user, public_keys): LOG.debug("Authorizing public keys for user: %s." % user) return self._call("authorize_public_keys", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, user=user, public_keys=public_keys) def install_cluster(self, members): LOG.debug("Installing Vertica cluster on members: %s." 
% members) return self._call("install_cluster", CONF.cluster_usage_timeout, self.version_cap, members=members) def grow_cluster(self, members): LOG.debug("Growing Vertica cluster with members: %s." % members) return self._call("grow_cluster", CONF.cluster_usage_timeout, self.version_cap, members=members) def shrink_cluster(self, members): LOG.debug("Shrinking Vertica cluster with members: %s." % members) return self._call("shrink_cluster", CONF.cluster_usage_timeout, self.version_cap, members=members) def mark_design_ksafe(self, k): LOG.debug("Setting vertica k-safety level to : %s." % k) return self._call("mark_design_ksafe", CONF.cluster_usage_timeout, self.version_cap, k=k) def cluster_complete(self): LOG.debug("Notifying cluster install completion.") return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) trove-5.0.0/trove/common/strategies/cluster/experimental/vertica/taskmanager.py0000664000567000056710000002210112701410316031266 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.strategies.cluster import base from trove.common.strategies.cluster.experimental.vertica.api import \ VerticaCluster from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds. class VerticaTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return VerticaTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return VerticaClusterTasks class VerticaClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s." % cluster_id) def _create_cluster(): # Fetch instances by cluster_id against instances table. db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() instance_ids = [db_instance.id for db_instance in db_instances] # Wait for cluster members to get to cluster-ready status. if not self._all_instances_ready(instance_ids, cluster_id): return LOG.debug("All members ready, proceeding for cluster setup.") instances = [Instance.load(context, instance_id) for instance_id in instance_ids] member_ips = [self.get_ip(instance) for instance in instances] guests = [self.get_guest(instance) for instance in instances] # Users to be configured for password-less SSH. authorized_users_without_password = ['root', 'dbadmin'] # Configuring password-less SSH for cluster members. # Strategy for setting up SSH: # get public keys for user from member-instances in cluster, # combine them, finally push it back to all instances, # and member instances add them to authorized keys. 
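# Illustrative note (not in the original source): 'pub_key' built in the
# loop below is a list with one entry per cluster member (the keys
# returned by each guest), and the complete list is authorized on every
# member, producing a full mesh of password-less SSH trust per user.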
LOG.debug("Configuring password-less SSH on cluster members.") try: for user in authorized_users_without_password: pub_key = [guest.get_public_keys(user) for guest in guests] for guest in guests: guest.authorize_public_keys(user, pub_key) LOG.debug("Installing cluster with members: %s." % member_ips) for db_instance in db_instances: if db_instance['type'] == 'master': master_instance = Instance.load(context, db_instance.id) self.get_guest(master_instance).install_cluster( member_ips) break LOG.debug("Finalizing cluster configuration.") for guest in guests: guest.cluster_complete() except Exception: LOG.exception(_("Error creating cluster.")) self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for building cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s." % cluster_id) def grow_cluster(self, context, cluster_id, new_instance_ids): def _grow_cluster(): LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id) db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() instance_ids = [db_instance.id for db_instance in db_instances] # Wait for new cluster members to get to cluster-ready status. if not self._all_instances_ready(new_instance_ids, cluster_id): return new_insts = [Instance.load(context, instance_id) for instance_id in new_instance_ids] existing_instances = [Instance.load(context, instance_id) for instance_id in instance_ids if instance_id not in new_instance_ids] existing_guests = [self.get_guest(i) for i in existing_instances] new_guests = [self.get_guest(i) for i in new_insts] all_guests = new_guests + existing_guests authorized_users_without_password = ['root', 'dbadmin'] new_ips = [self.get_ip(instance) for instance in new_insts] for user in authorized_users_without_password: pub_key = [guest.get_public_keys(user) for guest in all_guests] for guest in all_guests: guest.authorize_public_keys(user, pub_key) for db_instance in db_instances: if db_instance['type'] == 'master': LOG.debug("Found 'master' instance, calling grow on guest") master_instance = Instance.load(context, db_instance.id) self.get_guest(master_instance).grow_cluster(new_ips) break for guest in new_guests: guest.cluster_complete() timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for growing cluster.")) self.update_statuses_on_failure(cluster_id) except Exception: LOG.exception(_("Error growing cluster %s.") % cluster_id) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() def shrink_cluster(self, context, cluster_id, instance_ids): def _shrink_cluster(): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() all_instance_ids = [db_instance.id for db_instance in db_instances] remove_instances = [Instance.load(context, instance_id) for instance_id in instance_ids] left_instances = [Instance.load(context, instance_id) for instance_id in all_instance_ids if instance_id not in instance_ids] remove_member_ips = [self.get_ip(instance) for instance in remove_instances] k = VerticaCluster.k_safety(len(left_instances)) for db_instance in db_instances: if db_instance['type'] == 'master': master_instance = Instance.load(context, db_instance.id) if self.get_ip(master_instance) in 
remove_member_ips: raise RuntimeError(_("Cannot remove master instance!")) LOG.debug(_("Marking cluster k-safety: %s") % k) self.get_guest(master_instance).mark_design_ksafe(k) self.get_guest(master_instance).shrink_cluster( remove_member_ips) break for r in remove_instances: Instance.delete(r) timeout = Timeout(CONF.cluster_usage_timeout) try: _shrink_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise LOG.exception(_("Timeout for shrinking cluster.")) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("end shrink_cluster for Vertica cluster id %s" % self.id) class VerticaTaskManagerAPI(task_api.API): def _cast(self, method_name, version, **kwargs): LOG.debug("Casting %s" % method_name) cctxt = self.client.prepare(version=version) cctxt.cast(self.context, method_name, **kwargs) trove-5.0.0/trove/common/strategies/cluster/experimental/vertica/__init__.py0000664000567000056710000000000012701410316030522 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/vertica/api.py0000664000567000056710000002261512701410316027554 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.cluster import models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common import remote from trove.common.strategies.cluster import base from trove.common import utils from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class VerticaAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return VerticaCluster def _action_grow(self, cluster, body): nodes = body['grow'] instances = [] for node in nodes: instance = { 'flavor_id': utils.get_id_from_href(node['flavorRef']) } if 'name' in node: instance['name'] = node['name'] if 'volume' in node: instance['volume_size'] = int(node['volume']['size']) instances.append(instance) return cluster.grow(instances) def _action_shrink(self, cluster, body): nodes = body['shrink'] instance_ids = [node['id'] for node in nodes] return cluster.shrink(instance_ids) @property def cluster_view_class(self): return VerticaClusterView @property def mgmt_cluster_view_class(self): return VerticaMgmtClusterView class VerticaCluster(models.Cluster): @staticmethod def _create_instances(context, db_info, datastore, datastore_version, instances, new_cluster): vertica_conf = CONF.get(datastore_version.manager) num_instances = len(instances) existing = inst_models.DBInstance.find_all(cluster_id=db_info.id).all() num_existing = len(existing) # Matching number of instances with configured cluster_member_count if new_cluster 
\ and num_instances != vertica_conf.cluster_member_count: raise exception.ClusterNumInstancesNotSupported( num_instances=vertica_conf.cluster_member_count) # Checking flavors flavor_ids = [instance['flavor_id'] for instance in instances] if len(set(flavor_ids)) != 1: raise exception.ClusterFlavorsNotEqual() flavor_id = flavor_ids[0] nova_client = remote.create_nova_client(context) try: flavor = nova_client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) deltas = {'instances': num_instances} # Checking volumes volume_sizes = [instance['volume_size'] for instance in instances if instance.get('volume_size', None)] volume_size = None if vertica_conf.volume_support: if len(volume_sizes) != num_instances: raise exception.ClusterVolumeSizeRequired() if len(set(volume_sizes)) != 1: raise exception.ClusterVolumeSizesNotEqual() volume_size = volume_sizes[0] models.validate_volume_size(volume_size) deltas['volumes'] = volume_size * num_instances else: if len(volume_sizes) > 0: raise exception.VolumeNotSupported() ephemeral_support = vertica_conf.device_path if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) check_quotas(context.tenant, deltas) nics = [instance.get('nics', None) for instance in instances] azs = [instance.get('availability_zone', None) for instance in instances] # Creating member instances minstances = [] for i in range(0, num_instances): if i == 0 and new_cluster: member_config = {"id": db_info.id, "instance_type": "master"} else: member_config = {"id": db_info.id, "instance_type": "member"} instance_name = "%s-member-%s" % (db_info.name, str(i + num_existing + 1)) minstances.append( inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, nics=nics[i], availability_zone=azs[i], configuration_id=None, cluster_config=member_config) ) return minstances @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Initiating cluster creation.") vertica_conf = CONF.get(datastore_version.manager) num_instances = len(instances) # Matching number of instances with configured cluster_member_count if num_instances != vertica_conf.cluster_member_count: raise exception.ClusterNumInstancesNotSupported( num_instances=vertica_conf.cluster_member_count) db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances, new_cluster=True) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return VerticaCluster(context, db_info, datastore, datastore_version) @staticmethod def k_safety(n): """ Vertica defines k-safety values of 0, 1 or 2: https://my.vertica.com/docs/7.1.x/HTML/Content/Authoring/Glossary/ K-Safety.htm """ if n < 3: return 0 elif n < 5: return 1 else: return 2 def grow(self, instances): LOG.debug("Growing cluster.") self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_instances(context, db_info, datastore, datastore_version, instances, new_cluster=False) task_api.load(context, 
datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return VerticaCluster(context, db_info, datastore, datastore_version) def shrink(self, instance_ids): self.validate_cluster_available() context = self.context db_info = self.db_info datastore_version = self.ds_version for db_instance in self.db_instances: if db_instance.type == 'master': if db_instance.id in instance_ids: raise exception.ClusterShrinkInstanceInUse( id=db_instance.id, reason="Cannot remove master node." ) all_instance_ids = [db_instance.id for db_instance in self.db_instances] left_instances = [instance_id for instance_id in all_instance_ids if instance_id not in instance_ids] k = self.k_safety(len(left_instances)) vertica_conf = CONF.get(datastore_version.manager) if k < vertica_conf.min_ksafety: raise exception.ClusterNumInstancesBelowSafetyThreshold() db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(context, datastore_version.manager).shrink_cluster( self.db_info.id, instance_ids) return VerticaCluster(self.context, db_info, self.ds, self.ds_version) class VerticaClusterView(ClusterView): def build_instances(self): return self._build_instances(['member', 'master'], ['member', 'master']) class VerticaMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member', 'master'], ['member', 'master']) trove-5.0.0/trove/common/strategies/cluster/experimental/galera_common/0000775000567000056710000000000012701410521027567 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/galera_common/guestagent.py0000664000567000056710000000531012701410316032310 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Copyright 2016 Tesora Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
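# Illustrative note (not in the original source): the 'replication_user'
# argument accepted by install_cluster() below is a dict of the form
# {'name': 'clusterrepuser', 'password': '<generated>'}, as assembled by
# GaleraCommonClusterTasks.create_cluster() in taskmanager.py.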
from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base as cluster_base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class GaleraCommonGuestAgentStrategy(cluster_base.BaseGuestAgentStrategy): @property def guest_client_class(self): return GaleraCommonGuestAgentAPI class GaleraCommonGuestAgentAPI(guest_api.API): def install_cluster(self, replication_user, cluster_configuration, bootstrap): """Install the cluster.""" LOG.debug("Installing Galera cluster.") self._call("install_cluster", CONF.cluster_usage_timeout, self.version_cap, replication_user=replication_user, cluster_configuration=cluster_configuration, bootstrap=bootstrap) def reset_admin_password(self, admin_password): """Store this password on the instance as the admin password.""" self._call("reset_admin_password", CONF.cluster_usage_timeout, self.version_cap, admin_password=admin_password) def cluster_complete(self): """Set the status that the cluster build is complete.""" LOG.debug("Notifying cluster install completion.") return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) def get_cluster_context(self): """Get the context of the cluster.""" LOG.debug("Getting the cluster context.") return self._call("get_cluster_context", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap) def write_cluster_configuration_overrides(self, cluster_configuration): """Write an updated cluster configuration.""" LOG.debug("Writing an updated cluster configuration.") self._call("write_cluster_configuration_overrides", guest_api.AGENT_HIGH_TIMEOUT, self.version_cap, cluster_configuration=cluster_configuration) trove-5.0.0/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py0000664000567000056710000003377012701410316032452 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Copyright 2016 Tesora Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
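# Illustrative note (not in the original source): _render_cluster_config()
# below feeds values such as
#
#     replication_user_pass = "clusterrepuser:<generated password>"
#     cluster_ips           = "10.0.0.2,10.0.0.3,10.0.0.4"
#
# into the datastore's ClusterConfigTemplate to produce the
# conf.d/cluster.cnf overrides pushed to each member.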
from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common.remote import create_nova_client from trove.common.strategies.cluster import base as cluster_base from trove.common.template import ClusterConfigTemplate from trove.common import utils from trove.extensions.common import models as ext_models from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance import tasks as inst_tasks from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class GaleraCommonTaskManagerStrategy(cluster_base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return task_api.API @property def task_manager_cluster_tasks_class(self): return GaleraCommonClusterTasks class GaleraCommonClusterTasks(task_models.ClusterTasks): CLUSTER_REPLICATION_USER = "clusterrepuser" def _render_cluster_config(self, context, instance, cluster_ips, cluster_name, replication_user): client = create_nova_client(context) flavor = client.flavors.get(instance.flavor_id) instance_ip = self.get_ip(instance) config = ClusterConfigTemplate( self.datastore_version, flavor, instance.id) replication_user_pass = "%(name)s:%(password)s" % replication_user config_rendered = config.render( replication_user_pass=replication_user_pass, cluster_ips=cluster_ips, cluster_name=cluster_name, instance_ip=instance_ip, instance_name=instance.name, ) return config_rendered def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s." % cluster_id) def _create_cluster(): # Fetch instances by cluster_id against instances table. db_instances = DBInstance.find_all(cluster_id=cluster_id).all() instance_ids = [db_instance.id for db_instance in db_instances] LOG.debug("Waiting for instances to get to cluster-ready status.") # Wait for cluster members to get to cluster-ready status. if not self._all_instances_ready(instance_ids, cluster_id): raise TroveError("Instances in cluster did not report ACTIVE") LOG.debug("All members ready, proceeding for cluster setup.") instances = [Instance.load(context, instance_id) for instance_id in instance_ids] cluster_ips = [self.get_ip(instance) for instance in instances] instance_guests = [self.get_guest(instance) for instance in instances] # Create replication user and password for synchronizing the # galera cluster replication_user = { "name": self.CLUSTER_REPLICATION_USER, "password": utils.generate_random_password(), } # Galera cluster name must be unique and be shorter than a full # uuid string so we remove the hyphens and chop it off. It was # recommended to be 16 chars or less. # (this is not currently documented in the Galera docs) cluster_name = utils.generate_uuid().replace("-", "")[:16] LOG.debug("Configuring cluster configuration.") try: # Set the admin password for all the instances because the # password in the my.cnf will be wrong after the joiner # instances sync with the donor instance.
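# Illustrative note (not in the original source): the cluster name
# generated above is a hyphen-stripped uuid truncated to 16 characters,
# e.g. uuid '4d013e2b-339b-4a6f-...' yields '4d013e2b339b4a6f'.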
admin_password = str(utils.generate_random_password()) for guest in instance_guests: guest.reset_admin_password(admin_password) bootstrap = True for instance in instances: guest = self.get_guest(instance) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(cluster_ips), cluster_name, replication_user) # push the cluster config and bootstrap the first instance guest.install_cluster(replication_user, cluster_configuration, bootstrap) bootstrap = False LOG.debug("Finalizing cluster configuration.") for guest in instance_guests: guest.cluster_complete() except Exception: LOG.exception(_("Error creating cluster.")) self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for building cluster.")) self.update_statuses_on_failure(cluster_id) except TroveError: LOG.exception(_("Error creating cluster %s.") % cluster_id) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s." % cluster_id) def _check_cluster_for_root(self, context, existing_instances, new_instances): """Check for existing instances root enabled""" for instance in existing_instances: if ext_models.Root.load(context, instance.id): for new_instance in new_instances: ext_models.RootHistory.create(context, new_instance.id, context.user) return def grow_cluster(self, context, cluster_id, new_instance_ids): LOG.debug("Begin Galera grow_cluster for id: %s." % cluster_id) def _grow_cluster(): db_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() existing_instances = [Instance.load(context, db_inst.id) for db_inst in db_instances if db_inst.id not in new_instance_ids] if not existing_instances: raise TroveError("Unable to determine existing cluster " "member(s)") # get list of ips of existing cluster members existing_cluster_ips = [self.get_ip(instance) for instance in existing_instances] existing_instance_guests = [self.get_guest(instance) for instance in existing_instances] # get the cluster context to setup new members cluster_context = existing_instance_guests[0].get_cluster_context() # Wait for cluster members to get to cluster-ready status. 
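# Illustrative note (not in the original source): 'cluster_context'
# retrieved above is expected to carry at least 'admin_password',
# 'cluster_name' and 'replication_user', which are consumed when
# configuring the new members below.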
if not self._all_instances_ready(new_instance_ids, cluster_id): raise TroveError("Instances in cluster did not report ACTIVE") LOG.debug("All members ready, proceeding for cluster setup.") # Get the new instances to join the cluster new_instances = [Instance.load(context, instance_id) for instance_id in new_instance_ids] new_cluster_ips = [self.get_ip(instance) for instance in new_instances] for instance in new_instances: guest = self.get_guest(instance) guest.reset_admin_password(cluster_context['admin_password']) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(existing_cluster_ips), cluster_context['cluster_name'], cluster_context['replication_user']) # push the cluster config and bootstrap the first instance bootstrap = False guest.install_cluster(cluster_context['replication_user'], cluster_configuration, bootstrap) self._check_cluster_for_root(context, existing_instances, new_instances) # apply the new config to all instances for instance in existing_instances + new_instances: guest = self.get_guest(instance) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(existing_cluster_ips + new_cluster_ips), cluster_context['cluster_name'], cluster_context['replication_user']) guest.write_cluster_configuration_overrides( cluster_configuration) for instance in new_instances: guest = self.get_guest(instance) guest.cluster_complete() timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for growing cluster.")) self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) except Exception: LOG.exception(_("Error growing cluster %s.") % cluster_id) self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) finally: timeout.cancel() LOG.debug("End grow_cluster for id: %s." % cluster_id) def shrink_cluster(self, context, cluster_id, removal_instance_ids): LOG.debug("Begin Galera shrink_cluster for id: %s." 
% cluster_id) def _shrink_cluster(): removal_instances = [Instance.load(context, instance_id) for instance_id in removal_instance_ids] for instance in removal_instances: Instance.delete(instance) # wait for instances to be deleted def all_instances_marked_deleted(): non_deleted_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() non_deleted_ids = [db_instance.id for db_instance in non_deleted_instances] return not bool( set(removal_instance_ids).intersection( set(non_deleted_ids)) ) try: LOG.info(_("Deleting instances (%s)") % removal_instance_ids) utils.poll_until(all_instances_marked_deleted, sleep_time=2, time_out=CONF.cluster_delete_time_out) except PollTimeOut: LOG.error(_("timeout for instances to be marked as deleted.")) return db_instances = DBInstance.find_all(cluster_id=cluster_id).all() leftover_instances = [Instance.load(context, db_inst.id) for db_inst in db_instances if db_inst.id not in removal_instance_ids] leftover_cluster_ips = [self.get_ip(instance) for instance in leftover_instances] # Get config changes for left over instances rnd_cluster_guest = self.get_guest(leftover_instances[0]) cluster_context = rnd_cluster_guest.get_cluster_context() # apply the new config to all leftover instances for instance in leftover_instances: guest = self.get_guest(instance) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(leftover_cluster_ips), cluster_context['cluster_name'], cluster_context['replication_user']) guest.write_cluster_configuration_overrides( cluster_configuration) timeout = Timeout(CONF.cluster_usage_timeout) try: _shrink_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception(_("Timeout for shrinking cluster.")) self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR) except Exception: LOG.exception(_("Error shrinking cluster %s.") % cluster_id) self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR) finally: timeout.cancel() LOG.debug("End shrink_cluster for id: %s." % cluster_id) trove-5.0.0/trove/common/strategies/cluster/experimental/galera_common/__init__.py0000664000567000056710000000000012701410316031670 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/cluster/experimental/galera_common/api.py0000664000567000056710000002207012701410316030715 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Copyright 2016 Tesora Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
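# Illustrative example (not in the original source): a payload accepted
# by GaleraCommonCluster._validate_cluster_instances() below uses a
# single flavor and a single volume size across all members, e.g.
#
#     instances = [{'flavor_id': '7', 'volume_size': 1},
#                  {'flavor_id': '7', 'volume_size': 1},
#                  {'flavor_id': '7', 'volume_size': 1}]
#
# assuming min_cluster_member_count <= 3 and volume support enabled.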
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.cluster import models as cluster_models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common import remote from trove.common.strategies.cluster import base as cluster_base from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class GaleraCommonAPIStrategy(cluster_base.BaseAPIStrategy): @property def cluster_class(self): return GaleraCommonCluster @property def cluster_view_class(self): return GaleraCommonClusterView @property def mgmt_cluster_view_class(self): return GaleraCommonMgmtClusterView class GaleraCommonCluster(cluster_models.Cluster): @staticmethod def _validate_cluster_instances(context, instances, datastore, datastore_version): """Validate the flavor and volume""" ds_conf = CONF.get(datastore_version.manager) num_instances = len(instances) # Check number of instances is at least min_cluster_member_count if num_instances < ds_conf.min_cluster_member_count: raise exception.ClusterNumInstancesNotLargeEnough( num_instances=ds_conf.min_cluster_member_count) # Checking flavors and get delta for quota check flavor_ids = [instance['flavor_id'] for instance in instances] if len(set(flavor_ids)) != 1: raise exception.ClusterFlavorsNotEqual() flavor_id = flavor_ids[0] nova_client = remote.create_nova_client(context) try: flavor = nova_client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) deltas = {'instances': num_instances} # Checking volumes and get delta for quota check volume_sizes = [instance['volume_size'] for instance in instances if instance.get('volume_size', None)] volume_size = None if ds_conf.volume_support: if len(volume_sizes) != num_instances: raise exception.ClusterVolumeSizeRequired() if len(set(volume_sizes)) != 1: raise exception.ClusterVolumeSizesNotEqual() volume_size = volume_sizes[0] cluster_models.validate_volume_size(volume_size) deltas['volumes'] = volume_size * num_instances else: if len(volume_sizes) > 0: raise exception.VolumeNotSupported() ephemeral_support = ds_conf.device_path if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) # quota check check_quotas(context.tenant, deltas) # Checking networks are same for the cluster instance_nics = [instance.get('nics', None) for instance in instances] if len(set(instance_nics)) != 1: raise exception.ClusterNetworksNotEqual() instance_nic = instance_nics[0] if instance_nic is None: return try: nova_client.networks.get(instance_nic) except nova_exceptions.NotFound: raise exception.NetworkNotFound(uuid=instance_nic) @staticmethod def _create_instances(context, db_info, datastore, datastore_version, instances): member_config = {"id": db_info.id, "instance_type": "member"} name_index = 1 for instance in instances: if not instance.get("name"): instance['name'] = "%s-member-%s" % (db_info.name, str(name_index)) name_index += 1 return map(lambda instance: Instance.create(context, instance['name'], instance['flavor_id'], datastore_version.image_id, [], [], datastore, datastore_version, instance.get('volume_size', None), None, availability_zone=instance.get( 
'availability_zone', None), nics=instance.get('nics', None), configuration_id=None, cluster_config=member_config ), instances) @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Initiating Galera cluster creation.") cls._validate_cluster_instances(context, instances, datastore, datastore_version) # Updating Cluster Task db_info = cluster_models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return cls(context, db_info, datastore, datastore_version) def _get_cluster_network_interfaces(self): nova_client = remote.create_nova_client(self.context) nova_instance_id = self.db_instances[0].compute_instance_id interfaces = nova_client.virtual_interfaces.list(nova_instance_id) ret = [{"net-id": getattr(interface, 'net_id')} for interface in interfaces] return ret def grow(self, instances): LOG.debug("Growing cluster %s." % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) try: # Get the network of the existing cluster instances. interface_ids = self._get_cluster_network_interfaces() for instance in instances: instance["nics"] = interface_ids new_instances = self._create_instances( context, db_info, datastore, datastore_version, instances) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) except Exception: db_info.update(task_status=ClusterTasks.NONE) return self.__class__(context, db_info, datastore, datastore_version) def shrink(self, instances): """Removes instances from a cluster.""" LOG.debug("Shrinking cluster %s." % self.id) self.validate_cluster_available() removal_instances = [Instance.load(self.context, inst_id) for inst_id in instances] db_instances = DBInstance.find_all(cluster_id=self.db_info.id).all() if len(db_instances) - len(removal_instances) < 1: raise exception.ClusterShrinkMustNotLeaveClusterEmpty() self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) try: task_api.load(self.context, self.ds_version.manager ).shrink_cluster(self.db_info.id, [instance.id for instance in removal_instances]) except Exception: self.db_info.update(task_status=ClusterTasks.NONE) return self.__class__(self.context, self.db_info, self.ds, self.ds_version) class GaleraCommonClusterView(ClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) class GaleraCommonMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) trove-5.0.0/trove/common/strategies/cluster/strategy.py0000664000567000056710000000312612701410316024507 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from trove.common import cfg from trove.common.utils import import_class CONF = cfg.CONF LOG = logging.getLogger(__name__) def load_api_strategy(manager): clazz = CONF.get(manager).get('api_strategy') LOG.debug("Loading class %s" % clazz) api_strategy = import_class(clazz) return api_strategy() def load_taskmanager_strategy(manager): try: clazz = CONF.get(manager).get('taskmanager_strategy') LOG.debug("Loading class %s" % clazz) taskmanager_strategy = import_class(clazz) return taskmanager_strategy() except NoSuchOptError: return None def load_guestagent_strategy(manager): try: clazz = CONF.get(manager).get('guestagent_strategy') LOG.debug("Loading class %s" % clazz) guestagent_strategy = import_class(clazz) return guestagent_strategy() except NoSuchOptError: return None trove-5.0.0/trove/common/strategies/__init__.py0000664000567000056710000000000012701410316022707 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/storage/0000775000567000056710000000000012701410521022252 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/storage/__init__.py0000664000567000056710000000167712701410316024400 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) def get_storage_strategy(storage_driver, ns=__name__): LOG.debug("Getting storage strategy: %s." % storage_driver) return Strategy.get_strategy(storage_driver, ns) trove-5.0.0/trove/common/strategies/storage/base.py0000664000567000056710000000275312701410316023547 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from trove.common.strategies.strategy import Strategy class Storage(Strategy): """Base class for Storage Strategy implementation.""" __strategy_type__ = 'storage' __strategy_ns__ = 'trove.common.strategies.storage' def __init__(self, context): self.context = context super(Storage, self).__init__() @abc.abstractmethod def save(self, filename, stream): """Persist information from the stream.""" @abc.abstractmethod def load(self, location, backup_checksum): """Load a stream from a persisted storage location.""" @abc.abstractmethod def load_metadata(self, location, backup_checksum): """Load metadata for a persisted object.""" @abc.abstractmethod def save_metadata(self, location, metadata={}): """Save metadata for a persisted object.""" trove-5.0.0/trove/common/strategies/storage/swift.py0000664000567000056710000002145312701410316023767 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import hashlib from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.remote import create_swift_client from trove.common.strategies.storage import base LOG = logging.getLogger(__name__) CONF = cfg.CONF CHUNK_SIZE = CONF.backup_chunk_size MAX_FILE_SIZE = CONF.backup_segment_max_size BACKUP_CONTAINER = CONF.backup_swift_container class DownloadError(Exception): """Error running the Swift Download Command.""" class SwiftDownloadIntegrityError(Exception): """Integrity error while running the Swift Download Command.""" class StreamReader(object): """Wrap the stream from the backup process and chunk it into segments.""" def __init__(self, stream, filename, max_file_size=MAX_FILE_SIZE): self.stream = stream self.filename = filename self.container = BACKUP_CONTAINER self.max_file_size = max_file_size self.segment_length = 0 self.process = None self.file_number = 0 self.end_of_file = False self.end_of_segment = False self.segment_checksum = hashlib.md5() @property def base_filename(self): """Filename with extensions removed.""" return self.filename.split('.')[0] @property def segment(self): return '%s_%08d' % (self.base_filename, self.file_number) @property def prefix(self): return '%s/%s_' % (self.container, self.base_filename) def read(self, chunk_size=CHUNK_SIZE): if self.end_of_segment: self.segment_length = 0 self.segment_checksum = hashlib.md5() self.end_of_segment = False # Upload to a new file if we are starting or too large if self.segment_length > (self.max_file_size - chunk_size): self.file_number += 1 self.end_of_segment = True return '' chunk = self.stream.read(chunk_size) if not chunk: self.end_of_file = True return '' self.segment_checksum.update(chunk) self.segment_length += len(chunk) return chunk class SwiftStorage(base.Storage): """Implementation of Storage Strategy for Swift.""" __strategy_name__ = 'swift' def __init__(self, *args, **kwargs): super(SwiftStorage, self).__init__(*args, **kwargs) self.connection =
class SwiftStorage(base.Storage):
    """Implementation of Storage Strategy for Swift."""
    __strategy_name__ = 'swift'

    def __init__(self, *args, **kwargs):
        super(SwiftStorage, self).__init__(*args, **kwargs)
        self.connection = create_swift_client(self.context)

    def save(self, filename, stream):
        """Persist information from the stream to swift.

        The file is saved to the location <BACKUP_CONTAINER>/<filename>.
        The filename is defined on the backup runner manifest property
        which is typically in the format '<backup_id>.<ext>.gz'
        """
        # Create the container if it doesn't already exist
        self.connection.put_container(BACKUP_CONTAINER)

        # Swift Checksum is the checksum of the concatenated segment checksums
        swift_checksum = hashlib.md5()

        # Wrap the output of the backup process to segment it for swift
        stream_reader = StreamReader(stream, filename)

        url = self.connection.url
        # Full location where the backup manifest is stored
        location = "%s/%s/%s" % (url, BACKUP_CONTAINER, filename)

        # Read from the stream and write to the container in swift
        while not stream_reader.end_of_file:
            etag = self.connection.put_object(BACKUP_CONTAINER,
                                              stream_reader.segment,
                                              stream_reader)

            segment_checksum = stream_reader.segment_checksum.hexdigest()

            # Check each segment MD5 hash against swift etag
            # Raise an error and mark backup as failed
            if etag != segment_checksum:
                LOG.error(_("Error saving data segment to swift. "
                            "ETAG: %(tag)s Segment MD5: %(checksum)s."),
                          {'tag': etag, 'checksum': segment_checksum})
                return False, "Error saving data to Swift!", None, location

            swift_checksum.update(segment_checksum)

        # Create the manifest file
        # We create the manifest file after all the segments have been
        # uploaded so a partial swift object file can't be downloaded; if the
        # manifest file exists then all segments have been uploaded so the
        # whole backup file can be downloaded.
        headers = {'X-Object-Manifest': stream_reader.prefix}
        # The etag returned from the manifest PUT is the checksum of the
        # manifest object (which is empty); this is not the checksum we want
        self.connection.put_object(BACKUP_CONTAINER,
                                   filename,
                                   contents='',
                                   headers=headers)

        resp = self.connection.head_object(BACKUP_CONTAINER, filename)
        # swift returns etag in double quotes
        # e.g. '"dc3b0827f276d8d78312992cc60c2c3f"'
        etag = resp['etag'].strip('"')

        # Check the checksum of the concatenated segment checksums against
        # swift manifest etag.
        # Raise an error and mark backup as failed
        final_swift_checksum = swift_checksum.hexdigest()
        if etag != final_swift_checksum:
            LOG.error(
                _("Error saving data to swift. Manifest "
                  "ETAG: %(tag)s Swift MD5: %(checksum)s"),
                {'tag': etag, 'checksum': final_swift_checksum})
            return False, "Error saving data to Swift!", None, location

        return (True, "Successfully saved data to Swift!",
                final_swift_checksum, location)
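
    # Illustrative sketch (not part of the trove source): for a Swift
    # dynamic large object, the manifest's ETag is expected to be the MD5
    # of the concatenated hex MD5s of the segments, which is exactly what
    # save() accumulates in swift_checksum above:
    #
    #     import hashlib
    #     seg1, seg2 = b'first segment', b'second segment'
    #     expected = hashlib.md5(
    #         (hashlib.md5(seg1).hexdigest() +
    #          hashlib.md5(seg2).hexdigest()).encode()).hexdigest()
    #     # 'expected' is what the final head_object()['etag'] comparison
    #     # should see once both segments have been uploaded.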
    def _explodeLocation(self, location):
        storage_url = "/".join(location.split('/')[:-2])
        container = location.split('/')[-2]
        filename = location.split('/')[-1]
        return storage_url, container, filename

    def _verify_checksum(self, etag, checksum):
        etag_checksum = etag.strip('"')
        if etag_checksum != checksum:
            msg = (_("Original checksum: %(original)s does not match"
                     " the current checksum: %(current)s") %
                   {'original': etag_checksum, 'current': checksum})
            LOG.error(msg)
            raise SwiftDownloadIntegrityError(msg)
        return True

    def load(self, location, backup_checksum):
        """Restore a backup from the input stream to the restore_location."""
        storage_url, container, filename = self._explodeLocation(location)

        headers, info = self.connection.get_object(container, filename,
                                                   resp_chunk_size=CHUNK_SIZE)

        if CONF.verify_swift_checksum_on_restore:
            self._verify_checksum(headers.get('etag', ''), backup_checksum)

        return info

    def _get_attr(self, original):
        """Get a friendly name from an object header key."""
        key = original.replace('-', '_')
        key = key.replace('x_object_meta_', '')
        return key

    def _set_attr(self, original):
        """Return a swift friendly header key."""
        key = original.replace('_', '-')
        return 'X-Object-Meta-%s' % key

    def load_metadata(self, location, backup_checksum):
        """Load metadata from swift."""
        storage_url, container, filename = self._explodeLocation(location)

        headers = self.connection.head_object(container, filename)

        if CONF.verify_swift_checksum_on_restore:
            self._verify_checksum(headers.get('etag', ''), backup_checksum)

        _meta = {}
        for key, value in headers.iteritems():
            if key.startswith('x-object-meta'):
                _meta[self._get_attr(key)] = value

        return _meta

    def save_metadata(self, location, metadata={}):
        """Save metadata to a swift object."""
        storage_url, container, filename = self._explodeLocation(location)

        _headers = self.connection.head_object(container, filename)

        headers = {'X-Object-Manifest': _headers.get('x-object-manifest')}
        for key, value in metadata.iteritems():
            headers[self._set_attr(key)] = value

        LOG.info(_("Writing metadata: %s"), str(headers))
        self.connection.post_object(container, filename, headers=headers)
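
# Illustrative sketch (not part of the trove source): _explodeLocation()
# simply splits a manifest location of the form
# <storage_url>/<container>/<filename> back into its parts, e.g.:
#
#     location = ('http://10.0.0.1:8080/v1/AUTH_abc/database_backups/'
#                 'backup_id.xbstream.gz.enc')
#     parts = location.split('/')
#     storage_url = '/'.join(parts[:-2])  # http://10.0.0.1:8080/v1/AUTH_abc
#     container = parts[-2]               # database_backups
#     filename = parts[-1]                # backup_id.xbstream.gz.enc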
trove-5.0.0/trove/common/strategies/storage/experimental/0000775000567000056710000000000012701410521024747 5ustar  jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/storage/experimental/__init__.py0000664000567000056710000000000012701410316027050 0ustar  jenkinsjenkins00000000000000trove-5.0.0/trove/common/strategies/strategy.py0000664000567000056710000000367512701410316023033 0ustar  jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc

from oslo_log import log as logging
import six

from trove.common import utils

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class Strategy(object):

    __strategy_ns__ = None

    __strategy_name__ = None
    __strategy_type__ = None

    def __init__(self):
        self.name = self.get_canonical_name()
        LOG.debug("Loaded strategy %s", self.name)

    def is_enabled(self):
        """
        Is this Strategy enabled?

        :retval: Boolean
        """
        return True

    @classmethod
    def get_strategy(cls, name, ns=None):
        """
        Load a strategy from namespace
        """
        ns = ns or cls.__strategy_ns__
        if ns is None:
            raise RuntimeError(
                'No namespace provided or __strategy_ns__ unset')

        LOG.debug('Looking for strategy %s in %s', name, ns)

        return utils.import_class(ns + "." + name)

    @classmethod
    def get_canonical_name(cls):
        """
        Return the strategy name
        """
        type_ = cls.get_strategy_type()
        name = cls.get_strategy_name()
        return "%s:%s" % (type_, name)

    @classmethod
    def get_strategy_name(cls):
        return cls.__strategy_name__

    @classmethod
    def get_strategy_type(cls):
        return cls.__strategy_type__
trove-5.0.0/trove/common/notification.py0000664000567000056710000005111412701410316021500 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import copy
import traceback

from oslo_log import log as logging
from oslo_utils import timeutils

from trove.common import cfg
from trove.common.exception import TroveError
from trove.common.i18n import _
from trove.conductor import api as conductor_api
from trove import rpc

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class EndNotification(object):

    @property
    def _notifier(self):
        '''
        Returns the notification for Trove API or TaskManager,
        otherwise returns an API to the conductor to whom to forward
        the notification
        '''
        return (self.context.notification if
                self.context.notification.server_type in ['api',
                                                          'taskmanager']
                else conductor_api.API(self.context))

    def __init__(self, context, **kwargs):
        self.context = context
        self.context.notification.payload.update(kwargs)

    def __enter__(self):
        return self.context.notification

    def __exit__(self, etype, value, tb):
        if etype:
            message = str(value)
            exception = traceback.format_exception(etype, value, tb)
            self._notifier.notify_exc_info(message, exception)
        else:
            self._notifier.notify_end()


class StartNotification(EndNotification):

    def __enter__(self):
        self.context.notification.notify_start()
        return super(StartNotification, self).__enter__()


class NotificationCastWrapper(object):

    def __init__(self, context, api):
        self.context = context
        self.api = api
        self.has_notification = hasattr(context, 'notification')

    def __enter__(self):
        if self.has_notification:
            self.old_server_type = self.context.notification.server_type
            self.context.notification.server_type = self.api

    def __exit__(self, etype, value, traceback):
        if self.has_notification:
            self.context.notification.server_type = self.old_server_type
            self.context.notification.needs_end_notification = False
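
# Illustrative usage sketch (not part of the trove source): the context
# managers above bracket an API call with .start/.end (or .error) events.
# Assuming a context whose .notification is one of the DBaaSAPINotification
# subclasses defined below, and a WSGI request object 'req' (hypothetical
# name), typical usage looks like:
#
#     context.notification = DBaaSInstanceRestart(context, request=req)
#     with StartNotification(context, instance_id=instance_id):
#         ...  # do the work; an exception here emits a dbaas.*.error event
#     # leaving the block cleanly emits dbaas.instance_restart.end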
class TroveBaseTraits(object):

    '''
    The base traits of all trove.* notifications.

    This class should correspond to trove_base_traits in
    ceilometer/event_definitions.yaml
    '''

    event_type_format = 'trove.instance.%s'

    def __init__(self, **kwargs):
        self.payload = {}

        instance = kwargs.pop('instance', None)
        if instance:
            self.instance = instance
            self.context = instance.context
            created_time = timeutils.isotime(instance.db_info.created)
            self.payload.update({
                'created_at': created_time,
                'name': instance.name,
                'instance_id': instance.id,
                'instance_name': instance.name,
                'instance_type_id': instance.flavor_id,
                'launched_at': created_time,
                'nova_instance_id': instance.server_id,
                'region': CONF.region,
                'state_description': instance.status.lower(),
                'state': instance.status.lower(),
                'tenant_id': instance.tenant_id,
                'user_id': instance.context.user,
            })

        self.payload.update(kwargs)

    def serialize(self, ctxt):
        if hasattr(self, 'instance'):
            if 'instance_type' not in self.payload:
                flavor_id = self.instance.flavor_id
                flavor = self.instance.nova_client.flavors.get(flavor_id)
                self.payload['instance_type'] = flavor.name
            self.payload['service_id'] = self.instance._get_service_id(
                self.instance.datastore_version.manager,
                CONF.notification_service_id)
        return self.payload

    def deserialize(self, ctxt, payload):
        self.payload = payload
        self.context = ctxt
        return self

    def notify(self, event_type, publisher_id=CONF.host):
        event_type = self.event_type_format % event_type
        event_payload = self.serialize(self.context)
        LOG.debug('Sending event: %(event_type)s, %(payload)s' %
                  {'event_type': event_type, 'payload': event_payload})
        notifier = rpc.get_notifier(
            service='taskmanager', publisher_id=publisher_id)
        notifier.info(self.context, event_type, event_payload)


class TroveCommonTraits(TroveBaseTraits):

    '''
    Additional traits for trove.* notifications that describe
    instance action events

    This class should correspond to trove_common_traits in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        self.server = kwargs.pop('server', None)
        super(TroveCommonTraits, self).__init__(**kwargs)

    def serialize(self, ctxt):
        if hasattr(self, 'instance'):
            instance = self.instance
            if 'instance_type' not in self.payload:
                flavor = instance.nova_client.flavors.get(instance.flavor_id)
                self.payload['instance_size'] = flavor.ram
            if self.server is None:
                self.server = instance.nova_client.servers.get(
                    instance.server_id)
            self.payload['availability_zone'] = getattr(
                self.server, 'OS-EXT-AZ:availability_zone', None)
            if CONF.get(instance.datastore_version.manager).volume_support:
                self.payload.update({
                    'volume_size': instance.volume_size,
                    'nova_volume_id': instance.volume_id
                })

        return TroveBaseTraits.serialize(self, ctxt)


class TroveInstanceCreate(TroveCommonTraits):

    '''
    Additional traits for trove.instance.create notifications that
    describe instance action events

    This class should correspond to trove_instance_create in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceCreate, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceCreate, self).notify('create')


class TroveInstanceModifyVolume(TroveCommonTraits):

    '''
    Additional traits for trove.instance.modify_volume notifications that
    describe instance action events

    This class should correspond to trove_instance_modify_volume in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceModifyVolume, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceModifyVolume, self).notify('modify_volume')
class TroveInstanceModifyFlavor(TroveCommonTraits):

    '''
    Additional traits for trove.instance.modify_flavor notifications
    that describe instance action events

    This class should correspond to trove_instance_modify_flavor in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceModifyFlavor, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceModifyFlavor, self).notify('modify_flavor')


class TroveInstanceDelete(TroveCommonTraits):

    '''
    Additional traits for trove.instance.delete notifications that
    describe instance action events

    This class should correspond to trove_instance_delete in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceDelete, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceDelete, self).notify('delete')


class DBaaSQuotas(object):

    '''
    The traits of dbaas.quotas notifications.

    This class should correspond to dbaas.quotas in
    ceilometer/event_definitions.yaml
    '''

    event_type = 'dbaas.quota'

    def __init__(self, context, quota, usage):
        self.context = context
        self.payload = {
            'resource': quota.resource,
            'in_use': usage.in_use,
            'reserved': usage.reserved,
            'limit': quota.hard_limit,
            'updated': usage.updated
        }

    def notify(self):
        LOG.debug('Sending event: %(event_type)s, %(payload)s' %
                  {'event_type': DBaaSQuotas.event_type,
                   'payload': self.payload})

        notifier = rpc.get_notifier(
            service='taskmanager', publisher_id=CONF.host)
        notifier.info(self.context, DBaaSQuotas.event_type, self.payload)
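
# Illustrative sketch (not part of the trove source): DBaaSQuotas only needs
# objects exposing the attributes it reads, so its payload can be previewed
# with simple stand-ins (hypothetical names, for illustration only):
#
#     import collections
#     Quota = collections.namedtuple('Quota', 'resource hard_limit')
#     Usage = collections.namedtuple('Usage', 'in_use reserved updated')
#     n = DBaaSQuotas(None, Quota('instances', 10),
#                     Usage(3, 1, '2016-04-01T00:00:00'))
#     # n.payload == {'resource': 'instances', 'in_use': 3, 'reserved': 1,
#     #               'limit': 10, 'updated': '2016-04-01T00:00:00'}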
class DBaaSAPINotification(object):

    '''
    The traits of dbaas.* notifications (except quotas).

    This class should correspond to dbaas_base_traits in
    ceilometer/event_definitions.yaml
    '''

    event_type_format = 'dbaas.%s.%s'

    @abc.abstractmethod
    def event_type(self):
        'Returns the event type (like "create" for dbaas.create.start)'
        pass

    @abc.abstractmethod
    def required_start_traits(self):
        'Returns list of required traits for start notification'
        pass

    def optional_start_traits(self):
        'Returns list of optional traits for start notification'
        return []

    def required_end_traits(self):
        'Returns list of required traits for end notification'
        return []

    def optional_end_traits(self):
        'Returns list of optional traits for end notification'
        return []

    def required_error_traits(self):
        'Returns list of required traits for error notification'
        return ['message', 'exception']

    def optional_error_traits(self):
        'Returns list of optional traits for error notification'
        return []

    def required_base_traits(self):
        return ['tenant_id', 'client_ip', 'server_ip', 'server_type',
                'request_id']

    @property
    def server_type(self):
        return self.payload['server_type']

    @server_type.setter
    def server_type(self, server_type):
        self.payload['server_type'] = server_type

    def __init__(self, context, **kwargs):
        self.context = context
        self.needs_end_notification = True

        self.payload = {}

        if 'request' in kwargs:
            request = kwargs.pop('request')
            self.payload.update({
                'request_id': context.request_id,
                'server_type': 'api',
                'client_ip': request.remote_addr,
                'server_ip': request.host,
                'tenant_id': context.tenant,
            })
        elif 'request_id' not in kwargs:
            raise TroveError(_("Notification %s must include 'request'"
                               " property") % self.__class__.__name__)

        self.payload.update(kwargs)

    def serialize(self, context):
        return self.payload

    def validate(self, required_traits):
        required_keys = set(required_traits)
        provided_keys = set(self.payload.keys())
        if not required_keys.issubset(provided_keys):
            raise TroveError(_("The following required keys not defined for"
                               " notification %(name)s: %(keys)s")
                             % {'name': self.__class__.__name__,
                                'keys': list(required_keys - provided_keys)})
        if 'server_type' not in self.payload:
            raise TroveError(_("Notification %s must include a"
                               " 'server_type' for correct routing")
                             % self.__class__.__name__)

    def _notify(self, event_qualifier, required_traits, optional_traits,
                **kwargs):
        self.payload.update(kwargs)
        self.validate(self.required_base_traits() + required_traits)
        available_values = self.serialize(self.context)
        payload = {k: available_values[k]
                   for k in self.required_base_traits() + required_traits}
        for k in optional_traits:
            if k in available_values:
                payload[k] = available_values[k]

        qualified_event_type = (DBaaSAPINotification.event_type_format
                                % (self.event_type(), event_qualifier))
        LOG.debug('Sending event: %(event_type)s, %(payload)s' %
                  {'event_type': qualified_event_type, 'payload': payload})

        context = copy.copy(self.context)
        del context.notification
        notifier = rpc.get_notifier(service=self.payload['server_type'])
        notifier.info(context, qualified_event_type, self.payload)

    def notify_start(self, **kwargs):
        self._notify('start', self.required_start_traits(),
                     self.optional_start_traits(), **kwargs)

    def notify_end(self, **kwargs):
        if self.needs_end_notification:
            self._notify('end', self.required_end_traits(),
                         self.optional_end_traits(), **kwargs)

    def notify_exc_info(self, message, exception):
        self.payload.update({
            'message': message,
            'exception': exception
        })
        self._notify('error', self.required_error_traits(),
                     self.optional_error_traits())


class DBaaSInstanceCreate(DBaaSAPINotification):

    def event_type(self):
        return 'instance_create'

    def required_start_traits(self):
        return ['name', 'flavor_id', 'datastore', 'datastore_version',
                'image_id', 'availability_zone']

    def optional_start_traits(self):
        return ['databases', 'users', 'volume_size', 'restore_point',
                'replica_of', 'replica_count', 'cluster_id', 'backup_id',
                'nics']

    def required_end_traits(self):
        return ['instance_id']


class DBaaSInstanceRestart(DBaaSAPINotification):

    def event_type(self):
        return 'instance_restart'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceResizeVolume(DBaaSAPINotification):

    def event_type(self):
        return 'instance_resize_volume'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'new_size']


class DBaaSInstanceResizeInstance(DBaaSAPINotification):

    def event_type(self):
        return 'instance_resize_instance'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'new_flavor_id']


class DBaaSInstancePromote(DBaaSAPINotification):

    def event_type(self):
        return 'instance_promote'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceEject(DBaaSAPINotification):

    def event_type(self):
        return 'instance_eject'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceDelete(DBaaSAPINotification):

    def event_type(self):
        return 'instance_delete'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceDetach(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'instance_detach'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceAttachConfiguration(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'instance_attach_configuration'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'configuration_id']


class DBaaSInstanceDetachConfiguration(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'instance_detach_configuration'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id']
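
# Illustrative sketch (not part of the trove source): _notify() builds the
# emitted payload by whitelisting traits, so a subclass only has to declare
# names. For DBaaSInstanceCreate above, a start event keeps the base traits
# plus required_start_traits(), and optional traits only when present:
#
#     base = ['tenant_id', 'client_ip', 'server_ip', 'server_type',
#             'request_id']
#     required = ['name', 'flavor_id', 'datastore', 'datastore_version',
#                 'image_id', 'availability_zone']
#     available = {k: '...' for k in base + required + ['volume_size']}
#     payload = {k: available[k] for k in base + required}
#     for k in ['databases', 'users', 'volume_size']:  # optional subset
#         if k in available:
#             payload[k] = available[k]
#     # resulting event type: 'dbaas.%s.%s' % ('instance_create', 'start')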
class DBaaSClusterCreate(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_create'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['name', 'datastore', 'datastore_version']

    @abc.abstractmethod
    def required_end_traits(self):
        return ['cluster_id']


class DBaaSClusterDelete(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_delete'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterAddShard(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_add_shard'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterGrow(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_grow'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterShrink(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_shrink'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['cluster_id']


class DBaaSBackupCreate(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'backup_create'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['name', 'instance_id', 'description', 'parent_id']

    @abc.abstractmethod
    def required_end_traits(self):
        return ['backup_id']


class DBaaSBackupDelete(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'backup_delete'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['backup_id']


class DBaaSDatabaseCreate(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'database_create'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'dbname']


class DBaaSDatabaseDelete(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'database_delete'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'dbname']


class DBaaSUserCreate(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'user_create'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSUserDelete(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'user_delete'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSUserUpdateAttributes(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'user_update_attributes'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSUserGrant(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'user_grant'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'username', 'database']


class DBaaSUserRevoke(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'user_revoke'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'username', 'database']


class DBaaSUserChangePassword(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'user_change_password'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSConfigurationCreate(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'configuration_create'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['name', 'datastore', 'datastore_version']

    def required_end_traits(self):
        return ['configuration_id']
class DBaaSConfigurationDelete(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'configuration_delete'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['configuration_id']


class DBaaSConfigurationUpdate(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'configuration_update'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['configuration_id', 'name', 'description']


class DBaaSConfigurationEdit(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'configuration_edit'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['configuration_id']
trove-5.0.0/trove/common/instance.py0000664000567000056710000000735412701410316020625 0ustar  jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class ServiceStatus(object):
    """Represents the status of the app and in some rare cases the agent.

    Code and description are what is stored in the database. "api_status"
    refers to the status which comes back from the REST API.
    """
    _lookup = {}

    def __init__(self, code, description, api_status):
        self._code = code
        self._description = description
        self._api_status = api_status
        ServiceStatus._lookup[code] = self

    @property
    def action_is_allowed(self):
        allowed_statuses = [
            ServiceStatuses.RUNNING._code,
            ServiceStatuses.SHUTDOWN._code,
            ServiceStatuses.CRASHED._code,
            ServiceStatuses.BLOCKED._code,
        ]
        return self._code in allowed_statuses

    @property
    def api_status(self):
        return self._api_status

    @property
    def code(self):
        return self._code

    @property
    def description(self):
        return self._description

    def __eq__(self, other):
        if not isinstance(other, ServiceStatus):
            return False
        return self.code == other.code

    @staticmethod
    def from_code(code):
        if code not in ServiceStatus._lookup:
            msg = 'Status code %s is not a valid ServiceStatus integer code.'
            raise ValueError(msg % code)
        return ServiceStatus._lookup[code]

    @staticmethod
    def from_description(desc):
        all_items = ServiceStatus._lookup.items()
        status_codes = [code for (code, status) in all_items
                        if status.description == desc]
        if not status_codes:
            msg = 'Status description %s is not a valid ServiceStatus.'
            raise ValueError(msg % desc)
        return ServiceStatus._lookup[status_codes[0]]

    @staticmethod
    def is_valid_code(code):
        return code in ServiceStatus._lookup

    def __str__(self):
        return self._description

    def __repr__(self):
        return self._api_status


class ServiceStatuses(object):
    RUNNING = ServiceStatus(0x01, 'running', 'ACTIVE')
    BLOCKED = ServiceStatus(0x02, 'blocked', 'BLOCKED')
    PAUSED = ServiceStatus(0x03, 'paused', 'SHUTDOWN')
    SHUTDOWN = ServiceStatus(0x04, 'shutdown', 'SHUTDOWN')
    CRASHED = ServiceStatus(0x06, 'crashed', 'SHUTDOWN')
    FAILED = ServiceStatus(0x08, 'failed to spawn', 'FAILED')
    BUILDING = ServiceStatus(0x09, 'building', 'BUILD')
    PROMOTING = ServiceStatus(0x10, 'promoting replica', 'PROMOTE')
    EJECTING = ServiceStatus(0x11, 'ejecting replica source', 'EJECT')
    LOGGING = ServiceStatus(0x12, 'transferring guest logs', 'LOGGING')
    UNKNOWN = ServiceStatus(0x16, 'unknown', 'ERROR')
    NEW = ServiceStatus(0x17, 'new', 'NEW')
    DELETED = ServiceStatus(0x05, 'deleted', 'DELETED')
    FAILED_TIMEOUT_GUESTAGENT = ServiceStatus(0x18, 'guestagent error',
                                              'ERROR')
    INSTANCE_READY = ServiceStatus(0x19, 'instance ready', 'BUILD')
    RESTART_REQUIRED = ServiceStatus(0x20, 'restart required',
                                     'RESTART_REQUIRED')


# Dissuade further additions at run-time.
ServiceStatus.__init__ = None
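
# Illustrative sketch (not part of the trove source): because every
# ServiceStatus registers itself in _lookup at class-definition time, an
# instance can be rehydrated from the integer code stored in the database:
#
#     status = ServiceStatus.from_code(0x01)   # ServiceStatuses.RUNNING
#     assert status.api_status == 'ACTIVE'
#     assert status.action_is_allowed          # RUNNING permits actions
#     assert str(ServiceStatus.from_code(0x02)) == 'blocked'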
""" if not service_catalog: raise exception.EmptyCatalog() # per IRC chat, X-Service-Catalog will be a v2 catalog regardless of token # format; see https://bugs.launchpad.net/python-keystoneclient/+bug/1302970 # 'token' key necessary to get past factory validation sc = ServiceCatalog.factory({'token': None, 'serviceCatalog': service_catalog}) urls = sc.get_urls(service_type=service_type, region_name=endpoint_region, endpoint_type=endpoint_type) if not urls: raise exception.NoServiceEndpoint(service_type=service_type, endpoint_region=endpoint_region, endpoint_type=endpoint_type) return urls[0] def dns_client(context): from trove.dns.manager import DnsManager return DnsManager() def guest_client(context, id, manager=None): from trove.guestagent.api import API if manager: clazz = strategy.load_guestagent_strategy(manager).guest_client_class else: clazz = API return clazz(context, id) def nova_client(context): if CONF.nova_compute_url: url = '%(nova_url)s%(tenant)s' % { 'nova_url': normalize_url(CONF.nova_compute_url), 'tenant': context.tenant} else: url = get_endpoint(context.service_catalog, service_type=CONF.nova_compute_service_type, endpoint_region=CONF.os_region_name, endpoint_type=CONF.nova_compute_endpoint_type) client = Client(CONF.nova_client_version, context.user, context.auth_token, bypass_url=url, tenant_id=context.tenant, auth_url=PROXY_AUTH_URL) client.client.auth_token = context.auth_token client.client.management_url = url return client def create_admin_nova_client(context): """ Creates client that uses trove admin credentials :return: a client for nova for the trove admin """ client = create_nova_client(context) client.client.auth_token = None return client def cinder_client(context): if CONF.cinder_url: url = '%(cinder_url)s%(tenant)s' % { 'cinder_url': normalize_url(CONF.cinder_url), 'tenant': context.tenant} else: url = get_endpoint(context.service_catalog, service_type=CONF.cinder_service_type, endpoint_region=CONF.os_region_name, endpoint_type=CONF.cinder_endpoint_type) client = CinderClient.Client(context.user, context.auth_token, project_id=context.tenant, auth_url=PROXY_AUTH_URL) client.client.auth_token = context.auth_token client.client.management_url = url return client def heat_client(context): if CONF.heat_url: url = '%(heat_url)s%(tenant)s' % { 'heat_url': normalize_url(CONF.heat_url), 'tenant': context.tenant} else: url = get_endpoint(context.service_catalog, service_type=CONF.heat_service_type, endpoint_region=CONF.os_region_name, endpoint_type=CONF.heat_endpoint_type) client = HeatClient.Client(token=context.auth_token, os_no_client_auth=True, endpoint=url) return client def swift_client(context): if CONF.swift_url: # swift_url has a different format so doesn't need to be normalized url = '%(swift_url)s%(tenant)s' % {'swift_url': CONF.swift_url, 'tenant': context.tenant} else: url = get_endpoint(context.service_catalog, service_type=CONF.swift_service_type, endpoint_region=CONF.os_region_name, endpoint_type=CONF.swift_endpoint_type) client = Connection(preauthurl=url, preauthtoken=context.auth_token, tenant_name=context.tenant, snet=USE_SNET) return client def neutron_client(context): from neutronclient.v2_0 import client as NeutronClient if CONF.neutron_url: # neutron endpoint url / publicURL does not include tenant segment url = CONF.neutron_url else: url = get_endpoint(context.service_catalog, service_type=CONF.neutron_service_type, endpoint_region=CONF.os_region_name, endpoint_type=CONF.neutron_endpoint_type) client = 
create_dns_client = import_class(CONF.remote_dns_client)
create_guest_client = import_class(CONF.remote_guest_client)
create_nova_client = import_class(CONF.remote_nova_client)
create_swift_client = import_class(CONF.remote_swift_client)
create_cinder_client = import_class(CONF.remote_cinder_client)
create_heat_client = import_class(CONF.remote_heat_client)
create_neutron_client = import_class(CONF.remote_neutron_client)
trove-5.0.0/trove/common/cfg.py0000664000567000056710000022774412701410320017552 0ustar  jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Routines for configuring Trove."""

import os.path

from oslo_config import cfg
from oslo_config.cfg import NoSuchOptError
from oslo_log import log as logging
from oslo_middleware import cors
from osprofiler import opts as profiler

from trove.version import version_info as version

UNKNOWN_SERVICE_ID = 'unknown-service-id-error'

path_opts = [
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the Trove python module is installed.'),
]
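
# Illustrative sketch (not part of the trove source): every datastore gets
# its own option group below (mysql, percona, pxc, ...), which is what lets
# code such as the strategy loaders resolve per-datastore settings via
# CONF.get(manager).<option>. A self-contained illustration with oslo.config:
#
#     from oslo_config import cfg as oslo_cfg
#     conf = oslo_cfg.ConfigOpts()
#     conf.register_group(oslo_cfg.OptGroup('mysql'))
#     conf.register_opts([oslo_cfg.StrOpt('backup_strategy',
#                                         default='InnoBackupEx')],
#                        group='mysql')
#     conf([])  # parse an empty argv so option access is allowed
#     assert conf.get('mysql').backup_strategy == 'InnoBackupEx'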
Used when searching catalog.'), cfg.StrOpt('nova_compute_url', help='URL without the tenant segment.'), cfg.StrOpt('nova_compute_service_type', default='compute', help='Service type to use when searching catalog.'), cfg.StrOpt('nova_compute_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('nova_client_version', default='2.12', help="The version of of the compute service client."), cfg.StrOpt('neutron_url', help='URL without the tenant segment.'), cfg.StrOpt('neutron_service_type', default='network', help='Service type to use when searching catalog.'), cfg.StrOpt('neutron_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('cinder_url', help='URL without the tenant segment.'), cfg.StrOpt('cinder_service_type', default='volumev2', help='Service type to use when searching catalog.'), cfg.StrOpt('cinder_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('heat_url', help='URL without the tenant segment.'), cfg.StrOpt('heat_service_type', default='orchestration', help='Service type to use when searching catalog.'), cfg.StrOpt('heat_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('swift_url', help='URL ending in AUTH_.'), cfg.StrOpt('swift_service_type', default='object-store', help='Service type to use when searching catalog.'), cfg.StrOpt('swift_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('trove_auth_url', default='http://0.0.0.0:5000/v2.0', help='Trove authentication URL.'), cfg.IPOpt('host', default='0.0.0.0', help='Host to listen for RPC messages.'), cfg.IntOpt('report_interval', default=30, help='The interval (in seconds) which periodic tasks are run.'), cfg.BoolOpt('trove_dns_support', default=False, help='Whether Trove should add DNS entries on create ' '(using Designate DNSaaS).'), cfg.StrOpt('db_api_implementation', default='trove.db.sqlalchemy.api', help='API Implementation for Trove database access.'), cfg.StrOpt('dns_driver', default='trove.dns.driver.DnsDriver', help='Driver for DNSaaS.'), cfg.StrOpt('dns_instance_entry_factory', default='trove.dns.driver.DnsInstanceEntryFactory', help='Factory for adding DNS entries.'), cfg.StrOpt('dns_hostname', default="", help='Hostname used for adding DNS entries.'), cfg.StrOpt('dns_account_id', default="", help='Tenant ID for DNSaaS.'), cfg.StrOpt('dns_endpoint_url', default="0.0.0.0", help='Endpoint URL for DNSaaS.'), cfg.StrOpt('dns_service_type', default="", help='Service Type for DNSaaS.'), cfg.StrOpt('dns_region', default="", help='Region name for DNSaaS.'), cfg.StrOpt('dns_auth_url', default="", help='Authentication URL for DNSaaS.'), cfg.StrOpt('dns_domain_name', default="", help='Domain name used for adding DNS entries.'), cfg.StrOpt('dns_username', default="", secret=True, help='Username for DNSaaS.'), cfg.StrOpt('dns_passkey', default="", secret=True, help='Passkey for DNSaaS.'), cfg.StrOpt('dns_management_base_url', default="", help='Management URL for DNSaaS.'), cfg.IntOpt('dns_ttl', default=300, help='Time (in seconds) before a refresh of DNS information ' 'occurs.'), cfg.StrOpt('dns_domain_id', default="", help='Domain ID used for adding DNS entries.'), cfg.IntOpt('users_page_size', default=20, help='Page size for listing users.'), cfg.IntOpt('databases_page_size', default=20, help='Page size for listing databases.'), 
cfg.IntOpt('instances_page_size', default=20, help='Page size for listing instances.'), cfg.IntOpt('clusters_page_size', default=20, help='Page size for listing clusters.'), cfg.IntOpt('backups_page_size', default=20, help='Page size for listing backups.'), cfg.IntOpt('configurations_page_size', default=20, help='Page size for listing configurations.'), cfg.IntOpt('modules_page_size', default=20, help='Page size for listing modules.'), cfg.IntOpt('agent_call_low_timeout', default=5, help="Maximum time (in seconds) to wait for Guest Agent 'quick'" "requests (such as retrieving a list of users or " "databases)."), cfg.IntOpt('agent_call_high_timeout', default=60, help="Maximum time (in seconds) to wait for Guest Agent 'slow' " "requests (such as restarting the database)."), cfg.IntOpt('agent_replication_snapshot_timeout', default=36000, help='Maximum time (in seconds) to wait for taking a Guest ' 'Agent replication snapshot.'), # The guest_id opt definition must match the one in cmd/guest.py cfg.StrOpt('guest_id', default=None, help="ID of the Guest Instance."), cfg.IntOpt('state_change_wait_time', default=3 * 60, help='Maximum time (in seconds) to wait for a state change.'), cfg.IntOpt('state_change_poll_time', default=3, help='Interval between state change poll requests (seconds).'), cfg.IntOpt('agent_heartbeat_time', default=10, help='Maximum time (in seconds) for the Guest Agent to reply ' 'to a heartbeat request.'), cfg.IntOpt('agent_heartbeat_expiry', default=60, help='Time (in seconds) after which a guest is considered ' 'unreachable'), cfg.IntOpt('num_tries', default=3, help='Number of times to check if a volume exists.'), cfg.StrOpt('volume_fstype', default='ext3', help='File system type used to format a volume.'), cfg.StrOpt('cinder_volume_type', default=None, help='Volume type to use when provisioning a Cinder volume.'), cfg.StrOpt('format_options', default='-m 5', help='Options to use when formatting a volume.'), cfg.IntOpt('volume_format_timeout', default=120, help='Maximum time (in seconds) to wait for a volume format.'), cfg.StrOpt('mount_options', default='defaults,noatime', help='Options to use when mounting a volume.'), cfg.IntOpt('max_instances_per_tenant', default=5, help='Default maximum number of instances per tenant.', deprecated_name='max_instances_per_user'), cfg.IntOpt('max_accepted_volume_size', default=5, help='Default maximum volume size (in GB) for an instance.'), cfg.IntOpt('max_volumes_per_tenant', default=20, help='Default maximum volume capacity (in GB) spanning across ' 'all Trove volumes per tenant.', deprecated_name='max_volumes_per_user'), cfg.IntOpt('max_backups_per_tenant', default=50, help='Default maximum number of backups created by a tenant.', deprecated_name='max_backups_per_user'), cfg.StrOpt('quota_driver', default='trove.quota.quota.DbQuotaDriver', help='Default driver to use for quota checks.'), cfg.StrOpt('taskmanager_queue', default='taskmanager', help='Message queue name the Taskmanager will listen to.'), cfg.StrOpt('conductor_queue', default='trove-conductor', help='Message queue name the Conductor will listen on.'), cfg.IntOpt('trove_conductor_workers', help='Number of workers for the Conductor service. 
The default ' 'will be the number of CPUs available.'), cfg.BoolOpt('use_nova_server_config_drive', default=False, help='Use config drive for file injection when booting ' 'instance.'), cfg.BoolOpt('use_nova_server_volume', default=False, help='Whether to provision a Cinder volume for the ' 'Nova instance.'), cfg.BoolOpt('use_heat', default=False, help='Use Heat for provisioning.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('default_datastore', default=None, help='The default datastore id or name to use if one is not ' 'provided by the user. If the default value is None, the field ' 'becomes required in the instance create request.'), cfg.StrOpt('datastore_manager', default=None, help='Manager class in the Guest Agent, set up by the ' 'Taskmanager on instance provision.'), cfg.StrOpt('block_device_mapping', default='vdb', help='Block device to map onto the created instance.'), cfg.IntOpt('server_delete_time_out', default=60, help='Maximum time (in seconds) to wait for a server delete.'), cfg.IntOpt('volume_time_out', default=60, help='Maximum time (in seconds) to wait for a volume attach.'), cfg.IntOpt('heat_time_out', default=60, help='Maximum time (in seconds) to wait for a Heat request to ' 'complete.'), cfg.IntOpt('reboot_time_out', default=60 * 2, help='Maximum time (in seconds) to wait for a server reboot.'), cfg.IntOpt('dns_time_out', default=60 * 2, help='Maximum time (in seconds) to wait for a DNS entry add.'), cfg.IntOpt('resize_time_out', default=60 * 10, help='Maximum time (in seconds) to wait for a server resize.'), cfg.IntOpt('revert_time_out', default=60 * 10, help='Maximum time (in seconds) to wait for a server resize ' 'revert.'), cfg.IntOpt('cluster_delete_time_out', default=60 * 3, help='Maximum time (in seconds) to wait for a cluster delete.'), cfg.ListOpt('root_grant', default=['ALL'], help="Permissions to grant to the 'root' user."), cfg.BoolOpt('root_grant_option', default=True, help="Assign the 'root' user GRANT permissions."), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.'), cfg.IntOpt('http_get_rate', default=200, help="Maximum number of HTTP 'GET' requests (per minute)."), cfg.IntOpt('http_post_rate', default=200, help="Maximum number of HTTP 'POST' requests (per minute)."), cfg.IntOpt('http_delete_rate', default=200, help="Maximum number of HTTP 'DELETE' requests (per minute)."), cfg.IntOpt('http_put_rate', default=200, help="Maximum number of HTTP 'PUT' requests (per minute)."), cfg.IntOpt('http_mgmt_post_rate', default=200, help="Maximum number of management HTTP 'POST' requests " "(per minute)."), cfg.BoolOpt('hostname_require_valid_ip', default=True, help='Require user hostnames to be valid IP addresses.', deprecated_name='hostname_require_ipv4'), cfg.BoolOpt('trove_security_groups_support', default=True, help='Whether Trove should add Security Groups on create.'), cfg.StrOpt('trove_security_group_name_prefix', default='SecGroup', help='Prefix to use when creating Security Groups.'), cfg.StrOpt('trove_security_group_rule_cidr', default='0.0.0.0/0', help='CIDR to use when creating Security Group Rules.'), cfg.IntOpt('trove_api_workers', help='Number of workers for the API service. 
The default will ' 'be the number of CPUs available.'), cfg.IntOpt('usage_sleep_time', default=5, help='Time to sleep during the check for an active Guest.'), cfg.StrOpt('region', default='LOCAL_DEV', help='The region this service is located.'), cfg.StrOpt('backup_runner', default='trove.guestagent.backup.backup_types.InnoBackupEx', help='Runner to use for backups.'), cfg.DictOpt('backup_runner_options', default={}, help='Additional options to be passed to the backup runner.'), cfg.BoolOpt('verify_swift_checksum_on_restore', default=True, help='Enable verification of Swift checksum before starting ' 'restore. Makes sure the checksum of original backup matches ' 'the checksum of the Swift backup file.'), cfg.StrOpt('storage_strategy', default='SwiftStorage', help="Default strategy to store backups."), cfg.StrOpt('storage_namespace', default='trove.common.strategies.storage.swift', help='Namespace to load the default storage strategy from.'), cfg.StrOpt('backup_swift_container', default='database_backups', help='Swift container to put backups in.'), cfg.BoolOpt('backup_use_gzip_compression', default=True, help='Compress backups using gzip.'), cfg.BoolOpt('backup_use_openssl_encryption', default=True, help='Encrypt backups using OpenSSL.'), cfg.StrOpt('backup_aes_cbc_key', default='default_aes_cbc_key', help='Default OpenSSL aes_cbc key.'), cfg.BoolOpt('backup_use_snet', default=False, help='Send backup files over snet.'), cfg.IntOpt('backup_chunk_size', default=2 ** 16, help='Chunk size (in bytes) to stream to the Swift container. ' 'This should be in multiples of 128 bytes, since this is the ' 'size of an md5 digest block allowing the process to update ' 'the file checksum during streaming. ' 'See: http://stackoverflow.com/questions/1131220/'), cfg.IntOpt('backup_segment_max_size', default=2 * (1024 ** 3), help='Maximum size (in bytes) of each segment of the backup ' 'file.'), cfg.StrOpt('remote_dns_client', default='trove.common.remote.dns_client', help='Client to send DNS calls to.'), cfg.StrOpt('remote_guest_client', default='trove.common.remote.guest_client', help='Client to send Guest Agent calls to.'), cfg.StrOpt('remote_nova_client', default='trove.common.remote.nova_client', help='Client to send Nova calls to.'), cfg.StrOpt('remote_neutron_client', default='trove.common.remote.neutron_client', help='Client to send Neutron calls to.'), cfg.StrOpt('remote_cinder_client', default='trove.common.remote.cinder_client', help='Client to send Cinder calls to.'), cfg.StrOpt('remote_heat_client', default='trove.common.remote.heat_client', help='Client to send Heat calls to.'), cfg.StrOpt('remote_swift_client', default='trove.common.remote.swift_client', help='Client to send Swift calls to.'), cfg.StrOpt('exists_notification_transformer', help='Transformer for exists notifications.'), cfg.IntOpt('exists_notification_interval', default=3600, help='Seconds to wait between pushing events.'), cfg.IntOpt('quota_notification_interval', default=3600, help='Seconds to wait between pushing events.'), cfg.DictOpt('notification_service_id', default={'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'percona': 'fd1723f5-68d2-409c-994f-a4a197892a17', 'pxc': '75a628c3-f81b-4ffb-b10a-4087c26bc854', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 
'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'mariadb': '7a4f82cc-10d2-4bc6-aadc-d9aacc2a3cb5'}, help='Unique ID to tag notification events.'), cfg.StrOpt('nova_proxy_admin_user', default='', help="Admin username used to connect to Nova.", secret=True), cfg.StrOpt('nova_proxy_admin_pass', default='', help="Admin password used to connect to Nova.", secret=True), cfg.StrOpt('nova_proxy_admin_tenant_id', default='', help="Admin tenant ID used to connect to Nova.", secret=True), cfg.StrOpt('nova_proxy_admin_tenant_name', default='', help="Admin tenant name used to connect to Nova.", secret=True), cfg.StrOpt('network_label_regex', default='^private$', help='Regular expression to match Trove network labels.'), cfg.StrOpt('ip_regex', default=None, help='List IP addresses that match this regular expression.'), cfg.StrOpt('black_list_regex', default=None, help='Exclude IP addresses that match this regular ' 'expression.'), cfg.StrOpt('cloudinit_location', default='/etc/trove/cloudinit', help='Path to folder with cloudinit scripts.'), cfg.StrOpt('injected_config_location', default='/etc/trove/conf.d', help='Path to folder on the Guest where config files will be ' 'injected during instance creation.'), cfg.StrOpt('guest_config', default='/etc/trove/trove-guestagent.conf', help='Path to the Guest Agent config file to be injected ' 'during instance creation.'), cfg.StrOpt('guest_info', default='guest_info.conf', help='The guest info filename found in the injected config ' 'location. If a full path is specified then it will ' 'be used as the path to the guest info file'), cfg.DictOpt('datastore_registry_ext', default=dict(), help='Extension for default datastore managers. ' 'Allows the use of custom managers for each of ' 'the datastores supported by Trove.'), cfg.StrOpt('template_path', default='/etc/trove/templates/', help='Path which leads to datastore templates.'), cfg.BoolOpt('sql_query_logging', default=False, help='Allow insecure logging while ' 'executing queries through SQLAlchemy.'), cfg.ListOpt('expected_filetype_suffixes', default=['json'], help='Filetype endings not to be reattached to an ID ' 'by the utils method correct_id_with_req.'), cfg.ListOpt('default_neutron_networks', default=[], help='List of IDs for management networks which should be ' 'attached to the instance regardless of what NICs ' 'are specified in the create API call.'), cfg.IntOpt('max_header_line', default=16384, help='Maximum line size of message headers to be accepted. 
' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs).'), cfg.StrOpt('conductor_manager', default='trove.conductor.manager.Manager', help='Qualified class name to use for conductor manager.'), cfg.StrOpt('network_driver', default='trove.network.nova.NovaNetwork', help="Describes the actual network manager used for " "the management of network attributes " "(security groups, floating IPs, etc.)."), cfg.IntOpt('usage_timeout', default=900, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.IntOpt('restore_usage_timeout', default=36000, help='Maximum time (in seconds) to wait for a Guest instance ' 'restored from a backup to become active.'), cfg.IntOpt('cluster_usage_timeout', default=36000, help='Maximum time (in seconds) to wait for a cluster to ' 'become active.'), cfg.IntOpt('timeout_wait_for_service', default=120, help='Maximum time (in seconds) to wait for a service to ' 'become alive.'), cfg.StrOpt('module_aes_cbc_key', default='module_aes_cbc_key', help='OpenSSL aes_cbc key for module encryption.'), cfg.ListOpt('module_types', default=['ping'], help='A list of module types supported. A module type ' 'corresponds to the name of a ModuleDriver.'), cfg.StrOpt('guest_log_container_name', default='database_logs', help='Name of container that stores guest log components.'), cfg.IntOpt('guest_log_limit', default=1000000, help='Maximum size of a chunk saved in guest log container.'), cfg.IntOpt('guest_log_expiry', default=2592000, help='Expiry (in seconds) of objects in guest log container.'), ] database_opts = [ cfg.StrOpt('connection', default='sqlite:///trove_test.sqlite', help='SQL Connection.', secret=True, deprecated_name='sql_connection', deprecated_group='DEFAULT'), cfg.IntOpt('idle_timeout', default=3600, deprecated_name='sql_idle_timeout', deprecated_group='DEFAULT'), cfg.BoolOpt('query_log', default=False, deprecated_name='sql_query_log', deprecated_group='DEFAULT', deprecated_for_removal=True), cfg.BoolOpt('sqlite_synchronous', default=True, help='If True, SQLite uses synchronous mode.'), cfg.StrOpt('slave_connection', secret=True, help='The SQLAlchemy connection string to use to connect to the' ' slave database.'), cfg.StrOpt('mysql_sql_mode', default='TRADITIONAL', help='The SQL mode to be used for MySQL sessions. ' 'This option, including the default, overrides any ' 'server-set SQL mode. To use whatever SQL mode ' 'is set by the server configuration, ' 'set this to no value. Example: mysql_sql_mode='), cfg.IntOpt('max_pool_size', help='Maximum number of SQL connections to keep open in a ' 'pool.'), cfg.IntOpt('max_retries', default=10, help='Maximum number of database connection retries ' 'during startup. 
Set to -1 to specify an infinite ' 'retry count.'), cfg.IntOpt('retry_interval', default=10, help='Interval between retries of opening a SQL connection.'), cfg.IntOpt('max_overflow', help='If set, use this value for max_overflow with ' 'SQLAlchemy.'), cfg.IntOpt('connection_debug', default=0, help='Verbosity of SQL debugging information: 0=None, ' '100=Everything.'), cfg.BoolOpt('connection_trace', default=False, help='Add Python stack traces to SQL as comment strings.'), cfg.IntOpt('pool_timeout', help='If set, use this value for pool_timeout with ' 'SQLAlchemy.'), ] # Datastore specific option groups # Mysql mysql_group = cfg.OptGroup( 'mysql', title='MySQL options', help="Oslo option group designed for MySQL datastore") mysql_opts = [ cfg.ListOpt('tcp_ports', default=["3306"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.mysql_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=400, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.mysql.service.MySQLRootController', help='Root controller implementation for mysql.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take ' 'in order to be logged in the slow_query log.'), ] # Percona percona_group = cfg.OptGroup( 'percona', title='Percona options', help="Oslo option group designed for Percona datastore") percona_opts = [ cfg.ListOpt('tcp_ports', default=["3306"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.mysql_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('replication_user', default='slave_user', help='Userid for replication slave.'), cfg.StrOpt('replication_password', default='NETOU7897NNLOU', help='Password for replication slave user.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=450, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for percona.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take ' 'in order to be logged in the slow_query log.'), ] # Percona XtraDB Cluster pxc_group = cfg.OptGroup( 'pxc', title='Percona XtraDB Cluster options', help="Oslo option group designed for Percona XtraDB Cluster datastore") pxc_opts = [ cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.mysql_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('replication_user', default='slave_user', help='Userid for replication slave.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=450, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root', 'clusterrepuser'], help='Users to exclude when listing users.'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.IntOpt('min_cluster_member_count', default=3, help='Minimum number of members in PXC cluster.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.api.GaleraCommonAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.taskmanager.GaleraCommonTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.guestagent.GaleraCommonGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('root_controller', default='trove.extensions.pxc.service.PxcRootController', help='Root controller implementation for pxc.'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take ' 'in order to be logged in the slow_query log.'), ] # Redis redis_group = cfg.OptGroup( 'redis', title='Redis options', help="Oslo option group designed for Redis datastore") redis_opts = [ cfg.ListOpt('tcp_ports', default=["6379", "16379"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='RedisBackup', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='RedisSyncReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.experimental.' 'redis_sync', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/redis', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default="trove.guestagent.strategies.backup.experimental." "redis_impl", help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default="trove.guestagent.strategies.restore.experimental." 
"redis_impl", help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'redis.api.RedisAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.redis.' 'taskmanager.RedisTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'redis.guestagent.RedisGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for redis.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), ] # Cassandra cassandra_group = cfg.OptGroup( 'cassandra', title='Cassandra options', help="Oslo option group designed for Cassandra datastore") cassandra_opts = [ cfg.ListOpt('tcp_ports', default=["7000", "7001", "7199", "9042", "9160"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental strategy based on the default backup ' 'strategy. For strategies that do not implement incremental ' 'backups, the runner performs full backup instead.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('backup_strategy', default="NodetoolSnapshot", help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/cassandra', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default="trove.guestagent.strategies.backup.experimental." "cassandra_impl", help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default="trove.guestagent.strategies.restore.experimental." 
"cassandra_impl", help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.cassandra.service' '.CassandraRootController', help='Root controller implementation for Cassandra.'), cfg.ListOpt('ignore_users', default=['os_admin'], help='Users to exclude when listing users.'), cfg.ListOpt('ignore_dbs', default=['system', 'system_auth', 'system_traces'], help='Databases to exclude when listing databases.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'cassandra.api.CassandraAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental' '.cassandra.taskmanager.CassandraTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental' '.cassandra.guestagent.CassandraGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), ] # Couchbase couchbase_group = cfg.OptGroup( 'couchbase', title='Couchbase options', help="Oslo option group designed for Couchbase datastore") couchbase_opts = [ cfg.ListOpt('tcp_ports', default=["8091", "8092", "4369", "11209-11211", "21100-21199"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='CbBackup', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/couchbase', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'couchbase_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 
'couchbase_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for couchbase.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), ] # MongoDB mongodb_group = cfg.OptGroup( 'mongodb', title='MongoDB options', help="Oslo option group designed for MongoDB datastore") mongodb_opts = [ cfg.ListOpt('tcp_ports', default=["2500", "27017"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='MongoDump', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/mongodb', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.IntOpt('num_config_servers_per_cluster', default=3, help='The number of config servers to create per cluster.'), cfg.IntOpt('num_query_routers_per_cluster', default=1, help='The number of query routers (mongos) to create ' 'per cluster.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.BoolOpt('cluster_secure', default=True, help='Create secure clusters. If False then the ' 'Role-Based Access Control will be disabled.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'mongodb.api.MongoDbAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.mongodb.' 'taskmanager.MongoDbTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'mongodb.guestagent.MongoDbGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'mongo_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 
'mongo_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.PortOpt('mongodb_port', default=27017, help='Port for mongod and mongos instances.'), cfg.PortOpt('configsvr_port', default=27019, help='Port for instances running as config servers.'), cfg.ListOpt('ignore_dbs', default=['admin', 'local', 'config'], help='Databases to exclude when listing databases.'), cfg.ListOpt('ignore_users', default=['admin.os_admin', 'admin.root'], help='Users to exclude when listing users.'), cfg.IntOpt('add_members_timeout', default=300, help='Maximum time to wait (in seconds) for a replica set ' 'initialization process to complete.'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for mongodb.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), ] # PostgreSQL postgresql_group = cfg.OptGroup( 'postgresql', title='PostgreSQL options', help="Oslo option group for the PostgreSQL datastore.") postgresql_opts = [ cfg.ListOpt('tcp_ports', default=["5432"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.PortOpt('postgresql_port', default=5432, help='The TCP port the server listens on.'), cfg.StrOpt('backup_strategy', default='PgDump', help='Default strategy to perform backups.'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.StrOpt('mount_point', default='/var/lib/postgresql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'postgresql_impl', help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 'postgresql_impl', help='Namespace to load restore strategies from.'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb'), cfg.ListOpt('ignore_users', default=['os_admin', 'postgres', 'root']), cfg.ListOpt('ignore_dbs', default=['postgres']), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for postgresql.'), cfg.StrOpt('guest_log_exposed_logs', default='general', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=0, help="The time in milliseconds that a statement must take " "in order to be logged in the 'general' log. 
A value of " "'0' logs all statements, while '-1' turns off " "statement logging."), ] # Apache CouchDB couchdb_group = cfg.OptGroup( 'couchdb', title='CouchDB options', help="Oslo option group designed for CouchDB datastore") couchdb_opts = [ cfg.ListOpt('tcp_ports', default=["5984"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('mount_point', default='/var/lib/couchdb', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_strategy', default='CouchDBBackup', help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies' '.backup.experimental.couchdb_impl', help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies' '.restore.experimental.couchdb_impl', help='Namespace to load restore strategies from.'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' 'instance-create as the "password" field.'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for couchdb.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['_users', '_replicator'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), ] # Vertica vertica_group = cfg.OptGroup( 'vertica', title='Vertica options', help="Oslo option group designed for Vertica datastore") vertica_opts = [ cfg.ListOpt('tcp_ports', default=["5433", "5434", "22", "5444", "5450", "4803"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=["5433", "4803", "4804", "6453"], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default=None, help='Default strategy to perform backups.'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/vertica', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default=None, help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default=None, help='Namespace to load restore strategies from.'), cfg.IntOpt('readahead_size', default=2048, help='Size(MB) to be set as readahead_size for data volume'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.IntOpt('cluster_member_count', default=3, help='Number of members in Vertica cluster.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.vertica.' 'api.VerticaAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.vertica.' 'taskmanager.VerticaTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.vertica.' 'guestagent.VerticaGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('root_controller', default='trove.extensions.vertica.service.' 'VerticaRootController', help='Root controller implementation for Vertica.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('min_ksafety', default=0, help='Minimum k-safety setting permitted for vertica clusters'), ] # DB2 db2_group = cfg.OptGroup( 'db2', title='DB2 options', help="Oslo option group designed for DB2 datastore") db2_opts = [ cfg.ListOpt('tcp_ports', default=["50000"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('mount_point', default="/home/db2inst1/db2inst1", help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_strategy', default='DB2Backup', help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 
'db2_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 'db2_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.ListOpt('ignore_users', default=['PUBLIC', 'DB2INST1']), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for db2.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), ] # MariaDB mariadb_group = cfg.OptGroup( 'mariadb', title='MariaDB options', help="Oslo option group designed for MariaDB datastore") mariadb_opts = [ cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MariaDBGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.experimental' '.mariadb_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=400, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for mariadb.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take ' 'in order to be logged in the slow_query log.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.IntOpt('min_cluster_member_count', default=3, help='Minimum number of members in MariaDB cluster.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.api.GaleraCommonAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.taskmanager.GaleraCommonTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.guestagent.GaleraCommonGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), ] # RPC version groups upgrade_levels = cfg.OptGroup( 'upgrade_levels', title='RPC upgrade levels group for handling versions', help='Contains the support version caps for each RPC API') rpcapi_cap_opts = [ cfg.StrOpt( 'taskmanager', default="icehouse", help='Set a version cap for messages sent to taskmanager services'), cfg.StrOpt( 'guestagent', default="icehouse", help='Set a version cap for messages sent to guestagent services'), cfg.StrOpt( 'conductor', default="icehouse", help='Set a version cap for messages sent to conductor services'), ] CONF = cfg.CONF CONF.register_opts(path_opts) CONF.register_opts(common_opts) CONF.register_opts(database_opts, 'database') CONF.register_group(mysql_group) CONF.register_group(percona_group) CONF.register_group(pxc_group) CONF.register_group(redis_group) CONF.register_group(cassandra_group) CONF.register_group(couchbase_group) CONF.register_group(mongodb_group) CONF.register_group(postgresql_group) CONF.register_group(couchdb_group) CONF.register_group(vertica_group) CONF.register_group(db2_group) CONF.register_group(mariadb_group) CONF.register_opts(mysql_opts, mysql_group) CONF.register_opts(percona_opts, percona_group) CONF.register_opts(pxc_opts, pxc_group) CONF.register_opts(redis_opts, redis_group) CONF.register_opts(cassandra_opts, cassandra_group) CONF.register_opts(couchbase_opts, couchbase_group) CONF.register_opts(mongodb_opts, mongodb_group) CONF.register_opts(postgresql_opts, postgresql_group) CONF.register_opts(couchdb_opts, couchdb_group) CONF.register_opts(vertica_opts, vertica_group) CONF.register_opts(db2_opts, db2_group) CONF.register_opts(mariadb_opts, mariadb_group) CONF.register_opts(rpcapi_cap_opts, upgrade_levels) 
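# Usage sketch (illustrative only; the helper name below is hypothetical):
# once the groups above are registered, datastore-specific options are read
# through attribute access on CONF, with each option falling back to the
# default declared above when no config file overrides it.
def _example_datastore_option_access():
    """Show group-based option access; comments give the declared defaults."""
    mysql_mount = CONF.mysql.mount_point        # '/var/lib/mysql'
    redis_backup = CONF.redis.backup_strategy   # 'RedisBackup'
    # RPC version caps live in the 'upgrade_levels' group:
    taskmanager_cap = CONF.upgrade_levels.taskmanager   # 'icehouse'
    return mysql_mount, redis_backup, taskmanager_cap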
profiler.set_defaults(CONF) logging.register_options(CONF) def custom_parser(parsername, parser): CONF.register_cli_opt(cfg.SubCommandOpt(parsername, handler=parser)) def parse_args(argv, default_config_files=None): cfg.CONF(args=argv[1:], project='trove', version=version.cached_version_string(), default_config_files=default_config_files) def get_ignored_dbs(manager=None): try: return get_configuration_property('ignore_dbs', manager=manager) except NoSuchOptError: return [] def get_ignored_users(manager=None): try: return get_configuration_property('ignore_users', manager=manager) except NoSuchOptError: return [] def get_configuration_property(property_name, manager=None): """ Get a configuration property. Try to get it from the datastore-specific section first. If it is not available, retrieve it from the DEFAULT section. """ # TODO(pmalik): Note that the unit and fake-integration tests # do not define 'CONF.datastore_manager'. *MySQL* options will # be loaded unless the caller passes a manager name explicitly. # # Once the tests are fixed this conditional expression should be removed # and the proper value should always be either loaded from # 'CONF.datastore_manager' or passed-in by the caller. datastore_manager = manager or CONF.datastore_manager or 'mysql' try: return CONF.get(datastore_manager).get(property_name) except NoSuchOptError: return CONF.get(property_name) def set_api_config_defaults(): """This method updates all configuration default values.""" # CORS Middleware Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) trove-5.0.0/trove/common/serializable_notification.py0000664000567000056710000000224312701410316024225 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.utils import import_class class SerializableNotification(object): @staticmethod def serialize(context, notification): serialized = notification.serialize(context) serialized['notification_classname'] = ( notification.__module__ + "." + type(notification).__name__) return serialized @staticmethod def deserialize(context, serialized): classname = serialized.pop('notification_classname') notification_class = import_class(classname) return notification_class(context, **serialized) trove-5.0.0/trove/common/api.py0000664000567000056710000003216512701410316017570 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import routes from trove.backup.service import BackupController from trove.cluster.service import ClusterController from trove.common import wsgi from trove.configuration.service import ConfigurationsController from trove.configuration.service import ParametersController from trove.datastore.service import DatastoreController from trove.flavor.service import FlavorController from trove.instance.service import InstanceController from trove.limits.service import LimitsController from trove.module.service import ModuleController from trove.versions import VersionsController class API(wsgi.Router): """Defines the API routes.""" def __init__(self): mapper = routes.Mapper() super(API, self).__init__(mapper) self._instance_router(mapper) self._cluster_router(mapper) self._datastore_router(mapper) self._flavor_router(mapper) self._versions_router(mapper) self._limits_router(mapper) self._backups_router(mapper) self._configurations_router(mapper) self._modules_router(mapper) def _versions_router(self, mapper): versions_resource = VersionsController().create_resource() mapper.connect("/", controller=versions_resource, action="show", conditions={'method': ['GET']}) def _datastore_router(self, mapper): datastore_resource = DatastoreController().create_resource() mapper.resource("datastore", "/{tenant_id}/datastores", controller=datastore_resource) mapper.connect("/{tenant_id}/datastores/{datastore}/versions", controller=datastore_resource, action="version_index") mapper.connect("/{tenant_id}/datastores/{datastore}/versions/{id}", controller=datastore_resource, action="version_show") mapper.connect( "/{tenant_id}/datastores/{datastore}/versions/" "{version_id}/flavors", controller=datastore_resource, action="list_associated_flavors", conditions={'method': ['GET']} ) mapper.connect("/{tenant_id}/datastores/versions/{uuid}", controller=datastore_resource, action="version_show_by_uuid") def _instance_router(self, mapper): instance_resource = InstanceController().create_resource() mapper.connect("/{tenant_id}/instances", controller=instance_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances", controller=instance_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/action", controller=instance_resource, action="action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="update", conditions={'method': ['PUT']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="edit", conditions={'method': ['PATCH']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="delete", conditions={'method': ['DELETE']}) mapper.connect("/{tenant_id}/instances/{id}/backups", controller=instance_resource, action="backups", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/configuration", controller=instance_resource, action="configuration", conditions={'method': 
['GET']}) mapper.connect("/{tenant_id}/instances/{id}/log", controller=instance_resource, action="guest_log_list", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/log", controller=instance_resource, action="guest_log_action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}/modules", controller=instance_resource, action="module_list", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/modules", controller=instance_resource, action="module_apply", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}/modules/{module_id}", controller=instance_resource, action="module_remove", conditions={'method': ['DELETE']}) def _cluster_router(self, mapper): cluster_resource = ClusterController().create_resource() mapper.connect("/{tenant_id}/clusters", controller=cluster_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/clusters/{id}", controller=cluster_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/clusters", controller=cluster_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/clusters/{id}", controller=cluster_resource, action="action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/clusters/{cluster_id}/instances/" "{instance_id}", controller=cluster_resource, action="show_instance", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/clusters/{id}", controller=cluster_resource, action="delete", conditions={'method': ['DELETE']}) def _flavor_router(self, mapper): flavor_resource = FlavorController().create_resource() mapper.connect("/{tenant_id}/flavors", controller=flavor_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/flavors/{id}", controller=flavor_resource, action="show", conditions={'method': ['GET']}) def _limits_router(self, mapper): limits_resource = LimitsController().create_resource() mapper.connect("/{tenant_id}/limits", controller=limits_resource, action="index", conditions={'method': ['GET']}) def _backups_router(self, mapper): backups_resource = BackupController().create_resource() mapper.connect("/{tenant_id}/backups", controller=backups_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/backups", controller=backups_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/backups/{id}", controller=backups_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/backups/{id}", controller=backups_resource, action="action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/backups/{id}", controller=backups_resource, action="delete", conditions={'method': ['DELETE']}) def _modules_router(self, mapper): modules_resource = ModuleController().create_resource() mapper.resource("modules", "/{tenant_id}/modules", controller=modules_resource) mapper.connect("/{tenant_id}/modules", controller=modules_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/modules", controller=modules_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/modules/{id}", controller=modules_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/modules/{id}", controller=modules_resource, action="update", conditions={'method': ['PUT']}) mapper.connect("/{tenant_id}/modules/{id}", controller=modules_resource, action="delete", 
conditions={'method': ['DELETE']}) mapper.connect("/{tenant_id}/modules/{id}/instances", controller=modules_resource, action="instances", conditions={'method': ['GET']}) def _configurations_router(self, mapper): parameters_resource = ParametersController().create_resource() path = '/{tenant_id}/datastores/versions/{version}/parameters' mapper.connect(path, controller=parameters_resource, action='index_by_version', conditions={'method': ['GET']}) path = '/{tenant_id}/datastores/versions/{version}/parameters/{name}' mapper.connect(path, controller=parameters_resource, action='show_by_version', conditions={'method': ['GET']}) path = '/{tenant_id}/datastores/{datastore}/versions/{id}' mapper.connect(path + '/parameters', controller=parameters_resource, action='index', conditions={'method': ['GET']}) mapper.connect(path + '/parameters/{name}', controller=parameters_resource, action='show', conditions={'method': ['GET']}) configuration_resource = ConfigurationsController().create_resource() mapper.connect('/{tenant_id}/configurations', controller=configuration_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/{tenant_id}/configurations', controller=configuration_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='show', conditions={'method': ['GET']}) mapper.connect('/{tenant_id}/configurations/{id}/instances', controller=configuration_resource, action='instances', conditions={'method': ['GET']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='edit', conditions={'method': ['PATCH']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='delete', conditions={'method': ['DELETE']}) def app_factory(global_conf, **local_conf): return API() trove-5.0.0/trove/common/template.py0000664000567000056710000001227012701410316020625 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
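# Usage sketch (illustrative; the argument values are hypothetical): the
# classes below resolve Jinja templates most-specific-first. For a datastore
# version named 'mysql' at version '5.6', get_template() asks jinja2's
# select_template() for 'mysql/5.6/config.template', then
# 'mysql/config.template', then '<manager>/config.template', and the first
# template that exists wins.
#
#     tmpl = SingleInstanceConfigTemplate(datastore_version,
#                                         flavor_dict={'ram': 2048},
#                                         instance_id='an-instance-uuid')
#     contents = tmpl.render()       # rendered config file as a string
#     defaults = tmpl.render_dict()  # same config parsed into a dict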
import jinja2 from oslo_config import cfg as oslo_config from oslo_log import log as logging from trove.common import cfg from trove.common import configurations from trove.common import exception from trove.common.i18n import _ from trove.common import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) ENV = utils.ENV SERVICE_PARSERS = { 'mongodb': configurations.MongoDBConfParser, 'mysql': configurations.MySQLConfParser, 'percona': configurations.MySQLConfParser, 'postgresql': configurations.PostgresqlConfParser, 'cassandra': configurations.CassandraConfParser, 'redis': configurations.RedisConfParser, 'vertica': configurations.VerticaConfParser, } class SingleInstanceConfigTemplate(object): """This class selects a single configuration file by database type for rendering on the guest """ template_name = "config.template" def __init__(self, datastore_version, flavor_dict, instance_id): """Constructor :param datastore_version: The datastore version. :type datastore_version: DatastoreVersion :param flavor_dict: dict containing flavor details for use in jinja. :type flavor_dict: dict. :param instance_id: trove instance id :type instance_id: str """ self.flavor_dict = flavor_dict self.datastore_version = datastore_version # TODO(tim.simpson): The current definition of datastore_version is a # bit iffy and I believe will change soon, so I'm # creating a dictionary here for jinja to consume # rather than pass in the datastore version object. self.datastore_dict = { 'name': self.datastore_version.datastore_name, 'manager': self.datastore_version.manager, 'version': self.datastore_version.name, } self.instance_id = instance_id def get_template(self): patterns = ['{name}/{version}/{template_name}', '{name}/{template_name}', '{manager}/{template_name}'] context = self.datastore_dict.copy() context['template_name'] = self.template_name names = [name.format(**context) for name in patterns] return ENV.select_template(names) def render(self, **kwargs): """Renders the jinja template :returns: str -- The rendered configuration file """ template = self.get_template() server_id = self._calculate_unique_id() self.config_contents = template.render( flavor=self.flavor_dict, datastore=self.datastore_dict, server_id=server_id, **kwargs) return self.config_contents def render_dict(self): """ Renders the default configuration template file as a dictionary to apply the default configuration dynamically. 
""" config = self.render() cfg_parser = SERVICE_PARSERS.get(self.datastore_version.manager) if not cfg_parser: raise exception.NoConfigParserFound( datastore_manager=self.datastore_version.manager) return cfg_parser(config).parse() def _calculate_unique_id(self): """ Returns a positive unique id based off of the instance id :return: a positive integer """ return abs(hash(self.instance_id) % (2 ** 31)) class OverrideConfigTemplate(SingleInstanceConfigTemplate): template_name = "override.config.template" def _validate_datastore(datastore_manager): try: CONF.get(datastore_manager) except oslo_config.NoSuchOptError: raise exception.InvalidDatastoreManager( datastore_manager=datastore_manager) def load_heat_template(datastore_manager): patterns = ["%s/heat.template" % datastore_manager, "default.heat.template"] _validate_datastore(datastore_manager) try: template_obj = ENV.select_template(patterns) return template_obj except jinja2.TemplateNotFound: msg = _("Missing heat template for %(s_datastore_manager)s.") % ( {"s_datastore_manager": datastore_manager}) LOG.error(msg) raise exception.TroveError(msg) class ReplicaSourceConfigTemplate(SingleInstanceConfigTemplate): template_name = "replica_source.config.template" class ReplicaConfigTemplate(SingleInstanceConfigTemplate): template_name = "replica.config.template" class ClusterConfigTemplate(SingleInstanceConfigTemplate): template_name = "cluster.config.template" trove-5.0.0/trove/version.py0000664000567000056710000000130012701410316017177 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('trove') trove-5.0.0/trove/db/0000775000567000056710000000000012701410521015531 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/__init__.py0000664000567000056710000000701012701410316017642 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import optparse from trove.common import cfg from trove.common import utils CONF = cfg.CONF db_api_opt = CONF.db_api_implementation def get_db_api(): return utils.import_module(db_api_opt) class Query(object): """Mimics sqlalchemy query object. This class allows us to store query conditions and use them with bulk updates and deletes just like sqlalchemy query object. 
Using this class makes the models independent of sqlalchemy """ def __init__(self, model, query_func, **conditions): self._query_func = query_func self._model = model self._conditions = conditions self.db_api = get_db_api() def all(self): return self.db_api.list(self._query_func, self._model, **self._conditions) def count(self): return self.db_api.count(self._query_func, self._model, **self._conditions) def first(self): return self.db_api.first(self._query_func, self._model, **self._conditions) def join(self, *args): return self.db_api.join(self._query_func, self._model, *args) def __iter__(self): return iter(self.all()) def update(self, **values): self.db_api.update_all(self._query_func, self._model, self._conditions, values) def delete(self): self.db_api.delete_all(self._query_func, self._model, **self._conditions) def limit(self, limit=200, marker=None, marker_column=None): return self.db_api.find_all_by_limit( self._query_func, self._model, self._conditions, limit=limit, marker=marker, marker_column=marker_column) def paginated_collection(self, limit=200, marker=None, marker_column=None): collection = self.limit(int(limit) + 1, marker, marker_column) if len(collection) > int(limit): return (collection[0:-1], collection[-2]['id']) return (collection, None) class Queryable(object): def __getattr__(self, item): return lambda model, **conditions: Query( model, query_func=getattr(get_db_api(), item), **conditions) db_query = Queryable() def add_options(parser): """Adds any configuration options that the db layer might have. :param parser: An optparse.OptionParser object :retval None """ help_text = ("The following configuration options are specific to the " "Trove database.") group = optparse.OptionGroup( parser, "Registry Database Options", help_text) group.add_option( '--sql-connection', metavar="CONNECTION", default=None, help="A valid SQLAlchemy connection string for the " "registry database. Default: %(default)s.") parser.add_option_group(group) trove-5.0.0/trove/db/sqlalchemy/0000775000567000056710000000000012701410521017673 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/sqlalchemy/migrate_repo/0000775000567000056710000000000012701410521022350 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/sqlalchemy/migrate_repo/README0000664000567000056710000000015312701410316023231 0ustar jenkinsjenkins00000000000000This is a database migration repository. More information at http://code.google.com/p/sqlalchemy-migrate/ trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/0000775000567000056710000000000012701410521024220 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/004_root_enabled.py0000664000567000056710000000266312701410316027623 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() root_enabled_history = Table( 'root_enabled_history', meta, Column('id', String(36), primary_key=True, nullable=False), Column('user', String(length=255)), Column('created', DateTime()), ) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([root_enabled_history]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([root_enabled_history]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py0000664000567000056710000000345112701410316032356 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # pgsql <= 8.3 was lax about char->other casting but this was tightened up # in 8.4+. We now have to specify the USING clause for the cast to succeed. # NB: The generated sqlalchemy query doesn't support this, so this override # is needed. if migrate_engine.name == 'postgresql': migrate_engine.execute('ALTER TABLE instances ALTER COLUMN flavor_id ' 'TYPE INTEGER USING flavor_id::integer') else: instances = Table('instances', meta, autoload=True) # modify column instances.c.flavor_id.alter(type=Integer()) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # int->char casts in pgsql still work fine without any USING clause, # so downgrade is not affected. # modify column: instances = Table('instances', meta, autoload=True) instances.c.flavor_id.alter(type=String(36)) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/009_add_deleted_flag_to_instances.py0000664000567000056710000000255312701410316033151 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
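# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 014 above drops to raw DDL on PostgreSQL because, from 8.4 on,
# ALTER COLUMN ... TYPE cannot cast varchar data without an explicit USING
# clause, and the ALTER that sqlalchemy-migrate generates has no way to
# carry one. The same guard as a hypothetical reusable helper:
def _sketch_alter_to_integer(migrate_engine, table_name, column_name):
    from sqlalchemy.schema import MetaData
    from trove.db.sqlalchemy.migrate_repo.schema import Integer
    from trove.db.sqlalchemy.migrate_repo.schema import Table
    if migrate_engine.name == 'postgresql':
        migrate_engine.execute(
            'ALTER TABLE %s ALTER COLUMN %s TYPE INTEGER '
            'USING %s::integer' % (table_name, column_name, column_name))
    else:
        # Other backends (e.g. MySQL, SQLite) cast implicitly.
        meta = MetaData(bind=migrate_engine)
        table = Table(table_name, meta, autoload=True)
        table.c[column_name].alter(type=Integer())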
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: instances = Table('instances', meta, autoload=True) instances.create_column(Column('deleted', Boolean())) instances.create_column(Column('deleted_at', DateTime())) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # drop column: instances = Table('instances', meta, autoload=True) instances.drop_column('deleted') instances.drop_column('deleted_at') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/025_add_service_statuses_indexes.py0000664000567000056710000000270112701410316033104 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from sqlalchemy.exc import OperationalError from sqlalchemy.schema import Index from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Table logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine service_statuses = Table('service_statuses', meta, autoload=True) idx = Index("service_statuses_instance_id", service_statuses.c.instance_id) try: idx.create() except OperationalError as e: logger.info(e) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine service_statuses = Table('service_statuses', meta, autoload=True) idx = Index("service_statuses_instance_id", service_statuses.c.instance_id) idx.drop() trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/001_base_schema.py0000664000567000056710000000331712701410316027412 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
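# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 025 above wraps Index.create() in try/except OperationalError:
# if the index already exists (for example after a partially applied run),
# the error is logged and the upgrade still succeeds, which makes it
# effectively idempotent. The pattern, generalized:
def _sketch_create_index_idempotently(column, index_name, log):
    from sqlalchemy.exc import OperationalError
    from sqlalchemy.schema import Index
    idx = Index(index_name, column)
    try:
        idx.create()
    except OperationalError as e:
        log.info(e)  # most likely "index already exists"; non-fatal
    return idx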
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() instances = Table( 'instances', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime()), Column('updated', DateTime()), Column('name', String(255)), Column('hostname', String(255)), Column('compute_instance_id', String(36)), Column('task_id', Integer()), Column('task_description', String(32)), Column('task_start_time', DateTime()), Column('volume_id', String(36))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([instances]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([instances]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/005_heartbeat.py0000664000567000056710000000266312701410316027126 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() agent_heartbeats = Table( 'agent_heartbeats', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_id', String(36), nullable=False), Column('updated_at', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([agent_heartbeats]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([agent_heartbeats]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/030_add_master_slave.py0000664000567000056710000000331712701410316030457 0ustar jenkinsjenkins00000000000000# Copyright Tesora, Inc. 2014 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
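# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 030 below wires replication into the schema with nothing more
# than a nullable self-referential foreign key: instances.slave_of_id
# points at the master's instances.id, and NULL marks a standalone or
# master instance. Listing one master's replicas, assuming the reflected
# instances table on a bound MetaData:
def _sketch_list_replicas(instances, master_id):
    from sqlalchemy.sql.expression import select
    return select([instances]).where(
        instances.c.slave_of_id == master_id).execute().fetchall()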
from sqlalchemy.schema import Column
from sqlalchemy.schema import ForeignKey
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy import utils as db_utils

COLUMN_NAME = 'slave_of_id'


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    instances.create_column(
        Column(COLUMN_NAME, String(36), ForeignKey('instances.id'),
               nullable=True))


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table='instances',
        columns=[COLUMN_NAME],
        ref_table='instances',
        ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[instances.c.slave_of_id],
        ref_columns=[instances.c.id])

    instances.drop_column(COLUMN_NAME)
trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/__init__.py0000664000567000056710000000000012701410316026321 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py0000664000567000056710000000244312701410316030475 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    service_type = Column('service_type', String(36))
    instances.create_column(service_type)
    instances.update().values({'service_type': 'mysql'}).execute()


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # modify column:
    instances = Table('instances', meta, autoload=True)
    instances.drop_column('service_type')
trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/022_add_backup_parent_id.py0000664000567000056710000000231712701410316031264 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
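# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 015 above shows the add-then-backfill idiom: a freshly created
# column is NULL for all existing rows, so it is populated with a single
# UPDATE immediately after creation. As a hypothetical helper:
def _sketch_add_column_with_backfill(table, column, default_value):
    table.create_column(column)
    table.update().values({column.name: default_value}).execute()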
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: backups = Table('backups', meta, autoload=True) backups.create_column(Column('parent_id', String(36), nullable=True)) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # drop column: backups = Table('backups', meta, autoload=True) backups.drop_column('parent_id') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/028_recreate_agent_heartbeat.py0000664000567000056710000000576512701410316032171 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from sqlalchemy.exc import OperationalError from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # new table with desired columns, indexes, and constraints new_agent_heartbeats = Table( 'agent_heartbeats', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_id', String(36), nullable=False, unique=True, index=True), Column('guest_agent_version', String(255), index=True), Column('deleted', Boolean(), index=True), Column('deleted_at', DateTime()), Column('updated_at', DateTime(), nullable=False)) # original table from migration 005_heartbeat.py previous_agent_heartbeats = Table('agent_heartbeats', meta, autoload=True) try: drop_tables([previous_agent_heartbeats]) except OperationalError as e: logger.warn("This table may have been dropped by some other means.") logger.warn(e) create_tables([new_agent_heartbeats]) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # new table with desired columns, indexes, and constraints new_agent_heartbeats = Table('agent_heartbeats', meta, autoload=True) try: drop_tables([new_agent_heartbeats]) except OperationalError as e: logger.warn("This table may have been dropped by some other means.") logger.warn(e) # reset the migrate_engine meta = MetaData() meta.bind = migrate_engine # original table from migration 005_heartbeat.py previous_agent_heartbeats = Table( 'agent_heartbeats', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_id', String(36), nullable=False), Column('updated_at', DateTime()), extend_existing=True) create_tables([previous_agent_heartbeats]) 
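# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 028 above rebuilds agent_heartbeats instead of issuing ALTERs:
# dropping and recreating is the portable way to add unique and indexed
# columns on every backend, at the cost of discarding existing rows --
# presumably acceptable here because heartbeat data is transient. The core
# shape of the swap:
def _sketch_rebuild_table(old_table, new_table, log):
    from sqlalchemy.exc import OperationalError
    from trove.db.sqlalchemy.migrate_repo.schema import create_tables
    from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
    try:
        drop_tables([old_table])
    except OperationalError as e:
        log.warn(e)  # already gone; treat the rebuild as idempotent
    create_tables([new_table])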
trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/006_dns_records.py0000664000567000056710000000245012701410316027467 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() dns_records = Table( 'dns_records', meta, Column('name', String(length=255), primary_key=True), Column('record_id', String(length=64))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([dns_records]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([dns_records]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/029_add_backup_datastore.py0000664000567000056710000000363012701410316031313 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table from trove.db.sqlalchemy import utils as db_utils def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) Table('datastore_versions', meta, autoload=True) datastore_version_id = Column('datastore_version_id', String(36), ForeignKey('datastore_versions.id')) backups.create_column(datastore_version_id) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) datastore_versions = Table('datastore_versions', meta, autoload=True) constraint_names = db_utils.get_foreign_key_constraint_names( engine=migrate_engine, table='backups', columns=['datastore_version_id'], ref_table='datastore_versions', ref_columns=['id']) db_utils.drop_foreign_key_constraints( constraint_names=constraint_names, columns=[backups.c.datastore_version_id], ref_columns=[datastore_versions.c.id]) backups.drop_column('datastore_version_id') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/012_backup.py0000664000567000056710000000412312701410316026423 0ustar jenkinsjenkins00000000000000# Copyright [2013] Hewlett-Packard Development Company, L.P. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Float from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() backups = Table('backups', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(255), nullable=False), Column('description', String(512)), Column('location', String(1024)), Column('backup_type', String(32)), Column('size', Float()), Column('tenant_id', String(36)), Column('state', String(32), nullable=False), Column('instance_id', String(36)), Column('checksum', String(32)), Column('backup_timestamp', DateTime()), Column('deleted', Boolean()), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted_at', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([backups, ]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([backups, ]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/027_add_datastore_capabilities.py0000664000567000056710000000415212701410316032475 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
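# NOTE(editor): illustrative sketch, not part of the original archive.
# Downgrades such as 029 and 030 above cannot hard-code a foreign-key name
# because every backend auto-generates its own, so they first discover the
# constraint names and then drop them by name. That two-step, using the
# db_utils helpers these migrations already rely on:
def _sketch_drop_fk(migrate_engine, child, parent, column_name):
    from trove.db.sqlalchemy import utils as db_utils
    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table=child.name,
        columns=[column_name],
        ref_table=parent.name,
        ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[child.c[column_name]],
        ref_columns=[parent.c.id])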
from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import UniqueConstraint from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() capabilities = Table( 'capabilities', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(255), unique=True), Column('description', String(255), nullable=False), Column('enabled', Boolean()) ) capability_overrides = Table( 'capability_overrides', meta, Column('id', String(36), primary_key=True, nullable=False), Column('datastore_version_id', String(36), ForeignKey('datastore_versions.id')), Column('capability_id', String(36), ForeignKey('capabilities.id')), Column('enabled', Boolean()), UniqueConstraint('datastore_version_id', 'capability_id', name='idx_datastore_capabilities_enabled') ) def upgrade(migrate_engine): meta.bind = migrate_engine Table('datastores', meta, autoload=True) Table('datastore_versions', meta, autoload=True) create_tables([capabilities, capability_overrides]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([capability_overrides, capabilities]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py0000664000567000056710000000617312701410316031030 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
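# NOTE(editor): illustrative sketch, not part of the original archive.
# The 027 tables above pair a base capabilities.enabled flag with optional
# per-datastore-version rows in capability_overrides, and the unique
# constraint allows at most one override per (version, capability) pair.
# Resolving the effective flag, assuming reflected tables on a bound
# MetaData and that an override, when present, wins:
def _sketch_effective_capability(capabilities, overrides, version_id, name):
    from sqlalchemy.sql.expression import select
    cap = select([capabilities]).where(
        capabilities.c.name == name).execute().fetchone()
    override = select([overrides]).where(
        overrides.c.capability_id == cap.id).where(
        overrides.c.datastore_version_id == version_id).execute().fetchone()
    return override.enabled if override is not None else cap.enabled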
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy import utils as db_utils

meta = MetaData()

datastores = Table(
    'datastores',
    meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(255), unique=True),
    Column('manager', String(255), nullable=False),
    Column('default_version_id', String(36)),
)

datastore_versions = Table(
    'datastore_versions',
    meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('datastore_id', String(36), ForeignKey('datastores.id')),
    Column('name', String(255), unique=True),
    Column('image_id', String(36), nullable=False),
    Column('packages', String(511)),
    Column('active', Boolean(), nullable=False),
    UniqueConstraint('datastore_id', 'name', name='ds_versions')
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    create_tables([datastores, datastore_versions])

    instances = Table('instances', meta, autoload=True)
    datastore_version_id = Column('datastore_version_id', String(36),
                                  ForeignKey('datastore_versions.id'))
    instances.create_column(datastore_version_id)
    instances.drop_column('service_type')

    # Table 'service_images' is deprecated as of this version.
    # Leave it in place for a few releases.
    # drop_tables([service_images])


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table='instances',
        columns=['datastore_version_id'],
        ref_table='datastore_versions',
        ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[instances.c.datastore_version_id],
        ref_columns=[datastore_versions.c.id])

    instances.drop_column('datastore_version_id')
    service_type = Column('service_type', String(36))
    instances.create_column(service_type)
    instances.update().values({'service_type': 'mysql'}).execute()

    drop_tables([datastore_versions, datastores])
trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/031_add_timestamps_to_configurations.py0000664000567000056710000000247412701410316033774 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
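# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 016 above replaces the free-form instances.service_type column
# with a datastore_version_id foreign key, so an instance's datastore is
# now resolved by walking instance -> datastore_version -> datastore.
# A lookup sketch, assuming reflected tables on a bound MetaData:
def _sketch_datastore_for_instance(instances, versions, datastores, inst_id):
    from sqlalchemy.sql.expression import select
    inst = select([instances]).where(
        instances.c.id == inst_id).execute().fetchone()
    version = select([versions]).where(
        versions.c.id == inst.datastore_version_id).execute().fetchone()
    return select([datastores]).where(
        datastores.c.id == version.datastore_id).execute().fetchone()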
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) configurations = Table('configurations', meta, autoload=True) created = Column('created', DateTime()) updated = Column('updated', DateTime()) configurations.create_column(created) configurations.create_column(updated) def downgrade(migrate_engine): meta = MetaData(bind=migrate_engine) configurations = Table('configurations', meta, autoload=True) configurations.drop_column('created') configurations.drop_column('updated') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/037_modules.py0000664000567000056710000000644612701410316026647 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import UniqueConstraint from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table from trove.db.sqlalchemy.migrate_repo.schema import Text meta = MetaData() modules = Table( 'modules', meta, Column('id', String(length=64), primary_key=True, nullable=False), Column('name', String(length=255), nullable=False), Column('type', String(length=255), nullable=False), Column('contents', Text(length=16777215), nullable=False), Column('description', String(length=255)), Column('tenant_id', String(length=64), nullable=True), Column('datastore_id', String(length=64), nullable=True), Column('datastore_version_id', String(length=64), nullable=True), Column('auto_apply', Boolean(), default=0, nullable=False), Column('visible', Boolean(), default=1, nullable=False), Column('live_update', Boolean(), default=0, nullable=False), Column('md5', String(length=32), nullable=False), Column('created', DateTime(), nullable=False), Column('updated', DateTime(), nullable=False), Column('deleted', Boolean(), default=0, nullable=False), Column('deleted_at', DateTime()), UniqueConstraint( 'type', 'tenant_id', 'datastore_id', 'datastore_version_id', 'name', 'deleted_at', name='UQ_type_tenant_datastore_datastore_version_name'), ) instance_modules = Table( 'instance_modules', meta, Column('id', String(length=64), primary_key=True, nullable=False), Column('instance_id', String(length=64), ForeignKey('instances.id', ondelete="CASCADE", onupdate="CASCADE"), nullable=False), Column('module_id', String(length=64), ForeignKey('modules.id', ondelete="CASCADE", onupdate="CASCADE"), nullable=False), Column('md5', String(length=32), nullable=False), Column('created', DateTime(), 
nullable=False), Column('updated', DateTime(), nullable=False), Column('deleted', Boolean(), default=0, nullable=False), Column('deleted_at', DateTime()), ) def upgrade(migrate_engine): meta.bind = migrate_engine Table('instances', meta, autoload=True) create_tables([modules, instance_modules]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([instance_modules, modules]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/003_service_statuses.py0000664000567000056710000000314112701410316030550 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() service_statuses = Table( 'service_statuses', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_id', String(36), nullable=False), Column('status_id', Integer(), nullable=False), Column('status_description', String(64), nullable=False), Column('updated_at', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([service_statuses]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([service_statuses]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/036_add_datastore_version_metadata.py0000664000567000056710000000431412701410316033371 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
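# NOTE(editor): illustrative sketch, not part of the original archive.
# In migration 037 above, instance_modules carries ON DELETE/ON UPDATE
# CASCADE foreign keys to both instances and modules, so deleting either
# parent row also removes the association rows on backends that enforce
# foreign keys (SQLite, for instance, needs PRAGMA foreign_keys = ON):
def _sketch_cascade_cleanup(modules, module_id):
    # One DELETE suffices; the CASCADE clause cleans up the matching
    # instance_modules rows without a second statement.
    modules.delete().where(modules.c.id == module_id).execute()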
from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import UniqueConstraint from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() datastore_version_metadata = Table( 'datastore_version_metadata', meta, Column('id', String(36), primary_key=True, nullable=False), Column( 'datastore_version_id', String(36), ForeignKey('datastore_versions.id', ondelete='CASCADE'), ), Column('key', String(128), nullable=False), Column('value', String(128)), Column('created', DateTime(), nullable=False), Column('deleted', Boolean(), nullable=False, default=False), Column('deleted_at', DateTime()), Column('updated_at', DateTime()), UniqueConstraint( 'datastore_version_id', 'key', 'value', name='UQ_datastore_version_metadata_datastore_version_id_key_value') ) def upgrade(migrate_engine): meta.bind = migrate_engine # Load the datastore_versions table into the session. # creates datastore_version_metadata table Table('datastore_versions', meta, autoload=True) create_tables([datastore_version_metadata]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([datastore_version_metadata]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/007_add_volume_flavor.py0000664000567000056710000000265112701410316030656 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: instances = Table('instances', meta, autoload=True) volume_size = Column('volume_size', Integer()) flavor_id = Column('flavor_id', String(36)) instances.create_column(flavor_id) instances.create_column(volume_size) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # drop column: instances = Table('instances', meta, autoload=True) instances.drop_column('flavor_id') instances.drop_column('volume_size') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/033_datastore_parameters.py0000664000567000056710000000430612701410316031375 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import UniqueConstraint from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() datastore_configuration_parameters = Table( 'datastore_configuration_parameters', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(128), primary_key=True, nullable=False), Column('datastore_version_id', String(36), ForeignKey("datastore_versions.id"), primary_key=True, nullable=False), Column('restart_required', Boolean(), nullable=False, default=False), Column('max_size', String(40)), Column('min_size', String(40)), Column('data_type', String(128), nullable=False), Column('deleted', Boolean()), Column('deleted_at', DateTime()), UniqueConstraint( 'datastore_version_id', 'name', name='UQ_datastore_configuration_parameters_datastore_version_id_name') ) def upgrade(migrate_engine): meta.bind = migrate_engine Table('datastore_versions', meta, autoload=True) create_tables([datastore_configuration_parameters]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([datastore_configuration_parameters]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py0000664000567000056710000000421312701410316030676 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
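# NOTE(editor): illustrative sketch, not part of the original archive.
# datastore_configuration_parameters in 033 above evidently stores the
# validation metadata (data_type, min_size/max_size, restart_required)
# used to check user-supplied configuration values per datastore version;
# the unique constraint guarantees one rule per (version, parameter) pair.
# Fetching the rule for such a check, assuming the reflected table:
def _sketch_parameter_rule(params, version_id, name):
    from sqlalchemy.sql.expression import select
    return select([params]).where(
        params.c.datastore_version_id == version_id).where(
        params.c.name == name).execute().fetchone()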
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.sql.expression import select from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def migrate_datastore_manager(datastores, datastore_versions): versions = select([datastore_versions]).execute() for ds_v in versions: ds = select([datastores]).\ where(datastores.c.id == ds_v.datastore_id).\ execute().fetchone() datastore_versions.update().\ where(datastore_versions.c.id == ds_v.id).\ values(manager=ds.manager).\ execute() def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine datastores = Table('datastores', meta, autoload=True) datastore_versions = Table('datastore_versions', meta, autoload=True) # add column to datastore_versions manager = Column('manager', String(255)) datastore_versions.create_column(manager) migrate_datastore_manager(datastores, datastore_versions) # drop column from datastores datastores.drop_column('manager') def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine datastores = Table('datastores', meta, autoload=True) datastore_versions = Table('datastore_versions', meta, autoload=True) # drop column from datastore_versions datastore_versions.drop_column('manager') # add column to datastores manager = Column('manager', String(255)) datastores.create_column(manager) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/008_add_instance_fields.py0000664000567000056710000000251012701410316031123 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: instances = Table('instances', meta, autoload=True) instances.create_column(Column('tenant_id', String(36), nullable=True)) instances.create_column(Column('server_status', String(64))) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # drop column: instances = Table('instances', meta, autoload=True) instances.drop_column('tenant_id') instances.drop_column('server_status') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/024_add_backup_indexes.py0000664000567000056710000000350412701410316030757 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from sqlalchemy.exc import OperationalError from sqlalchemy.schema import Index from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Table logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) backups_instance_id_idx = Index("backups_instance_id", backups.c.instance_id) backups_deleted_idx = Index("backups_deleted", backups.c.deleted) try: backups_instance_id_idx.create() except OperationalError as e: logger.info(e) try: backups_deleted_idx.create() except OperationalError as e: logger.info(e) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) backups_instance_id_idx = Index("backups_instance_id", backups.c.instance_id) backups_deleted_idx = Index("backups_deleted", backups.c.deleted) meta.bind = migrate_engine backups_instance_id_idx.drop() backups_deleted_idx.drop() trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/018_datastore_versions_fix.py0000664000567000056710000000222112701410316031745 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine datastore_versions = Table('datastore_versions', meta, autoload=True) # modify column datastore_versions.c.name.alter(unique=False) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # modify column: datastore_versions = Table('datastore_versions', meta, autoload=True) datastore_versions.c.name.alter(unique=True) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/011_quota.py0000664000567000056710000000536612701410316026320 0ustar jenkinsjenkins00000000000000# Copyright [2013] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
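# NOTE(editor): illustrative sketch, not part of the original archive.
# Migration 018 above relaxes datastore_versions.name via
# alter(unique=False), but that call evidently leaves the underlying
# unique index in place on some backends -- which is why migration 026,
# later in this archive, drops the constraint explicitly by its
# backend-specific name and tolerates failure:
def _sketch_drop_name_unique(migrate_engine, table):
    from migrate.changeset import UniqueConstraint
    from sqlalchemy.exc import OperationalError
    if migrate_engine.name == 'mysql':
        uc = UniqueConstraint('name', table=table, name='name')
    elif migrate_engine.name == 'postgresql':
        uc = UniqueConstraint('name', table=table,
                              name='datastore_versions_name_key')
    else:
        return  # sqlite cannot drop unique constraints
    try:
        uc.drop()
    except OperationalError:
        pass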
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import UniqueConstraint from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() quotas = Table('quotas', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime()), Column('updated', DateTime()), Column('tenant_id', String(36)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer()), UniqueConstraint('tenant_id', 'resource')) quota_usages = Table('quota_usages', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime()), Column('updated', DateTime()), Column('tenant_id', String(36)), Column('in_use', Integer(), default=0), Column('reserved', Integer(), default=0), Column('resource', String(length=255), nullable=False), UniqueConstraint('tenant_id', 'resource')) reservations = Table('reservations', meta, Column('created', DateTime()), Column('updated', DateTime()), Column('id', String(36), primary_key=True, nullable=False), Column('usage_id', String(36)), Column('delta', Integer(), nullable=False), Column('status', String(length=36))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([quotas, quota_usages, reservations]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([quotas, quota_usages, reservations]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/021_conductor_last_seen.py0000664000567000056710000000267612701410316031226 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Float from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() conductor_lastseen = Table( 'conductor_lastseen', meta, Column('instance_id', String(36), primary_key=True, nullable=False), Column('method_name', String(36), primary_key=True, nullable=False), Column('sent', Float(precision=32))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([conductor_lastseen]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([conductor_lastseen]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/013_add_security_group_artifacts.py0000664000567000056710000000661612701410316033123 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() security_groups = Table( 'security_groups', meta, Column('id', String(length=36), primary_key=True, nullable=False), Column('name', String(length=255)), Column('description', String(length=255)), Column('user', String(length=255)), Column('tenant_id', String(length=255)), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted', Boolean(), default=0), Column('deleted_at', DateTime()), ) security_group_instance_associations = Table( 'security_group_instance_associations', meta, Column('id', String(length=36), primary_key=True, nullable=False), Column('security_group_id', String(length=36), ForeignKey('security_groups.id', ondelete="CASCADE", onupdate="CASCADE")), Column('instance_id', String(length=36), ForeignKey('instances.id', ondelete="CASCADE", onupdate="CASCADE")), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted', Boolean(), default=0), Column('deleted_at', DateTime()), ) security_group_rules = Table( 'security_group_rules', meta, Column('id', String(length=36), primary_key=True, nullable=False), Column('group_id', String(length=36), ForeignKey('security_groups.id', ondelete="CASCADE", onupdate="CASCADE")), Column('parent_group_id', String(length=36), ForeignKey('security_groups.id', ondelete="CASCADE", onupdate="CASCADE")), Column('protocol', String(length=255)), Column('from_port', Integer()), Column('to_port', Integer()), Column('cidr', String(length=255)), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted', Boolean(), default=0), Column('deleted_at', DateTime()), ) def upgrade(migrate_engine): meta.bind = migrate_engine Table( 'instances', meta, autoload=True, ) create_tables([security_groups, security_group_rules, security_group_instance_associations]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([security_group_instance_associations, security_group_rules, security_groups]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/032_clusters.py0000664000567000056710000000724712701410316027036 0ustar jenkinsjenkins00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import Index from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table from trove.db.sqlalchemy import utils as db_utils logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') meta = MetaData() clusters = Table( 'clusters', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime(), nullable=False), Column('updated', DateTime(), nullable=False), Column('name', String(255), nullable=False), Column('task_id', Integer(), nullable=False), Column('tenant_id', String(36), nullable=False), Column("datastore_version_id", String(36), ForeignKey('datastore_versions.id'), nullable=False), Column('deleted', Boolean()), Column('deleted_at', DateTime()), Index("clusters_tenant_id", "tenant_id"), Index("clusters_deleted", "deleted"),) def upgrade(migrate_engine): meta.bind = migrate_engine Table('datastores', meta, autoload=True) Table('datastore_versions', meta, autoload=True) instances = Table('instances', meta, autoload=True) create_tables([clusters]) instances.create_column(Column('cluster_id', String(36), ForeignKey("clusters.id"))) instances.create_column(Column('shard_id', String(36))) instances.create_column(Column('type', String(64))) cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id) cluster_id_idx.create() def downgrade(migrate_engine): meta.bind = migrate_engine datastore_versions = Table('datastore_versions', meta, autoload=True) constraint_names = db_utils.get_foreign_key_constraint_names( engine=migrate_engine, table='clusters', columns=['datastore_version_id'], ref_table='datastore_versions', ref_columns=['id']) db_utils.drop_foreign_key_constraints( constraint_names=constraint_names, columns=[clusters.c.datastore_version_id], ref_columns=[datastore_versions.c.id]) instances = Table('instances', meta, autoload=True) constraint_names = db_utils.get_foreign_key_constraint_names( engine=migrate_engine, table='instances', columns=['cluster_id'], ref_table='clusters', ref_columns=['id']) db_utils.drop_foreign_key_constraints( constraint_names=constraint_names, columns=[instances.c.cluster_id], ref_columns=[clusters.c.id]) instances.drop_column('cluster_id') instances.drop_column('shard_id') instances.drop_column('type') drop_tables([clusters]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/034_change_task_description.py0000664000567000056710000000174412701410316032042 0ustar jenkinsjenkins00000000000000# Copyright 2014 AWCloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) instances.c.task_description.alter(type=String(255)) def downgrade(migrate_engine): pass trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/020_configurations.py0000664000567000056710000000604512701410316030214 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table from trove.db.sqlalchemy import utils as db_utils logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') meta = MetaData() configurations = Table( 'configurations', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(64), nullable=False), Column('description', String(256)), Column('tenant_id', String(36), nullable=False), Column('datastore_version_id', String(36), nullable=False), Column('deleted', Boolean(), nullable=False, default=False), Column('deleted_at', DateTime()), ) configuration_parameters = Table( 'configuration_parameters', meta, Column('configuration_id', String(36), ForeignKey("configurations.id"), nullable=False, primary_key=True), Column('configuration_key', String(128), nullable=False, primary_key=True), Column('configuration_value', String(128)), Column('deleted', Boolean(), nullable=False, default=False), Column('deleted_at', DateTime()), ) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([configurations]) create_tables([configuration_parameters]) instances = Table('instances', meta, autoload=True) instances.create_column(Column('configuration_id', String(36), ForeignKey("configurations.id"))) def downgrade(migrate_engine): meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) constraint_names = db_utils.get_foreign_key_constraint_names( engine=migrate_engine, table='instances', columns=['configuration_id'], ref_table='configurations', ref_columns=['id']) 
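    # The constraint names were reflected from the live schema above because
    # each backend auto-generates its own foreign key constraint names; the
    # call below drops them by name so that instances.configuration_id can
    # then be removed cleanly.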
db_utils.drop_foreign_key_constraints( constraint_names=constraint_names, columns=[instances.c.configuration_id], ref_columns=[configurations.c.id]) instances.drop_column('configuration_id') drop_tables([configuration_parameters, configurations]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/026_datastore_versions_unique_fix.py0000664000567000056710000000351212701410316033336 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import UniqueConstraint from oslo_log import log as logging from sqlalchemy.exc import OperationalError from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Table logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine datastore_versions = Table('datastore_versions', meta, autoload=True) # drop the unique index on the name column - unless we are # using sqlite - it doesn't support dropping unique constraints uc = None if migrate_engine.name == "mysql": uc = UniqueConstraint('name', table=datastore_versions, name='name') elif migrate_engine.name == "postgresql": uc = UniqueConstraint('name', table=datastore_versions, name='datastore_versions_name_key') if uc: try: uc.drop() except OperationalError as e: logger.info(e) def downgrade(migrate_engine): # we aren't going to recreate the index in this case for 2 reasons: # 1. this column being unique was a bug in the first place # 2. adding a unique index to a column that has duplicates will fail pass trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/023_add_instance_indexes.py0000664000567000056710000000324712701410316031321 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
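# A minimal standalone sketch (not part of the Trove tree) of the
# engine-dispatch pattern used by the 026 migration above: MySQL and
# PostgreSQL auto-generate different names for a unique constraint, so the
# name to drop must be chosen per backend. The 'widgets' table and the
# constraint names below are hypothetical.
from migrate.changeset import UniqueConstraint
from sqlalchemy import Table
from sqlalchemy.schema import MetaData


def drop_widgets_name_unique_constraint(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    widgets = Table('widgets', meta, autoload=True)
    # Map each backend onto the name it generates for UNIQUE(name).
    constraint_name = {
        'mysql': 'name',
        'postgresql': 'widgets_name_key',
    }.get(migrate_engine.name)
    if constraint_name:  # sqlite, for one, cannot drop unique constraints
        UniqueConstraint('name', table=widgets, name=constraint_name).drop()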
from oslo_log import log as logging from sqlalchemy.exc import OperationalError from sqlalchemy.schema import Index from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Table logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id) try: tenant_id_idx.create() except OperationalError as e: logger.info(e) deleted_idx = Index("instances_deleted", instances.c.deleted) try: deleted_idx.create() except OperationalError as e: logger.info(e) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id) tenant_id_idx.drop() deleted_idx = Index("instances_deleted", instances.c.deleted) deleted_idx.drop() trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/002_service_images.py0000664000567000056710000000253712701410316030151 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() service_images = Table( 'service_images', meta, Column('id', String(36), primary_key=True, nullable=False), Column('service_name', String(255)), Column('image_id', String(255))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([service_images]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([service_images]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/010_add_usage.py0000664000567000056710000000327412701410316027076 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
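# A short sketch of the idempotent-index pattern used by the 023 migration
# above, against a hypothetical 'widgets' table: Index.create() raises
# OperationalError when the index already exists on some backends, so the
# error is logged and swallowed to keep the migration re-runnable.
from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy import Table
from sqlalchemy.schema import Index, MetaData

LOG = logging.getLogger(__name__)


def ensure_widgets_name_index(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    widgets = Table('widgets', meta, autoload=True)
    name_idx = Index("widgets_name", widgets.c.name)
    try:
        name_idx.create()
    except OperationalError as e:
        # Most likely the index is already present; log it and move on.
        LOG.info(e)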
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import drop_tables from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() usage_events = Table( 'usage_events', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_name', String(36)), Column('tenant_id', String(36)), Column('nova_instance_id', String(36)), Column('instance_size', Integer()), Column('nova_volume_id', String(36)), Column('volume_size', Integer()), Column('end_time', DateTime()), Column('updated', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([usage_events]) def downgrade(migrate_engine): meta.bind = migrate_engine drop_tables([usage_events]) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/035_flavor_id_int_to_string.py0000664000567000056710000000222412701410316032072 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() def upgrade(migrate_engine): meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) instances.c.flavor_id.alter(String(255)) def downgrade(migrate_engine): meta.bind = migrate_engine instances = Table('instances', meta, autoload=True) instances.c.flavor_id.alter(Integer()) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/versions/019_datastore_fix.py0000664000567000056710000001076712701410320030027 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
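# A minimal sketch of the Column.alter() mechanism behind the 034 and 035
# migrations above: sqlalchemy-migrate monkey-patches .alter() onto Column
# when migrate.changeset is imported, allowing a type change in place. The
# 'widgets' table is hypothetical; note that the narrowing downgrade can
# truncate or fail if existing values no longer fit.
from migrate import changeset  # noqa: installs .alter() on Column
from sqlalchemy import Integer, String, Table
from sqlalchemy.schema import MetaData


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    widgets = Table('widgets', meta, autoload=True)
    widgets.c.flavor_id.alter(String(255))


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    widgets = Table('widgets', meta, autoload=True)
    widgets.c.flavor_id.alter(Integer())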
from sqlalchemy.schema import MetaData from sqlalchemy.sql.expression import delete from sqlalchemy.sql.expression import insert from sqlalchemy.sql.expression import select from sqlalchemy.sql.expression import update from trove.common import cfg from trove.db.sqlalchemy.migrate_repo.schema import Table CONF = cfg.CONF LEGACY_IMAGE_ID = "00000000-0000-0000-0000-000000000000" LEGACY_DATASTORE_ID = "10000000-0000-0000-0000-000000000001" LEGACY_VERSION_ID = "20000000-0000-0000-0000-000000000002" meta = MetaData() def create_legacy_version(datastores_table, datastore_versions_table, image_id): insert( table=datastores_table, values=dict(id=LEGACY_DATASTORE_ID, name="Legacy MySQL") ).execute() insert( table=datastore_versions_table, values=dict(id=LEGACY_VERSION_ID, datastore_id=LEGACY_DATASTORE_ID, name="Unknown Legacy Version", image_id=image_id, packages="", active=False, manager="mysql") ).execute() return LEGACY_VERSION_ID def find_image(service_name): image_table = Table('service_images', meta, autoload=True) image = select( columns=["id", "image_id", "service_name"], from_obj=image_table, whereclause="service_name='%s'" % service_name, limit=1 ).execute().fetchone() if image: return image.id return LEGACY_IMAGE_ID def has_instances_wo_datastore_version(instances_table): instance = select( columns=["id"], from_obj=instances_table, whereclause="datastore_version_id is NULL", limit=1 ).execute().fetchone() return instance is not None def find_all_instances_wo_datastore_version(instances_table): instances = select( columns=["id"], from_obj=instances_table, whereclause="datastore_version_id is NULL" ).execute() return instances def upgrade(migrate_engine): meta.bind = migrate_engine instance_table = Table('instances', meta, autoload=True) if has_instances_wo_datastore_version(instance_table): instances = find_all_instances_wo_datastore_version(instance_table) image_id = find_image("mysql") datastores_table = Table('datastores', meta, autoload=True) datastore_versions_table = Table('datastore_versions', meta, autoload=True) version_id = create_legacy_version(datastores_table, datastore_versions_table, image_id) for instance in instances: update( table=instance_table, whereclause="id='%s'" % instance.id, values=dict(datastore_version_id=version_id) ).execute() instance_table.c.datastore_version_id.alter(nullable=False) def downgrade(migrate_engine): meta.bind = migrate_engine instance_table = Table('instances', meta, autoload=True) instance_table.c.datastore_version_id.alter(nullable=True) update( table=instance_table, whereclause="datastore_version_id='%s'" % LEGACY_VERSION_ID, values=dict(datastore_version_id=None) ).execute() datastores_table = Table('datastores', meta, autoload=True) datastore_versions_table = Table('datastore_versions', meta, autoload=True) delete( table=datastore_versions_table, whereclause="id='%s'" % LEGACY_VERSION_ID ).execute() delete( table=datastores_table, whereclause="id='%s'" % LEGACY_DATASTORE_ID ).execute() trove-5.0.0/trove/db/sqlalchemy/migrate_repo/manage.py0000664000567000056710000000140112701410316024150 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.versioning.shell import main if __name__ == "__main__": main(debug='False', repository='.') trove-5.0.0/trove/db/sqlalchemy/migrate_repo/schema.py0000664000567000056710000000506012701410316024165 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Various conveniences used for migration scripts.""" from oslo_log import log as logging import sqlalchemy.types logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') class String(sqlalchemy.types.String): def __init__(self, length, *args, **kwargs): super(String, self).__init__(*args, length=length, **kwargs) class Text(sqlalchemy.types.Text): def __init__(self, length=None, *args, **kwargs): super(Text, self).__init__(*args, length=length, **kwargs) class Boolean(sqlalchemy.types.Boolean): def __init__(self, create_constraint=True, name=None, *args, **kwargs): super(Boolean, self).__init__(*args, create_constraint=create_constraint, name=name, **kwargs) class DateTime(sqlalchemy.types.DateTime): def __init__(self, timezone=False, *args, **kwargs): super(DateTime, self).__init__(*args, timezone=timezone, **kwargs) class Integer(sqlalchemy.types.Integer): def __init__(self, *args, **kwargs): super(Integer, self).__init__(*args, **kwargs) class BigInteger(sqlalchemy.types.BigInteger): def __init__(self, *args, **kwargs): super(BigInteger, self).__init__(*args, **kwargs) class Float(sqlalchemy.types.Float): def __init__(self, *args, **kwargs): super(Float, self).__init__(*args, **kwargs) def create_tables(tables): for table in tables: logger.info("creating table %(table)s" % {'table': table}) table.create() def drop_tables(tables): for table in tables: logger.info("dropping table %(table)s" % {'table': table}) table.drop() def Table(name, metadata, *args, **kwargs): return sqlalchemy.schema.Table(name, metadata, *args, mysql_engine='INNODB', **kwargs) trove-5.0.0/trove/db/sqlalchemy/migrate_repo/__init__.py0000664000567000056710000000000012701410316024451 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/sqlalchemy/migrate_repo/migrate.cfg0000664000567000056710000000177412701410316024474 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=Trove Migrations # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. 
version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=['mysql','postgres','sqlite'] trove-5.0.0/trove/db/sqlalchemy/utils.py0000664000567000056710000000435712701410320021413 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset.constraint import ForeignKeyConstraint from sqlalchemy.engine import reflection def get_foreign_key_constraint_names(engine, table, columns, ref_table, ref_columns): """Retrieve the names of foreign key constraints that match the given criteria. :param engine: The sqlalchemy engine to be used. :param table: Name of the child table. :param columns: List of the foreign key columns. :param ref_table: Name of the parent table. :param ref_columns: List of the referenced columns. :return: List of foreign key constraint names. """ constraint_names = [] inspector = reflection.Inspector.from_engine(engine) fks = inspector.get_foreign_keys(table) for fk in fks: if (fk['referred_table'] == ref_table and fk['constrained_columns'] == columns and fk['referred_columns'] == ref_columns): constraint_names.append(fk['name']) return constraint_names def drop_foreign_key_constraints(constraint_names, columns, ref_columns): """Drop the foreign key constraints that match the given criteria. :param constraint_names: List of foreign key constraint names :param columns: List of the foreign key columns. :param ref_columns: List of the referenced columns. """ for constraint_name in constraint_names: fkey_constraint = ForeignKeyConstraint(columns=columns, refcolumns=ref_columns, name=constraint_name) fkey_constraint.drop() trove-5.0.0/trove/db/sqlalchemy/__init__.py0000664000567000056710000000000012701410316021774 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/db/sqlalchemy/session.py0000664000567000056710000001116012701410316021731 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
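# A usage sketch for the two helpers in utils.py above, with hypothetical
# 'children' and 'parents' tables: reflect the backend-generated foreign
# key constraint names first, then drop them by name so the FK column can
# be removed -- the same sequence the 020 and 032 downgrades follow.
from migrate import changeset  # noqa: installs drop_column() on Table
from sqlalchemy import Table
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy import utils as db_utils


def drop_parent_fk(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    children = Table('children', meta, autoload=True)
    parents = Table('parents', meta, autoload=True)
    names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine, table='children',
        columns=['parent_id'], ref_table='parents', ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=names,
        columns=[children.c.parent_id],
        ref_columns=[parents.c.id])
    children.drop_column('parent_id')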
import contextlib import threading from oslo_db.sqlalchemy import session from oslo_log import log as logging from sqlalchemy import MetaData from trove.common import cfg from trove.common.i18n import _ from trove.db.sqlalchemy import mappers _FACADE = None _LOCK = threading.Lock() LOG = logging.getLogger(__name__) CONF = cfg.CONF def configure_db(options, models_mapper=None): facade = _create_facade(options) if models_mapper: models_mapper.map(facade) else: from trove.backup import models as backup_models from trove.cluster import models as cluster_models from trove.conductor import models as conductor_models from trove.configuration import models as configurations_models from trove.datastore import models as datastores_models from trove.dns import models as dns_models from trove.extensions.mysql import models as mysql_models from trove.extensions.security_group import models as secgrp_models from trove.guestagent import models as agent_models from trove.instance import models as base_models from trove.module import models as module_models from trove.quota import models as quota_models model_modules = [ base_models, datastores_models, dns_models, mysql_models, agent_models, quota_models, backup_models, secgrp_models, configurations_models, conductor_models, cluster_models, module_models ] models = {} for module in model_modules: models.update(module.persisted_models()) mappers.map(get_engine(), models) def _create_facade(options): global _LOCK, _FACADE # TODO(mvandijk): Refactor this once oslo.db spec is implemented: # https://specs.openstack.org/openstack/oslo-specs/specs/kilo/ # make-enginefacade-a-facade.html if _FACADE is None: with _LOCK: if _FACADE is None: conf = CONF.database # pop the deprecated config option 'query_log' if conf.query_log: if conf.connection_debug < 50: conf['connection_debug'] = 50 LOG.warning(_('Configuration option "query_log" has been ' 'deprecated. Use "connection_debug" ' 'instead. Setting connection_debug = ' '%(debug_level)s.') % {'debug_level': conf.get('connection_debug')}) # TODO(mvandijk): once query_log is removed, # use enginefacade.from_config() instead database_opts = dict(CONF.database) database_opts.pop('query_log') _FACADE = session.EngineFacade( options['database']['connection'], **database_opts ) return _FACADE def _check_facade(): if _FACADE is None: msg = _("***The Database has not been setup!!!***") LOG.exception(msg) raise RuntimeError(msg) def get_facade(): _check_facade() return _FACADE def get_engine(use_slave=False): _check_facade() return _FACADE.get_engine(use_slave=use_slave) def get_session(**kwargs): return get_facade().get_session(**kwargs) def raw_query(model, **kwargs): return get_session(**kwargs).query(model) def clean_db(): engine = get_engine() meta = MetaData() meta.bind = engine meta.reflect() with contextlib.closing(engine.connect()) as con: trans = con.begin() for table in reversed(meta.sorted_tables): if table.name != "migrate_version": con.execute(table.delete()) trans.commit() def drop_db(options): if options: _create_facade(options) engine = get_engine() meta = MetaData() meta.bind = engine meta.reflect() meta.drop_all() trove-5.0.0/trove/db/sqlalchemy/mappers.py0000664000567000056710000000704212701410316021721 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import orm from sqlalchemy.orm import exc as orm_exc from sqlalchemy import Table def map(engine, models): meta = MetaData() meta.bind = engine if mapping_exists(models['instance']): return orm.mapper(models['instance'], Table('instances', meta, autoload=True)) orm.mapper(models['root_enabled_history'], Table('root_enabled_history', meta, autoload=True)) orm.mapper(models['datastore'], Table('datastores', meta, autoload=True)) orm.mapper(models['datastore_version'], Table('datastore_versions', meta, autoload=True)) orm.mapper(models['datastore_version_metadata'], Table('datastore_version_metadata', meta, autoload=True)) orm.mapper(models['capabilities'], Table('capabilities', meta, autoload=True)) orm.mapper(models['capability_overrides'], Table('capability_overrides', meta, autoload=True)) orm.mapper(models['service_statuses'], Table('service_statuses', meta, autoload=True)) orm.mapper(models['dns_records'], Table('dns_records', meta, autoload=True)) orm.mapper(models['agent_heartbeats'], Table('agent_heartbeats', meta, autoload=True)) orm.mapper(models['quotas'], Table('quotas', meta, autoload=True)) orm.mapper(models['quota_usages'], Table('quota_usages', meta, autoload=True)) orm.mapper(models['reservations'], Table('reservations', meta, autoload=True)) orm.mapper(models['backups'], Table('backups', meta, autoload=True)) orm.mapper(models['security_group'], Table('security_groups', meta, autoload=True)) orm.mapper(models['security_group_rule'], Table('security_group_rules', meta, autoload=True)) orm.mapper(models['security_group_instance_association'], Table('security_group_instance_associations', meta, autoload=True)) orm.mapper(models['configurations'], Table('configurations', meta, autoload=True)) orm.mapper(models['configuration_parameters'], Table('configuration_parameters', meta, autoload=True)) orm.mapper(models['conductor_lastseen'], Table('conductor_lastseen', meta, autoload=True)) orm.mapper(models['clusters'], Table('clusters', meta, autoload=True)) orm.mapper(models['datastore_configuration_parameters'], Table('datastore_configuration_parameters', meta, autoload=True)) orm.mapper(models['modules'], Table('modules', meta, autoload=True)) orm.mapper(models['instance_modules'], Table('instance_modules', meta, autoload=True)) def mapping_exists(model): try: orm.class_mapper(model) return True except orm_exc.UnmappedClassError: return False trove-5.0.0/trove/db/sqlalchemy/migration.py0000664000567000056710000001070112701410316022237 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os from migrate.versioning import api as versioning_api # See LP bug #719834. sqlalchemy-migrate changed location of # exceptions.py after 0.6.0. try: from migrate.versioning import exceptions as versioning_exceptions except ImportError: from migrate import exceptions as versioning_exceptions from oslo_log import log as logging from trove.common import exception logger = logging.getLogger('trove.db.sqlalchemy.migration') def db_version(options, repo_path=None): """Return the database's current migration number. :param options: options dict :retval version number """ repo_path = get_migrate_repo_path(repo_path) sql_connection = options['database']['connection'] try: return versioning_api.db_version(sql_connection, repo_path) except versioning_exceptions.DatabaseNotControlledError: msg = ("database '%(sql_connection)s' is not under migration control" % {'sql_connection': sql_connection}) raise exception.DatabaseMigrationError(msg) def upgrade(options, version=None, repo_path=None): """Upgrade the database's current migration level. :param options: options dict :param version: version to upgrade (defaults to latest) :retval version number """ db_version(options, repo_path) # Ensure db is under migration control repo_path = get_migrate_repo_path(repo_path) sql_connection = options['database']['connection'] version_str = version or 'latest' logger.info("Upgrading %(sql_connection)s to version %(version_str)s" % {'sql_connection': sql_connection, 'version_str': version_str}) return versioning_api.upgrade(sql_connection, repo_path, version) def downgrade(options, version, repo_path=None): """Downgrade the database's current migration level. :param options: options dict :param version: version to downgrade to :retval version number """ db_version(options, repo_path) # Ensure db is under migration control repo_path = get_migrate_repo_path(repo_path) sql_connection = options['database']['connection'] logger.info("Downgrading %(sql_connection)s to version %(version)s" % {'sql_connection': sql_connection, 'version': version}) return versioning_api.downgrade(sql_connection, repo_path, version) def version_control(options, repo_path=None): """Place a database under migration control. :param options: options dict """ sql_connection = options['database']['connection'] try: _version_control(options, repo_path) except versioning_exceptions.DatabaseAlreadyControlledError: msg = ("database '%(sql_connection)s' is already under migration " "control" % {'sql_connection': sql_connection}) raise exception.DatabaseMigrationError(msg) def _version_control(options, repo_path): """Place a database under migration control. :param options: options dict """ repo_path = get_migrate_repo_path(repo_path) sql_connection = options['database']['connection'] return versioning_api.version_control(sql_connection, repo_path) def db_sync(options, version=None, repo_path=None): """Place a database under migration control and perform an upgrade.
:param options: options dict :param repo_path: used for plugin db migrations, defaults to main repo :retval version number """ try: _version_control(options, repo_path) except versioning_exceptions.DatabaseAlreadyControlledError: pass upgrade(options, version=version, repo_path=repo_path) def get_migrate_repo_path(repo_path=None): """Get the path for the migrate repository.""" default_path = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'migrate_repo') repo_path = repo_path or default_path assert os.path.exists(repo_path) return repo_path trove-5.0.0/trove/db/sqlalchemy/api.py0000664000567000056710000000674212701410316021031 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy.exc from trove.common import exception from trove.db.sqlalchemy import migration from trove.db.sqlalchemy import session def list(query_func, *args, **kwargs): return query_func(*args, **kwargs).all() def count(query, *args, **kwargs): return query(*args, **kwargs).count() def first(query, *args, **kwargs): return query(*args, **kwargs).first() def join(query, model, *args): return query(model).join(*args) def find_all(model, **conditions): return _query_by(model, **conditions) def find_all_by_limit(query_func, model, conditions, limit, marker=None, marker_column=None): return _limits(query_func, model, conditions, limit, marker, marker_column).all() def find_by(model, **kwargs): return _query_by(model, **kwargs).first() def save(model): try: db_session = session.get_session() model = db_session.merge(model) db_session.flush() return model except sqlalchemy.exc.IntegrityError as error: raise exception.DBConstraintError(model_name=model.__class__.__name__, error=str(error.orig)) def delete(model): db_session = session.get_session() model = db_session.merge(model) db_session.delete(model) db_session.flush() def delete_all(query_func, model, **conditions): query_func(model, **conditions).delete() def update(model, **values): for k, v in values.iteritems(): model[k] = v def update_all(query_func, model, conditions, values): query_func(model, **conditions).update(values) def configure_db(options, *plugins): session.configure_db(options) configure_db_for_plugins(options, *plugins) def configure_db_for_plugins(options, *plugins): for plugin in plugins: session.configure_db(options, models_mapper=plugin.mapper) def drop_db(options): session.drop_db(options) def clean_db(): session.clean_db() def db_sync(options, version=None, repo_path=None): migration.db_sync(options, version, repo_path) def db_upgrade(options, version=None, repo_path=None): migration.upgrade(options, version, repo_path) def db_downgrade(options, version, repo_path=None): migration.downgrade(options, version, repo_path) def db_reset(options, *plugins): drop_db(options) db_sync(options) configure_db(options) def _base_query(cls): return session.get_session().query(cls) def _query_by(cls, **conditions): query = _base_query(cls) if conditions: query = 
query.filter_by(**conditions) return query def _limits(query_func, model, conditions, limit, marker, marker_column=None): query = query_func(model, **conditions) marker_column = marker_column or model.id if marker: query = query.filter(marker_column > marker) return query.order_by(marker_column).limit(limit) trove-5.0.0/trove/db/models.py0000664000567000056710000001120312701410316017365 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common import models from trove.common import pagination from trove.common import utils from trove.db import db_query from trove.db import get_db_api LOG = logging.getLogger(__name__) class DatabaseModelBase(models.ModelBase): _auto_generated_attrs = ['id'] @classmethod def create(cls, **values): init_vals = { 'id': utils.generate_uuid(), 'created': utils.utcnow(), } if hasattr(cls, 'deleted'): init_vals['deleted'] = False init_vals.update(values) instance = cls(**init_vals) if not instance.is_valid(): raise exception.InvalidModelError(errors=instance.errors) return instance.save() @property def db_api(self): return get_db_api() @property def preserve_on_delete(self): return hasattr(self, 'deleted') and hasattr(self, 'deleted_at') @classmethod def query(cls): return get_db_api()._base_query(cls) def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) self['updated'] = utils.utcnow() LOG.debug("Saving %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) return self.db_api.save(self) def delete(self): self['updated'] = utils.utcnow() LOG.debug("Deleting %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) if self.preserve_on_delete: self['deleted_at'] = utils.utcnow() self['deleted'] = True return self.db_api.save(self) else: return self.db_api.delete(self) def update(self, **values): for key in values: if hasattr(self, key): setattr(self, key, values[key]) self['updated'] = utils.utcnow() return self.db_api.save(self) def __init__(self, **kwargs): self.merge_attributes(kwargs) if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) def merge_attributes(self, values): """dict.update() behaviour.""" for k, v in values.iteritems(): self[k] = v @classmethod def find_by(cls, context=None, **conditions): model = cls.get_by(**conditions) if model is None: raise exception.ModelNotFoundError(_("%(s_name)s Not Found") % {"s_name": cls.__name__}) if ((context and not context.is_admin and hasattr(model, 'tenant_id') and model.tenant_id != context.tenant)): msg = _("Tenant %(s_tenant)s tried to access " "%(s_name)s, owned by %(s_owner)s.") LOG.error(msg % ( {"s_tenant": context.tenant, "s_name": cls.__name__, "s_owner": model.tenant_id})) raise exception.ModelNotFoundError( _("Tenant %(s_tenant)s cannot access %(s_name)s") % ( {"s_tenant": context.tenant, "s_name": cls.__name__})) return model 
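    # A usage note for the marker-based pagination helpers defined in
    # api.py above (find_all_by_limit/_limits): pages are keyed on the
    # marker column (the model's id by default) rather than on OFFSET, so
    # each page is an indexed range scan. A hypothetical caller might do:
    #
    #     page = find_all_by_limit(_query_by, MyModel, {'deleted': False},
    #                              limit=20, marker=last_seen_id)
    #     next_marker = page[-1].id if len(page) == 20 else None
    #
    # and hand next_marker back in on the following request.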
@classmethod def get_by(cls, **kwargs): return get_db_api().find_by(cls, **cls._process_conditions(kwargs)) @classmethod def find_all(cls, **kwargs): return db_query.find_all(cls, **cls._process_conditions(kwargs)) @classmethod def _process_conditions(cls, raw_conditions): """Override in inheritors to format/modify any conditions.""" return raw_conditions @classmethod def find_by_pagination(cls, collection_type, collection_query, paginated_url, **kwargs): elements, next_marker = collection_query.paginated_collection(**kwargs) return pagination.PaginatedDataView(collection_type, elements, paginated_url, next_marker) trove-5.0.0/trove/datastore/0000775000567000056710000000000012701410521017132 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/datastore/__init__.py0000664000567000056710000000000012701410316021233 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/datastore/service.py0000664000567000056710000000654512701410316021160 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common import wsgi from trove.datastore import models, views from trove.flavor import views as flavor_views class DatastoreController(wsgi.Controller): def show(self, req, tenant_id, id): datastore = models.Datastore.load(id) datastore_versions = (models.DatastoreVersions.load(datastore.id)) return wsgi.Result(views. DatastoreView(datastore, datastore_versions, req).data(), 200) def index(self, req, tenant_id): context = req.environ[wsgi.CONTEXT_KEY] only_active = True if context.is_admin: only_active = False datastores = models.Datastores.load(only_active) datastores_versions = models.DatastoreVersions.load_all(only_active) return wsgi.Result(views. DatastoresView(datastores, datastores_versions, req).data(), 200) def version_show(self, req, tenant_id, datastore, id): datastore = models.Datastore.load(datastore) datastore_version = models.DatastoreVersion.load(datastore, id) return wsgi.Result(views.DatastoreVersionView(datastore_version, req).data(), 200) def version_show_by_uuid(self, req, tenant_id, uuid): datastore_version = models.DatastoreVersion.load_by_uuid(uuid) return wsgi.Result(views.DatastoreVersionView(datastore_version, req).data(), 200) def version_index(self, req, tenant_id, datastore): context = req.environ[wsgi.CONTEXT_KEY] only_active = True if context.is_admin: only_active = False datastore_versions = models.DatastoreVersions.load(datastore, only_active) return wsgi.Result(views. DatastoreVersionsView(datastore_versions, req).data(), 200) def list_associated_flavors(self, req, tenant_id, datastore, version_id): """ All nova flavors are returned for a datastore-version unless one or more entries are found in datastore_version_metadata, in which case only those are returned. """ context = req.environ[wsgi.CONTEXT_KEY] flavors = (models.DatastoreVersionMetadata. 
list_datastore_version_flavor_associations( context, datastore, version_id)) return wsgi.Result(flavor_views.FlavorsView(flavors, req).data(), 200) trove-5.0.0/trove/datastore/views.py0000664000567000056710000001046212701410316020646 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common.views import create_links from trove.common import wsgi class DatastoreView(object): def __init__(self, datastore, datastore_versions, req=None): self.datastore = datastore self.datastore_versions = datastore_versions self.req = req def data(self): datastore_dict = { "id": self.datastore.id, "name": self.datastore.name, "links": self._build_links(), } datastore_dict.update(DatastoreVersionsView(self.datastore_versions, self.req).data(False)) default_version = self.datastore.default_version_id if default_version: datastore_dict["default_version"] = default_version return {"datastore": datastore_dict} def _build_links(self): return create_links("datastores", self.req, self.datastore.id) class DatastoresView(object): def __init__(self, datastores, datastores_versions, req=None): self.datastores = datastores self.datastores_versions = datastores_versions self.req = req def data(self): data = [] for datastore in self.datastores: datastores_versions = [ datastore_version for datastore_version in self.datastores_versions if datastore_version.datastore_id == datastore.id] data.append(self.data_for_datastore(datastore, datastores_versions)) return {'datastores': data} def data_for_datastore(self, datastore, datastore_versions): view = DatastoreView(datastore, datastore_versions, req=self.req) return view.data()['datastore'] class DatastoreVersionView(object): def __init__(self, datastore_version, req=None): self.datastore_version = datastore_version self.req = req self.context = req.environ[wsgi.CONTEXT_KEY] def data(self, include_datastore_id=True): datastore_version_dict = { "id": self.datastore_version.id, "name": self.datastore_version.name, "links": self._build_links(), } if include_datastore_id: datastore_version_dict["datastore"] = (self.datastore_version. datastore_id) if self.context.is_admin: datastore_version_dict['active'] = self.datastore_version.active datastore_version_dict['packages'] = (self.datastore_version. packages) datastore_version_dict['image'] = self.datastore_version.image_id return {"version": datastore_version_dict} def _build_links(self): return create_links("datastores/versions", self.req, self.datastore_version.id) class DatastoreVersionsView(object): def __init__(self, datastore_versions, req=None): self.datastore_versions = datastore_versions self.req = req def data(self, include_datastore_id=True): data = [] for datastore_version in self.datastore_versions: data.append(self. 
data_for_datastore_version(datastore_version, include_datastore_id)) return {'versions': data} def data_for_datastore_version(self, datastore_version, include_datastore_id): view = DatastoreVersionView(datastore_version, req=self.req) return view.data(include_datastore_id)['version'] trove-5.0.0/trove/datastore/models.py0000664000567000056710000005377312701410316021004 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.remote import create_nova_client from trove.common import utils from trove.db import get_db_api from trove.db import models as dbmodels from trove.flavor.models import Flavor as flavor_model LOG = logging.getLogger(__name__) CONF = cfg.CONF db_api = get_db_api() def persisted_models(): return { 'datastore': DBDatastore, 'capabilities': DBCapabilities, 'datastore_version': DBDatastoreVersion, 'capability_overrides': DBCapabilityOverrides, 'datastore_version_metadata': DBDatastoreVersionMetadata } class DBDatastore(dbmodels.DatabaseModelBase): _data_fields = ['id', 'name', 'default_version_id'] class DBCapabilities(dbmodels.DatabaseModelBase): _data_fields = ['id', 'name', 'description', 'enabled'] class DBCapabilityOverrides(dbmodels.DatabaseModelBase): _data_fields = ['id', 'capability_id', 'datastore_version_id', 'enabled'] class DBDatastoreVersion(dbmodels.DatabaseModelBase): _data_fields = ['id', 'datastore_id', 'name', 'manager', 'image_id', 'packages', 'active'] class DBDatastoreVersionMetadata(dbmodels.DatabaseModelBase): _data_fields = ['id', 'datastore_version_id', 'key', 'value', 'created', 'deleted', 'deleted_at', 'updated_at'] preserve_on_delete = True class Capabilities(object): def __init__(self, datastore_version_id=None): self.capabilities = [] self.datastore_version_id = datastore_version_id def __contains__(self, item): return item in [capability.name for capability in self.capabilities] def __len__(self): return len(self.capabilities) def __iter__(self): for item in self.capabilities: yield item def __repr__(self): return '<%s: %s>' % (type(self), self.capabilities) def add(self, capability, enabled): """ Add a capability override to a datastore version. """ if self.datastore_version_id is not None: DBCapabilityOverrides.create( capability_id=capability.id, datastore_version_id=self.datastore_version_id, enabled=enabled) self._load() def _load(self): """ Bulk load and override default capabilities with configured datastore version specific settings. """ capability_defaults = [Capability(c) for c in DBCapabilities.find_all()] capability_overrides = [] if self.datastore_version_id is not None: # This should always happen but if there is any future case where # we don't have a datastore version id number it won't stop # defaults from rendering.
capability_overrides = [ CapabilityOverride(ce) for ce in DBCapabilityOverrides.find_all( datastore_version_id=self.datastore_version_id) ] def override(cap): # This logic is necessary to apply datastore version specific # capability overrides when they are present in the database. for capability_override in capability_overrides: if cap.id == capability_override.capability_id: # we have a mapped entity that indicates this datastore # version has an override so we honor that. return capability_override # There were no overrides for this capability so we just hand it # right back. return cap self.capabilities = map(override, capability_defaults) LOG.debug('Capabilities for datastore %(ds_id)s: %(capabilities)s' % {'ds_id': self.datastore_version_id, 'capabilities': self.capabilities}) @classmethod def load(cls, datastore_version_id=None): """ Generates a Capabilities object by looking up all capabilities from defaults and overrides and provides the one structure that should be used as the interface to controlling capabilities per datastore. :returns Capabilities: """ self = cls(datastore_version_id) self._load() return self class BaseCapability(object): def __init__(self, db_info): self.db_info = db_info def __repr__(self): return ('<%(my_class)s: name: %(name)s, enabled: %(enabled)s>' % {'my_class': type(self), 'name': self.name, 'enabled': self.enabled}) @property def id(self): """ The capability's id :returns str: """ return self.db_info.id @property def enabled(self): """ Is the capability/feature enabled? :returns bool: """ return self.db_info.enabled def enable(self): """ Enable the capability. """ self.db_info.enabled = True self.db_info.save() def disable(self): """ Disable the capability """ self.db_info.enabled = False self.db_info.save() def delete(self): """ Delete the capability from the database. """ self.db_info.delete() class CapabilityOverride(BaseCapability): """ A capability override is simply a setting that applies to a specific datastore version and overrides the default setting in the base capability's entry for Trove. """ def __init__(self, db_info): super(CapabilityOverride, self).__init__(db_info) # This *may* be better solved with a join in the SQLAlchemy model but # I was unable to get our query object to work properly for this. parent_capability = Capability.load(db_info.capability_id) if parent_capability: self.parent_name = parent_capability.name self.parent_description = parent_capability.description else: raise exception.CapabilityNotFound( _("Somehow we got a datastore version capability without a " "parent, that shouldn't happen. %s") % db_info.capability_id) @property def name(self): """ The name of the capability. :returns str: """ return self.parent_name @property def description(self): """ The description of the capability. :returns str: """ return self.parent_description @property def capability_id(self): """ Because capability overrides are stored in an association table, there are times when having the capability id is necessary. :returns str: """ return self.db_info.capability_id @classmethod def load(cls, capability_id): """ Generates a CapabilityOverride object from the capability_override id. :returns CapabilityOverride: """ try: return cls(DBCapabilityOverrides.find_by( capability_id=capability_id)) except exception.ModelNotFoundError: raise exception.CapabilityNotFound( _("Capability Override not found for " "capability %s") % capability_id) @classmethod def create(cls, capability, datastore_version_id, enabled): """ Create a new CapabilityOverride.
:param capability: The capability to be overridden for this DS Version :param datastore_version_id: The datastore version to apply the override to. :param enabled: Set enabled to True or False :returns CapabilityOverride: """ return CapabilityOverride( DBCapabilityOverrides.create( capability_id=capability.id, datastore_version_id=datastore_version_id, enabled=enabled) ) class Capability(BaseCapability): @property def name(self): """ The Capability name :returns str: """ return self.db_info.name @property def description(self): """ The Capability description :returns str: """ return self.db_info.description @classmethod def load(cls, capability_id_or_name): """ Generates a Capability object by looking up the capability first by ID then by name. :returns Capability: """ try: return cls(DBCapabilities.find_by(id=capability_id_or_name)) except exception.ModelNotFoundError: try: return cls(DBCapabilities.find_by(name=capability_id_or_name)) except exception.ModelNotFoundError: raise exception.CapabilityNotFound( capability=capability_id_or_name) @classmethod def create(cls, name, description, enabled=False): """ Creates a new capability. :returns Capability: """ return Capability(DBCapabilities.create( name=name, description=description, enabled=enabled)) class Datastore(object): def __init__(self, db_info): self.db_info = db_info @classmethod def load(cls, id_or_name): try: return cls(DBDatastore.find_by(id=id_or_name)) except exception.ModelNotFoundError: try: return cls(DBDatastore.find_by(name=id_or_name)) except exception.ModelNotFoundError: raise exception.DatastoreNotFound(datastore=id_or_name) @property def id(self): return self.db_info.id @property def name(self): return self.db_info.name @property def default_version_id(self): return self.db_info.default_version_id def delete(self): self.db_info.delete() class Datastores(object): def __init__(self, db_info): self.db_info = db_info @classmethod def load(cls, only_active=True): datastores = DBDatastore.find_all() if only_active: datastores = datastores.join(DBDatastoreVersion).filter( DBDatastoreVersion.active == 1) return cls(datastores) def __iter__(self): for item in self.db_info: yield item class DatastoreVersion(object): def __init__(self, db_info): self._capabilities = None self.db_info = db_info self._datastore_name = None @classmethod def load(cls, datastore, id_or_name): try: return cls(DBDatastoreVersion.find_by(datastore_id=datastore.id, id=id_or_name)) except exception.ModelNotFoundError: versions = DBDatastoreVersion.find_all(datastore_id=datastore.id, name=id_or_name) if versions.count() == 0: raise exception.DatastoreVersionNotFound(version=id_or_name) if versions.count() > 1: raise exception.NoUniqueMatch(name=id_or_name) return cls(versions.first()) @classmethod def load_by_uuid(cls, uuid): try: return cls(DBDatastoreVersion.find_by(id=uuid)) except exception.ModelNotFoundError: raise exception.DatastoreVersionNotFound(version=uuid) def delete(self): self.db_info.delete() @property def id(self): return self.db_info.id @property def datastore_id(self): return self.db_info.datastore_id @property def datastore_name(self): if self._datastore_name is None: self._datastore_name = Datastore.load(self.datastore_id).name return self._datastore_name # TODO(tim.simpson): This would be less confusing if it was called # "version" and datastore_name was called "name". 
    @property
    def name(self):
        return self.db_info.name

    @property
    def image_id(self):
        return self.db_info.image_id

    @property
    def packages(self):
        return self.db_info.packages

    @property
    def active(self):
        return bool(self.db_info.active)

    @property
    def manager(self):
        return self.db_info.manager

    @property
    def default(self):
        datastore = Datastore.load(self.datastore_id)
        return (datastore.default_version_id == self.db_info.id)

    @property
    def capabilities(self):
        if self._capabilities is None:
            self._capabilities = Capabilities.load(self.db_info.id)

        return self._capabilities


class DatastoreVersions(object):
    def __init__(self, db_info):
        self.db_info = db_info

    @classmethod
    def load(cls, id_or_name, only_active=True):
        datastore = Datastore.load(id_or_name)
        if only_active:
            versions = DBDatastoreVersion.find_all(datastore_id=datastore.id,
                                                   active=True)
        else:
            versions = DBDatastoreVersion.find_all(datastore_id=datastore.id)
        return cls(versions)

    @classmethod
    def load_all(cls, only_active=True):
        if only_active:
            return cls(DBDatastoreVersion.find_all(active=True))
        return cls(DBDatastoreVersion.find_all())

    def __iter__(self):
        for item in self.db_info:
            yield item


def get_datastore_version(type=None, version=None, return_inactive=False):
    datastore = type or CONF.default_datastore
    if not datastore:
        raise exception.DatastoreDefaultDatastoreNotFound()
    datastore = Datastore.load(datastore)
    version = version or datastore.default_version_id
    if not version:
        raise exception.DatastoreDefaultVersionNotFound(
            datastore=datastore.name)
    datastore_version = DatastoreVersion.load(datastore, version)
    if datastore_version.datastore_id != datastore.id:
        raise exception.DatastoreNoVersion(datastore=datastore.name,
                                           version=datastore_version.name)
    if not datastore_version.active and not return_inactive:
        raise exception.DatastoreVersionInactive(
            version=datastore_version.name)
    return (datastore, datastore_version)


def update_datastore(name, default_version):
    db_api.configure_db(CONF)
    try:
        datastore = DBDatastore.find_by(name=name)
    except exception.ModelNotFoundError:
        # Create a new one
        datastore = DBDatastore()
        datastore.id = utils.generate_uuid()
        datastore.name = name
    if default_version:
        version = DatastoreVersion.load(datastore, default_version)
        if not version.active:
            raise exception.DatastoreVersionInactive(version=version.name)
        datastore.default_version_id = version.id
    else:
        datastore.default_version_id = None
    db_api.save(datastore)


def update_datastore_version(datastore, name, manager, image_id, packages,
                             active):
    db_api.configure_db(CONF)
    datastore = Datastore.load(datastore)
    try:
        version = DBDatastoreVersion.find_by(datastore_id=datastore.id,
                                             name=name)
    except exception.ModelNotFoundError:
        # Create a new one
        version = DBDatastoreVersion()
        version.id = utils.generate_uuid()
        version.name = name
        version.datastore_id = datastore.id
    version.manager = manager
    version.image_id = image_id
    version.packages = packages
    version.active = active
    db_api.save(version)
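def _example_register_datastore_version():
    """Illustrative sketch (editorial addition, not part of the original
    module) of how the helpers above compose, e.g. when driven from
    trove-manage. The 'mysql'/'5.6' names and the image id are assumed
    purely for the example.
    """
    update_datastore('mysql', default_version=None)  # create the datastore
    update_datastore_version('mysql', '5.6', 'mysql',
                             'some-glance-image-id', 'mysql-server-5.6',
                             active=True)
    update_datastore('mysql', default_version='5.6')  # now make it default
    # Look the pair back up the way the API layer does.
    return get_datastore_version(type='mysql', version='5.6')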
class DatastoreVersionMetadata(object):
    @classmethod
    def _datastore_version_metadata_add(cls, datastore_name,
                                        datastore_version_name,
                                        datastore_version_id,
                                        key, value, exception_class):
        """Create an entry in the Datastore Version Metadata table."""
        # Do we have a mapping in the db?
        # yes: and it's deleted, then modify the association
        # yes: and it's not deleted, then error on create
        # no: then just create the new association
        try:
            db_record = DBDatastoreVersionMetadata.find_by(
                datastore_version_id=datastore_version_id,
                key=key, value=value)
            if db_record.deleted == 1:
                db_record.deleted = 0
                db_record.updated_at = utils.utcnow()
                db_record.save()
                return
            else:
                raise exception_class(
                    datastore=datastore_name,
                    datastore_version=datastore_version_name,
                    flavor_id=value)
        except exception.NotFound:
            pass
        DBDatastoreVersionMetadata.create(
            datastore_version_id=datastore_version_id,
            key=key, value=value)

    @classmethod
    def _datastore_version_metadata_delete(cls, datastore_name,
                                           datastore_version_name,
                                           datastore_version_id,
                                           key, value, exception_class):
        try:
            db_record = DBDatastoreVersionMetadata.find_by(
                datastore_version_id=datastore_version_id,
                key=key, value=value)
            if db_record.deleted == 0:
                db_record.delete()
                return
            else:
                raise exception_class(
                    datastore=datastore_name,
                    datastore_version=datastore_version_name,
                    flavor_id=value)
        except exception.ModelNotFoundError:
            raise exception_class(datastore=datastore_name,
                                  datastore_version=datastore_version_name,
                                  flavor_id=value)

    @classmethod
    def add_datastore_version_flavor_association(cls, datastore_name,
                                                 datastore_version_name,
                                                 flavor_ids):
        db_api.configure_db(CONF)
        db_ds_record = DBDatastore.find_by(
            name=datastore_name
        )
        db_datastore_id = db_ds_record.id
        db_dsv_record = DBDatastoreVersion.find_by(
            datastore_id=db_datastore_id,
            name=datastore_version_name
        )
        datastore_version_id = db_dsv_record.id
        for flavor_id in flavor_ids:
            cls._datastore_version_metadata_add(
                datastore_name, datastore_version_name,
                datastore_version_id, 'flavor', flavor_id,
                exception.DatastoreFlavorAssociationAlreadyExists)

    @classmethod
    def delete_datastore_version_flavor_association(cls, datastore_name,
                                                    datastore_version_name,
                                                    flavor_id):
        db_api.configure_db(CONF)
        db_ds_record = DBDatastore.find_by(
            name=datastore_name
        )
        db_datastore_id = db_ds_record.id
        db_dsv_record = DBDatastoreVersion.find_by(
            datastore_id=db_datastore_id,
            name=datastore_version_name
        )
        datastore_version_id = db_dsv_record.id
        cls._datastore_version_metadata_delete(
            datastore_name, datastore_version_name,
            datastore_version_id, 'flavor', flavor_id,
            exception.DatastoreFlavorAssociationNotFound)

    @classmethod
    def list_datastore_version_flavor_associations(cls, context,
                                                   datastore_type,
                                                   datastore_version_id):
        if datastore_type and datastore_version_id:
            # All nova flavors are permitted for a datastore_version unless
            # one or more entries are found in datastore_version_metadata,
            # in which case only those are permitted.
            (datastore, datastore_version) = get_datastore_version(
                type=datastore_type, version=datastore_version_id)
            # If datastore_version_id and flavor key exists in the
            # metadata table return all the associated flavors for
            # that datastore version.
            nova_flavors = create_nova_client(context).flavors.list()
            bound_flavors = DBDatastoreVersionMetadata.find_all(
                datastore_version_id=datastore_version.id,
                key='flavor', deleted=False
            )
            if bound_flavors.count() != 0:
                bound_flavors = tuple(f.value for f in bound_flavors)
                # Generate a filtered list of nova flavors
                ds_nova_flavors = (f for f in nova_flavors
                                   if f.id in bound_flavors)
                associated_flavors = tuple(flavor_model(flavor=item)
                                           for item in ds_nova_flavors)
            else:
                # Return all nova flavors if no flavor metadata found
                # for datastore_version.
                associated_flavors = tuple(flavor_model(flavor=item)
                                           for item in nova_flavors)
            return associated_flavors
        else:
            msg = _("Specify both the datastore and datastore_version_id.")
            raise exception.BadRequest(msg)
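def _example_restrict_flavors(context):
    """Illustrative sketch (editorial addition, not part of the original
    module): bind an assumed 'mysql'/'5.6' datastore version to two assumed
    nova flavor ids, then list what users may still choose.
    """
    meta = DatastoreVersionMetadata
    meta.add_datastore_version_flavor_association(
        'mysql', '5.6', flavor_ids=['2', '3'])
    return meta.list_datastore_version_flavor_associations(
        context, datastore_type='mysql', datastore_version_id='5.6')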
trove-5.0.0/trove/rpc.py0000664000567000056710000001171612701410316016312 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(esp): This code was taken from nova

__all__ = [
    'init',
    'cleanup',
    'set_defaults',
    'add_extra_exmods',
    'clear_extra_exmods',
    'get_allowed_exmods',
    'RequestContextSerializer',
    'get_client',
    'get_server',
    'get_notifier',
    'TRANSPORT_ALIASES',
]

from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from osprofiler import profiler

from trove.common.context import TroveContext
import trove.common.exception

CONF = cfg.CONF
TRANSPORT = None
NOTIFIER = None

ALLOWED_EXMODS = [
    trove.common.exception.__name__,
]
EXTRA_EXMODS = []

# TODO(esp): Remove or update these paths
TRANSPORT_ALIASES = {
    'trove.openstack.common.rpc.impl_kombu': 'rabbit',
    'trove.openstack.common.rpc.impl_qpid': 'qpid',
    'trove.openstack.common.rpc.impl_zmq': 'zmq',
    'trove.rpc.impl_kombu': 'rabbit',
    'trove.rpc.impl_qpid': 'qpid',
    'trove.rpc.impl_zmq': 'zmq',
}


def init(conf):
    global TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf,
                                        allowed_remote_exmods=exmods,
                                        aliases=TRANSPORT_ALIASES)
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)


def cleanup():
    global TRANSPORT, NOTIFIER
    assert TRANSPORT is not None
    assert NOTIFIER is not None
    TRANSPORT.cleanup()
    TRANSPORT = NOTIFIER = None


def set_defaults(control_exchange):
    messaging.set_transport_defaults(control_exchange)


def add_extra_exmods(*args):
    EXTRA_EXMODS.extend(args)


def clear_extra_exmods():
    del EXTRA_EXMODS[:]


def get_allowed_exmods():
    return ALLOWED_EXMODS + EXTRA_EXMODS


class JsonPayloadSerializer(messaging.NoOpSerializer):
    @staticmethod
    def serialize_entity(context, entity):
        return jsonutils.to_primitive(entity, convert_instances=True)


class RequestContextSerializer(messaging.Serializer):

    def __init__(self, base):
        self._base = base

    def serialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.serialize_entity(context, entity)

    def deserialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(context, entity)

    def serialize_context(self, context):
        _context = context.to_dict()
        prof = profiler.get()
        if prof:
            trace_info = {
                "hmac_key": prof.hmac_key,
                "base_id": prof.get_base_id(),
                "parent_id": prof.get_id()
            }
            _context.update({"trace_info": trace_info})
        return _context

    def deserialize_context(self, context):
        trace_info = context.pop("trace_info", None)
        if trace_info:
            profiler.init(**trace_info)
        return TroveContext.from_dict(context)


def get_transport_url(url_str=None):
    return messaging.TransportURL.parse(CONF, url_str,
TRANSPORT_ALIASES) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None # Thread module is not monkeypatched if remote debugging is enabled. # Using eventlet executor without monkepatching thread module will # lead to unpredictable results. from trove.common import debug_utils debug_utils.setup() executor = "blocking" if debug_utils.enabled() else "eventlet" serializer = RequestContextSerializer(serializer) return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor=executor, serializer=serializer) def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) trove-5.0.0/trove/flavor/0000775000567000056710000000000012701410521016435 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/flavor/__init__.py0000664000567000056710000000000012701410316020536 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/flavor/service.py0000664000567000056710000000331612701410316020454 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from trove.common import exception from trove.common import wsgi from trove.flavor import models from trove.flavor import views class FlavorController(wsgi.Controller): """Controller for flavor functionality.""" def show(self, req, tenant_id, id): """Return a single flavor.""" context = req.environ[wsgi.CONTEXT_KEY] self._validate_flavor_id(id) flavor = models.Flavor(context=context, flavor_id=id) # Pass in the request to build accurate links. return wsgi.Result(views.FlavorView(flavor, req).data(), 200) def index(self, req, tenant_id): """Return all flavors.""" context = req.environ[wsgi.CONTEXT_KEY] flavors = models.Flavors(context=context) return wsgi.Result(views.FlavorsView(flavors, req).data(), 200) def _validate_flavor_id(self, id): if isinstance(id, six.string_types): return try: if int(id) != float(id): raise exception.NotFound(uuid=id) except ValueError: raise exception.NotFound(uuid=id) trove-5.0.0/trove/flavor/views.py0000664000567000056710000000355712701410316020160 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg from trove.common.views import create_links CONF = cfg.CONF class FlavorView(object): def __init__(self, flavor, req=None): self.flavor = flavor self.req = req def data(self): # If the flavor id cannot be cast to an int, we simply return # no id and rely on str_id instead. try: f_id = int(self.flavor.id) except ValueError: f_id = None flavor = { 'id': f_id, 'links': self._build_links(), 'name': self.flavor.name, 'ram': self.flavor.ram, 'str_id': str(self.flavor.id), } if not CONF.trove_volume_support and CONF.device_path is not None: flavor['local_storage'] = self.flavor.ephemeral return {"flavor": flavor} def _build_links(self): return create_links("flavors", self.req, self.flavor.id) class FlavorsView(object): view = FlavorView def __init__(self, flavors, req=None): self.flavors = flavors self.req = req def data(self): data = [] for flavor in self.flavors: data.append(self.view(flavor, req=self.req).data()['flavor']) return {"flavors": data} trove-5.0.0/trove/flavor/models.py0000664000567000056710000000454012701410316020277 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Model classes that form the core of instance flavor functionality.""" from novaclient import exceptions as nova_exceptions from trove.common import exception from trove.common.models import NovaRemoteModelBase from trove.common.remote import create_nova_client class Flavor(object): _data_fields = ['id', 'links', 'name', 'ram', 'vcpus', 'ephemeral'] def __init__(self, flavor=None, context=None, flavor_id=None): if flavor: self.flavor = flavor return if flavor_id and context: try: client = create_nova_client(context) self.flavor = client.flavors.get(flavor_id) except nova_exceptions.NotFound as e: raise exception.NotFound(uuid=flavor_id) except nova_exceptions.ClientException as e: raise exception.TroveError(str(e)) return msg = ("Flavor is not defined, and" " context and flavor_id were not specified.") raise exception.InvalidModelError(errors=msg) @property def id(self): return self.flavor.id @property def name(self): return self.flavor.name @property def ram(self): return self.flavor.ram @property def vcpus(self): return self.flavor.vcpus @property def links(self): return self.flavor.links @property def ephemeral(self): return self.flavor.ephemeral class Flavors(NovaRemoteModelBase): def __init__(self, context): nova_flavors = create_nova_client(context).flavors.list() self.flavors = [Flavor(flavor=item) for item in nova_flavors] def __iter__(self): for item in self.flavors: yield item trove-5.0.0/trove/guestagent/0000775000567000056710000000000012701410521017312 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/backup/0000775000567000056710000000000012701410521020557 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/backup/__init__.py0000664000567000056710000000331412701410316022673 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.guestagent.backup.backupagent import BackupAgent AGENT = BackupAgent() def backup(context, backup_info): """ Main entry point for starting a backup based on the given backup id. This will create a backup for this DB instance and will then store the backup in a configured repository (e.g. Swift) :param context: the context token which contains the users details :param backup_id: the id of the persisted backup object """ return AGENT.execute_backup(context, backup_info) def restore(context, backup_info, restore_location): """ Main entry point for restoring a backup based on the given backup id. This will transfer backup data to this instance an will carry out the appropriate restore procedure (eg. 
def restore(context, backup_info, restore_location):
    """
    Main entry point for restoring a backup based on the given backup id.

    This will transfer backup data to this instance and will carry out the
    appropriate restore procedure (e.g. mysqldump).

    :param context: the context token which contains the user's details
    :param backup_info: details (including the id) of the persisted backup
        object
    """
    return AGENT.execute_restore(context, backup_info, restore_location)
trove-5.0.0/trove/guestagent/backup/backupagent.py0000664000567000056710000001710712701410316023425 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging

from trove.backup.state import BackupState
from trove.common import cfg
from trove.common.i18n import _
from trove.common.strategies.storage import get_storage_strategy
from trove.conductor import api as conductor_api
from trove.guestagent.common import timeutils
from trove.guestagent.dbaas import get_filesystem_volume_stats
from trove.guestagent.strategies.backup.base import BackupError
from trove.guestagent.strategies.backup.base import UnknownBackupType
from trove.guestagent.strategies.backup import get_backup_strategy
from trove.guestagent.strategies.restore import get_restore_strategy

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONFIG_MANAGER = CONF.get('mysql'
                          if not CONF.datastore_manager
                          else CONF.datastore_manager)

STRATEGY = CONFIG_MANAGER.backup_strategy
BACKUP_NAMESPACE = CONFIG_MANAGER.backup_namespace
RESTORE_NAMESPACE = CONFIG_MANAGER.restore_namespace
RUNNER = get_backup_strategy(STRATEGY, BACKUP_NAMESPACE)
EXTRA_OPTS = CONF.backup_runner_options.get(STRATEGY, '')

# Try to get the incremental strategy or return the default 'backup_strategy'
INCREMENTAL = CONFIG_MANAGER.backup_incremental_strategy.get(
    STRATEGY, STRATEGY)

INCREMENTAL_RUNNER = get_backup_strategy(INCREMENTAL, BACKUP_NAMESPACE)


class BackupAgent(object):

    def _get_restore_runner(self, backup_type):
        """Returns the RestoreRunner associated with this backup type."""
        try:
            runner = get_restore_strategy(backup_type, RESTORE_NAMESPACE)
        except ImportError:
            raise UnknownBackupType("Unknown Backup type: %s in namespace %s"
                                    % (backup_type, RESTORE_NAMESPACE))
        return runner

    def stream_backup_to_storage(self, context, backup_info, runner,
                                 storage, parent_metadata={},
                                 extra_opts=EXTRA_OPTS):
        backup_id = backup_info['id']
        conductor = conductor_api.API(context)

        # Store the size of the filesystem before the backup.
mount_point = CONFIG_MANAGER.mount_point stats = get_filesystem_volume_stats(mount_point) backup_state = { 'backup_id': backup_id, 'size': stats.get('used', 0.0), 'state': BackupState.BUILDING, } conductor.update_backup(CONF.guest_id, sent=timeutils.float_utcnow(), **backup_state) LOG.debug("Updated state for %s to %s.", backup_id, backup_state) with runner(filename=backup_id, extra_opts=extra_opts, **parent_metadata) as bkup: try: LOG.debug("Starting backup %s.", backup_id) success, note, checksum, location = storage.save( bkup.manifest, bkup) backup_state.update({ 'checksum': checksum, 'location': location, 'note': note, 'success': success, 'backup_type': bkup.backup_type, }) LOG.debug("Backup %(backup_id)s completed status: " "%(success)s.", backup_state) LOG.debug("Backup %(backup_id)s file swift checksum: " "%(checksum)s.", backup_state) LOG.debug("Backup %(backup_id)s location: " "%(location)s.", backup_state) if not success: raise BackupError(note) meta = bkup.metadata() meta['datastore'] = backup_info['datastore'] meta['datastore_version'] = backup_info[ 'datastore_version'] storage.save_metadata(location, meta) backup_state.update({'state': BackupState.COMPLETED}) return meta except Exception: LOG.exception( _("Error saving backup: %(backup_id)s.") % backup_state) backup_state.update({'state': BackupState.FAILED}) raise finally: LOG.info(_("Completed backup %(backup_id)s.") % backup_state) conductor.update_backup(CONF.guest_id, sent=timeutils.float_utcnow(), **backup_state) LOG.debug("Updated state for %s to %s.", backup_id, backup_state) def execute_backup(self, context, backup_info, runner=RUNNER, extra_opts=EXTRA_OPTS, incremental_runner=INCREMENTAL_RUNNER): LOG.debug("Running backup %(id)s.", backup_info) storage = get_storage_strategy( CONF.storage_strategy, CONF.storage_namespace)(context) # Check if this is an incremental backup and grab the parent metadata parent_metadata = {} if backup_info.get('parent'): runner = incremental_runner LOG.debug("Using incremental backup runner: %s.", runner.__name__) parent = backup_info['parent'] parent_metadata = storage.load_metadata(parent['location'], parent['checksum']) # The parent could be another incremental backup so we need to # reset the location and checksum to *this* parents info parent_metadata.update({ 'parent_location': parent['location'], 'parent_checksum': parent['checksum'] }) self.stream_backup_to_storage(context, backup_info, runner, storage, parent_metadata, extra_opts) def execute_restore(self, context, backup_info, restore_location): try: LOG.debug("Getting Restore Runner %(type)s.", backup_info) restore_runner = self._get_restore_runner(backup_info['type']) LOG.debug("Getting Storage Strategy.") storage = get_storage_strategy( CONF.storage_strategy, CONF.storage_namespace)(context) runner = restore_runner(storage, location=backup_info['location'], checksum=backup_info['checksum'], restore_location=restore_location) backup_info['restore_location'] = restore_location LOG.debug("Restoring instance from backup %(id)s to " "%(restore_location)s.", backup_info) content_size = runner.restore() LOG.debug("Restore from backup %(id)s completed successfully " "to %(restore_location)s.", backup_info) LOG.debug("Restore size: %s.", content_size) except Exception: LOG.exception(_("Error restoring backup %(id)s.") % backup_info) raise else: LOG.debug("Restored backup %(id)s." 
% backup_info) trove-5.0.0/trove/guestagent/guest_log.py0000664000567000056710000003575412701410316021674 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime import enum import hashlib import os from requests.exceptions import ConnectionError from oslo_log import log as logging from swiftclient.client import ClientException from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.remote import create_swift_client from trove.common import stream_codecs from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode LOG = logging.getLogger(__name__) CONF = cfg.CONF class LogType(enum.Enum): """Represent the type of the log object.""" # System logs. These are always enabled. SYS = 1 # User logs. These can be enabled or disabled. USER = 2 class LogStatus(enum.Enum): """Represent the status of the log object.""" # The log is disabled and potentially no data is being written to # the corresponding log file Disabled = 1 # Logging is on, but no determination has been made about data availability Enabled = 2 # Logging is on, but no log data is available to publish Unavailable = 3 # Logging is on and data is available to be published Ready = 4 # Logging is on and all data has been published Published = 5 # Logging is on and some data has been published Partial = 6 # Log file has been rotated, so next publish will discard log first Rotated = 7 # Waiting for a datastore restart to begin logging Restart_Required = 8 # Now that restart has completed, regular status can be reported again # This is an internal status Restart_Completed = 9 class GuestLog(object): MF_FILE_SUFFIX = '_metafile' MF_LABEL_LOG_NAME = 'log_name' MF_LABEL_LOG_TYPE = 'log_type' MF_LABEL_LOG_FILE = 'log_file' MF_LABEL_LOG_SIZE = 'log_size' MF_LABEL_LOG_HEADER = 'log_header_digest' def __init__(self, log_context, log_name, log_type, log_user, log_file, log_exposed): self._context = log_context self._name = log_name self._type = log_type self._user = log_user self._file = log_file self._exposed = log_exposed self._size = None self._published_size = None self._header_digest = 'abc' self._published_header_digest = None self._status = None self._cached_context = None self._cached_swift_client = None self._enabled = log_type == LogType.SYS self._file_readable = False self._container_name = None self._codec = stream_codecs.JsonCodec() self._set_status(self._type == LogType.USER, LogStatus.Disabled, LogStatus.Enabled) # The directory should already exist - make sure we have access to it log_dir = os.path.dirname(self._file) operating_system.chmod( log_dir, FileMode.ADD_GRP_RX_OTH_RX, as_root=True) @property def context(self): return self._context @context.setter def context(self, context): self._context = context @property def type(self): return self._type @property def swift_client(self): if not self._cached_swift_client or ( self._cached_context != 
self.context): self._cached_swift_client = create_swift_client(self.context) self._cached_context = self.context return self._cached_swift_client @property def exposed(self): return self._exposed or self.context.is_admin @property def enabled(self): return self._enabled @enabled.setter def enabled(self, enabled): self._enabled = enabled @property def status(self): return self._status @status.setter def status(self, status): # Keep the status in Restart_Required until we're set # to Restart_Completed if (self.status != LogStatus.Restart_Required or (self.status == LogStatus.Restart_Required and status == LogStatus.Restart_Completed)): self._status = status LOG.debug("Log status for '%s' set to %s" % (self._name, status)) else: LOG.debug("Log status for '%s' *not* set to %s (currently %s)" % (self._name, status, self.status)) def get_container_name(self, force=False): if not self._container_name or force: container_name = CONF.guest_log_container_name try: self.swift_client.get_container(container_name, prefix='dummy') except ClientException as ex: if ex.http_status == 404: LOG.debug("Container '%s' not found; creating now" % container_name) self.swift_client.put_container( container_name, headers=self._get_headers()) else: LOG.exception(_("Could not retrieve container '%s'") % container_name) raise self._container_name = container_name return self._container_name def _set_status(self, use_first, first_status, second_status): if use_first: self.status = first_status else: self.status = second_status def show(self): if self.exposed: self._refresh_details() container_name = 'None' prefix = 'None' if self._published_size: container_name = self.get_container_name() prefix = self._object_prefix() pending = self._size - self._published_size if self.status == LogStatus.Rotated: pending = self._size return { 'name': self._name, 'type': self._type.name, 'status': self.status.name.replace('_', ' '), 'published': self._published_size, 'pending': pending, 'container': container_name, 'prefix': prefix, 'metafile': self._metafile_name() } else: raise exception.UnauthorizedRequest(_( "Not authorized to show log '%s'.") % self._name) def _refresh_details(self): if self._published_size is None: # Initializing, so get all the values try: meta_details = self._get_meta_details() self._published_size = int( meta_details[self.MF_LABEL_LOG_SIZE]) self._published_header_digest = ( meta_details[self.MF_LABEL_LOG_HEADER]) except ClientException as ex: if ex.http_status == 404: LOG.debug("No published metadata found for log '%s'" % self._name) self._published_size = 0 else: LOG.exception(_("Could not get meta details for log '%s'") % self._name) raise except ConnectionError as e: # A bad endpoint will cause a ConnectionError # This exception contains another exception that we want exc = e.args[0] raise exc self._update_details() LOG.debug("Log size for '%s' set to %d (published %d)" % ( self._name, self._size, self._published_size)) def _update_details(self): # Make sure we can read the file if not self._file_readable or not os.access(self._file, os.R_OK): if not os.access(self._file, os.R_OK): if operating_system.exists(self._file, as_root=True): operating_system.chmod( self._file, FileMode.ADD_ALL_R, as_root=True) self._file_readable = True if os.path.isfile(self._file): logstat = os.stat(self._file) self._size = logstat.st_size self._update_log_header_digest(self._file) if self._log_rotated(): self.status = LogStatus.Rotated # See if we have stuff to publish elif logstat.st_size > self._published_size: 
                self._set_status(self._published_size,
                                 LogStatus.Partial, LogStatus.Ready)
            # We've published everything so far
            elif logstat.st_size == self._published_size:
                self._set_status(self._published_size,
                                 LogStatus.Published, LogStatus.Enabled)
            # A size smaller than the published size should already have
            # been handled above as a rotation, so reaching here is a bug.
            else:
                raise RuntimeError("Bug in _log_rotated?")
        else:
            self._published_size = 0
            self._size = 0

        if not self._size or not self.enabled:
            user_status = LogStatus.Disabled
            if self.enabled:
                user_status = LogStatus.Enabled
            self._set_status(self._type == LogType.USER,
                             user_status, LogStatus.Unavailable)

    def _log_rotated(self):
        """If the file is smaller than the last reported size or the
        first line hash is different, we can probably assume the file
        changed under our nose.
        """
        if (self._published_size > 0 and
                (self._size < self._published_size or
                 self._published_header_digest != self._header_digest)):
            return True
        return False

    def _update_log_header_digest(self, log_file):
        with open(log_file, 'r') as log:
            self._header_digest = hashlib.md5(log.readline()).hexdigest()

    def _get_headers(self):
        return {'X-Delete-After': CONF.guest_log_expiry}

    def publish_log(self):
        if self.exposed:
            if self._log_rotated():
                LOG.debug("Log file rotation detected for '%s' - "
                          "discarding old log" % self._name)
                self._delete_log_components()
            if os.path.isfile(self._file):
                self._publish_to_container(self._file)
            else:
                raise RuntimeError(_(
                    "Cannot publish log file '%s' as it does not exist.") %
                    self._file)
            return self.show()
        else:
            raise exception.UnauthorizedRequest(_(
                "Not authorized to publish log '%s'.") % self._name)

    def discard_log(self):
        if self.exposed:
            self._delete_log_components()
            return self.show()
        else:
            raise exception.UnauthorizedRequest(_(
                "Not authorized to discard log '%s'.") % self._name)

    def _delete_log_components(self):
        container_name = self.get_container_name(force=True)
        prefix = self._object_prefix()
        swift_files = [swift_file['name']
                       for swift_file in self.swift_client.get_container(
                           container_name, prefix=prefix)[1]]
        swift_files.append(self._metafile_name())
        for swift_file in swift_files:
            self.swift_client.delete_object(container_name, swift_file)
        self._set_status(self._type == LogType.USER,
                         LogStatus.Disabled, LogStatus.Enabled)
        self._published_size = 0

    def _publish_to_container(self, log_filename):
        log_component, log_lines = '', 0
        chunk_size = CONF.guest_log_limit
        container_name = self.get_container_name(force=True)

        def _read_chunk(f):
            while True:
                current_chunk = f.read(chunk_size)
                if not current_chunk:
                    break
                yield current_chunk

        def _write_log_component():
            object_headers.update({'x-object-meta-lines': log_lines})
            component_name = '%s%s' % (self._object_prefix(),
                                       self._object_name())
            self.swift_client.put_object(container_name, component_name,
                                         log_component,
                                         headers=object_headers)
            self._published_size = (
                self._published_size + len(log_component))
            self._published_header_digest = self._header_digest

        self._refresh_details()
        self._put_meta_details()
        object_headers = self._get_headers()
        with open(log_filename, 'r') as log:
            LOG.debug("seeking to %s", self._published_size)
            log.seek(self._published_size)
            for chunk in _read_chunk(log):
                for log_line in chunk.splitlines():
                    if len(log_component) + len(log_line) > chunk_size:
                        _write_log_component()
                        log_component, log_lines = '', 0
                    log_component = log_component + log_line + '\n'
                    log_lines += 1
        if log_lines > 0:
            _write_log_component()
        self._put_meta_details()

    def _put_meta_details(self):
        metafile_name = self._metafile_name()
        metafile_details = {
            self.MF_LABEL_LOG_NAME: self._name,
self.MF_LABEL_LOG_TYPE: self._type.name, self.MF_LABEL_LOG_FILE: self._file, self.MF_LABEL_LOG_SIZE: self._published_size, self.MF_LABEL_LOG_HEADER: self._header_digest, } container_name = self.get_container_name() self.swift_client.put_object(container_name, metafile_name, self._codec.serialize(metafile_details), headers=self._get_headers()) LOG.debug("_put_meta_details has published log size as %s", self._published_size) def _metafile_name(self): return self._object_prefix().rstrip('/') + '_metafile' def _object_prefix(self): return '%(instance_id)s/%(datastore)s-%(log)s/' % { 'instance_id': CONF.guest_id, 'datastore': CONF.datastore_manager, 'log': self._name} def _object_name(self): return 'log-%s' % str(datetime.utcnow()).replace(' ', 'T') def _get_meta_details(self): LOG.debug("Getting meta details for '%s'" % self._name) metafile_name = self._metafile_name() container_name = self.get_container_name() headers, metafile_details = self.swift_client.get_object( container_name, metafile_name) LOG.debug("Found meta details for '%s'" % self._name) return self._codec.deserialize(metafile_details) trove-5.0.0/trove/guestagent/pkg.py0000664000567000056710000003656412701410316020465 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manages packages on the Guest VM. """ import commands import os import re import subprocess from tempfile import NamedTemporaryFile from oslo_log import log as logging import pexpect import six from trove.common import exception from trove.common.exception import ProcessExecutionError from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system LOG = logging.getLogger(__name__) OK = 0 RUN_DPKG_FIRST = 1 REINSTALL_FIRST = 2 CONFLICT_REMOVED = 3 class PkgAdminLockError(exception.TroveError): pass class PkgPermissionError(exception.TroveError): pass class PkgPackageStateError(exception.TroveError): pass class PkgNotFoundError(exception.NotFound): pass class PkgTimeout(exception.TroveError): pass class PkgScriptletError(exception.TroveError): pass class PkgDownloadError(exception.TroveError): pass class PkgSignError(exception.TroveError): pass class PkgBrokenError(exception.TroveError): pass class PkgConfigureError(exception.TroveError): pass class BasePackagerMixin: def pexpect_kill_proc(self, child): child.delayafterclose = 1 child.delayafterterminate = 1 try: child.close(force=True) except pexpect.ExceptionPexpect: # Close fails to terminate a sudo process on some OSes. subprocess.call(['sudo', 'kill', str(child.pid)]) def pexpect_wait_and_close_proc(self, child): child.expect(pexpect.EOF) child.close() def pexpect_run(self, cmd, output_expects, time_out): child = pexpect.spawn(cmd, timeout=time_out) try: i = child.expect(output_expects) match = child.match self.pexpect_wait_and_close_proc(child) except pexpect.TIMEOUT: self.pexpect_kill_proc(child) raise PkgTimeout("Process timeout after %i seconds." 
                             % time_out)
        return (i, match)


class RedhatPackagerMixin(BasePackagerMixin):

    def _rpm_remove_nodeps(self, package_name):
        """
        Sometimes transaction errors happen; the easy way out is to remove
        the conflicting package without its dependencies and hope it will
        be replaced by another package.
        """
        try:
            utils.execute("rpm", "-e", "--nodeps", package_name,
                          run_as_root=True, root_helper="sudo")
        except ProcessExecutionError:
            LOG.exception(_("Error removing conflict %(package)s")
                          % {'package': package_name})

    def _install(self, packages, time_out):
        """Attempts to install packages.

        Returns OK if the packages are installed or a result code if a
        recoverable error occurred. Raises an exception if a non-recoverable
        error or timeout occurs.
        """
        cmd = "sudo yum --color=never -y install %s" % " ".join(packages)
        output_expects = ['\[sudo\] password for .*:',
                          'No package (.*) available.',
                          ('file .* from install of .* conflicts with file'
                           ' from package (.*?)\r\n'),
                          'Error: (.*?) conflicts with .*?\r\n',
                          'Processing Conflict: .* conflicts (.*?)\r\n',
                          '.*scriptlet failed*',
                          'HTTP Error',
                          'No more mirrors to try.',
                          'GPG key retrieval failed:',
                          '.*already installed and latest version',
                          'Updated:',
                          'Installed:']
        LOG.debug("Running package install command: %s" % cmd)
        i, match = self.pexpect_run(cmd, output_expects, time_out)
        if i == 0:
            raise PkgPermissionError("Invalid permissions.")
        elif i == 1:
            raise PkgNotFoundError("Could not find pkg %s" % match.group(1))
        elif i == 2 or i == 3 or i == 4:
            self._rpm_remove_nodeps(match.group(1))
            return CONFLICT_REMOVED
        elif i == 5:
            raise PkgScriptletError("Package scriptlet failed")
        elif i == 6 or i == 7:
            raise PkgDownloadError("Package download problem")
        elif i == 8:
            raise PkgSignError("GPG key retrieval failed")
        return OK

    def _remove(self, package_name, time_out):
        """Removes a package.

        Returns OK if the package is removed successfully or a result code
        if a recoverable error occurs. Raises an exception if a
        non-recoverable error or timeout occurs.
        """
        cmd = "sudo yum --color=never -y remove %s" % package_name
        LOG.debug("Running package remove command: %s" % cmd)
        output_expects = ['\[sudo\] password for .*:',
                          'No Packages marked for removal',
                          'Removed:']
        i, match = self.pexpect_run(cmd, output_expects, time_out)
        if i == 0:
            raise PkgPermissionError("Invalid permissions.")
        elif i == 1:
            raise PkgNotFoundError("Could not find pkg %s" % package_name)
        return OK

    def pkg_install(self, packages, config_opts, time_out):
        result = self._install(packages, time_out)
        if result != OK:
            while result == CONFLICT_REMOVED:
                result = self._install(packages, time_out)
            if result != OK:
                raise PkgPackageStateError("Cannot install packages.")

    def pkg_is_installed(self, packages):
        packages = (packages if isinstance(packages, list)
                    else packages.split())
        cmd = "rpm -qa"
        p = commands.getstatusoutput(cmd)
        std_out = p[1]
        for pkg in packages:
            found = False
            for line in std_out.split("\n"):
                if line.find(pkg) != -1:
                    found = True
                    break
            if not found:
                return False
        return True

    def pkg_version(self, package_name):
        cmd_list = ["rpm", "-qa", "--qf", "'%{VERSION}-%{RELEASE}\n'",
                    package_name]
        p = commands.getstatusoutput(' '.join(cmd_list))
        # Need to capture the version string from the command output.
        std_out = p[1]
        for line in std_out.split("\n"):
            regex = re.compile("[0-9.]+-.*")
            matches = regex.match(line)
            if matches:
                line = matches.group()
                return line
        LOG.error(_("Unexpected output from rpm command. 
(%(output)s)") % {'output': std_out}) def pkg_remove(self, package_name, time_out): """Removes a package.""" if self.pkg_version(package_name) is None: return result = self._remove(package_name, time_out) if result != OK: raise PkgPackageStateError("Package %s is in a bad state." % package_name) class DebianPackagerMixin(BasePackagerMixin): def _fix(self, time_out): """Sometimes you have to run this command before a package will install. """ try: utils.execute("dpkg", "--configure", "-a", run_as_root=True, root_helper="sudo") except ProcessExecutionError: LOG.exception(_("Error fixing dpkg")) def _fix_package_selections(self, packages, config_opts): """ Sometimes you have to run this command before a package will install. This command sets package selections to configure package. """ selections = "" for package in packages: m = re.match('(.+)=(.+)', package) if m: package_name = m.group(1) else: package_name = package command = "sudo debconf-show %s" % package_name p = commands.getstatusoutput(command) std_out = p[1] for line in std_out.split("\n"): for selection, value in config_opts.items(): m = re.match(".* (.*/%s):.*" % selection, line) if m: selections += ("%s %s string '%s'\n" % (package_name, m.group(1), value)) if selections: with NamedTemporaryFile(delete=False) as f: fname = f.name f.write(selections) try: utils.execute("debconf-set-selections", fname, run_as_root=True, root_helper="sudo") utils.execute("dpkg", "--configure", "-a", run_as_root=True, root_helper="sudo") except ProcessExecutionError: raise PkgConfigureError("Error configuring package.") finally: os.remove(fname) def _install(self, packages, time_out): """Attempts to install packages. Returns OK if the packages are installed or a result code if a recoverable-error occurred. Raises an exception if a non-recoverable error or timeout occurs. """ cmd = "sudo -E DEBIAN_FRONTEND=noninteractive apt-get -y " \ "--force-yes --allow-unauthenticated -o " \ "DPkg::options::=--force-confmiss --reinstall " \ "install %s" % " ".join(packages) output_expects = ['.*password*', 'E: Unable to locate package (.*)', "Couldn't find package (.*)", "E: Version '.*' for '(.*)' was not found", ("dpkg was interrupted, you must manually run " "'sudo dpkg --configure -a'"), "Unable to lock the administration directory", ("E: Unable to correct problems, you have held " "broken packages."), "Setting up (.*)", "is already the newest version"] LOG.debug("Running package install command: %s" % cmd) i, match = self.pexpect_run(cmd, output_expects, time_out) if i == 0: raise PkgPermissionError("Invalid permissions.") elif i == 1 or i == 2 or i == 3: raise PkgNotFoundError("Could not find apt %s" % match.group(1)) elif i == 4: return RUN_DPKG_FIRST elif i == 5: raise PkgAdminLockError() elif i == 6: raise PkgBrokenError() return OK def _remove(self, package_name, time_out): """Removes a package. Returns OK if the package is removed successfully or a result code if a recoverable-error occurs. Raises an exception if a non-recoverable error or timeout occurs. 
""" cmd = "sudo -E apt-get -y --allow-unauthenticated remove %s" \ % package_name output_expects = ['.*password*', 'E: Unable to locate package %s' % package_name, 'Package is in a very bad inconsistent state', 'Sub-process /usr/bin/dpkg returned an error code', ("dpkg was interrupted, you must manually run " "'sudo dpkg --configure -a'"), "Unable to lock the administration directory", "Removing %s*" % package_name] LOG.debug("Running remove package command %s" % cmd) i, match = self.pexpect_run(cmd, output_expects, time_out) if i == 0: raise PkgPermissionError("Invalid permissions.") elif i == 1: raise PkgNotFoundError("Could not find pkg %s" % package_name) elif i == 2 or i == 3: return REINSTALL_FIRST elif i == 4: return RUN_DPKG_FIRST elif i == 5: raise PkgAdminLockError() return OK def pkg_install(self, packages, config_opts, time_out): """Installs packages.""" try: utils.execute("apt-get", "update", run_as_root=True, root_helper="sudo") except ProcessExecutionError: LOG.exception(_("Error updating the apt sources")) result = self._install(packages, time_out) if result != OK: if result == RUN_DPKG_FIRST: self._fix(time_out) result = self._install(packages, time_out) if result != OK: raise PkgPackageStateError("Packages is in a bad state.") # even after successful install, packages can stay unconfigured # config_opts - is dict with name/value for questions asked by # interactive configure script if config_opts: self._fix_package_selections(packages, config_opts) def pkg_version(self, package_name): p = commands.getstatusoutput("apt-cache policy %s" % package_name) std_out = p[1] for line in std_out.split("\n"): m = re.match("\s+Installed: (.*)", line) if m: version = m.group(1) if version == "(none)": version = None return version def pkg_is_installed(self, packages): packages = packages if isinstance(packages, list) else packages.split() for pkg in packages: m = re.match('(.+)=(.+)', pkg) if m: package_name = m.group(1) package_version = m.group(2) else: package_name = pkg package_version = None installed_version = self.pkg_version(package_name) if ((package_version and installed_version == package_version) or (installed_version and not package_version)): LOG.debug("Package %s already installed." % package_name) else: return False return True def pkg_remove(self, package_name, time_out): """Removes a package.""" if self.pkg_version(package_name) is None: return result = self._remove(package_name, time_out) if result != OK: if result == REINSTALL_FIRST: self._install(package_name, time_out) elif result == RUN_DPKG_FIRST: self._fix(time_out) result = self._remove(package_name, time_out) if result != OK: raise PkgPackageStateError("Package %s is in a bad state." % package_name) class BasePackage(type): def __new__(meta, name, bases, dct): if operating_system.get_os() == operating_system.REDHAT: bases += (RedhatPackagerMixin, ) else: # The default is debian bases += (DebianPackagerMixin,) return super(BasePackage, meta).__new__(meta, name, bases, dct) @six.add_metaclass(BasePackage) class Package(object): pass trove-5.0.0/trove/guestagent/__init__.py0000664000567000056710000000000012701410316021413 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/volume.py0000664000567000056710000002064612701410316021205 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
from tempfile import NamedTemporaryFile

from oslo_log import log as logging
import pexpect

from trove.common import cfg
from trove.common.exception import GuestError
from trove.common.exception import ProcessExecutionError
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system

TMP_MOUNT_POINT = "/mnt/volume"

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class VolumeDevice(object):

    def __init__(self, device_path):
        self.device_path = device_path

    def migrate_data(self, source_dir, target_subdir=None):
        """Synchronize the data from the source directory to the new
        volume; optionally to a new sub-directory on the new volume.
        """
        self.mount(TMP_MOUNT_POINT, write_to_fstab=False)
        if source_dir[-1] != '/':
            source_dir = "%s/" % source_dir
        target_dir = TMP_MOUNT_POINT
        if target_subdir:
            target_dir = target_dir + "/" + target_subdir
        utils.execute("sudo", "rsync", "--safe-links", "--perms",
                      "--recursive", "--owner", "--group", "--xattrs",
                      "--sparse", source_dir, target_dir)
        self.unmount(TMP_MOUNT_POINT)

    def _check_device_exists(self):
        """Check that the device path exists.

        Verify that the device path has actually been created and can report
        its size; only then is it available for formatting. Retry num_tries
        times to account for the time lag.
        """
        try:
            num_tries = CONF.num_tries
            LOG.debug("Checking if %s exists." % self.device_path)
            utils.execute('sudo', 'blockdev', '--getsize64',
                          self.device_path, attempts=num_tries)
        except ProcessExecutionError:
            LOG.exception(_("Error getting device status"))
            raise GuestError(_("InvalidDevicePath(path=%s)") %
                             self.device_path)

    def _check_format(self):
        """Checks that an unmounted volume is formatted."""
        cmd = "sudo dumpe2fs %s" % self.device_path
        LOG.debug("Checking whether %s is formatted: %s." %
                  (self.device_path, cmd))
        child = pexpect.spawn(cmd)
        try:
            i = child.expect(['has_journal', 'Wrong magic number'])
            if i == 0:
                return
            volume_fstype = CONF.volume_fstype
            raise IOError(
                _('Device path at {0} did not seem to be {1}.').format(
                    self.device_path, volume_fstype))
        except pexpect.EOF:
            raise IOError(_("Volume was not formatted."))
        child.expect(pexpect.EOF)

    def _format(self):
        """Calls mkfs to format the device at device_path."""
        volume_fstype = CONF.volume_fstype
        format_options = CONF.format_options
        cmd = "sudo mkfs -t %s %s %s" % (volume_fstype, format_options,
                                         self.device_path)
        volume_format_timeout = CONF.volume_format_timeout
        LOG.debug("Formatting %s. Executing: %s." %
                  (self.device_path, cmd))
        child = pexpect.spawn(cmd, timeout=volume_format_timeout)
        # child.expect("(y,n)")
        # child.sendline('y')
        child.expect(pexpect.EOF)

    def format(self):
        """Formats the device at device_path and checks the filesystem."""
        self._check_device_exists()
        self._format()
        self._check_format()

    def mount(self, mount_point, write_to_fstab=True):
        """Mounts, and writes to fstab."""
        LOG.debug("Will mount %s at %s."
% (self.device_path, mount_point)) mount_point = VolumeMountPoint(self.device_path, mount_point) mount_point.mount() if write_to_fstab: mount_point.write_to_fstab() def resize_fs(self, mount_point): """Resize the filesystem on the specified device.""" self._check_device_exists() try: # check if the device is mounted at mount_point before e2fsck if not os.path.ismount(mount_point): utils.execute("e2fsck", "-f", "-p", self.device_path, run_as_root=True, root_helper="sudo") utils.execute("resize2fs", self.device_path, run_as_root=True, root_helper="sudo") except ProcessExecutionError: LOG.exception(_("Error resizing file system.")) raise GuestError(_("Error resizing the filesystem: %s") % self.device_path) def unmount(self, mount_point): if os.path.exists(mount_point): cmd = "sudo umount %s" % mount_point child = pexpect.spawn(cmd) child.expect(pexpect.EOF) def unmount_device(self, device_path): # unmount if device is already mounted mount_points = self.mount_points(device_path) for mnt in mount_points: LOG.info(_("Device %(device)s is already mounted in " "%(mount_point)s. Unmounting now.") % {'device': device_path, 'mount_point': mnt}) self.unmount(mnt) def mount_points(self, device_path): """Returns a list of mount points on the specified device.""" try: cmd = "grep %s /etc/mtab | awk '{print $2}'" % device_path stdout, stderr = utils.execute(cmd, shell=True) return stdout.strip().split('\n') except ProcessExecutionError: LOG.exception(_("Error retrieving mount points")) raise GuestError(_("Could not obtain a list of mount points for " "device: %s") % device_path) def set_readahead_size(self, readahead_size, execute_function=utils.execute): """Set the readahead size of disk.""" self._check_device_exists() try: execute_function("sudo", "blockdev", "--setra", readahead_size, self.device_path) except ProcessExecutionError: LOG.exception(_("Error setting readhead size to %(size)s " "for device %(device)s.") % {'size': readahead_size, 'device': self.device_path}) raise GuestError(_("Error setting readhead size: %s.") % self.device_path) class VolumeMountPoint(object): def __init__(self, device_path, mount_point): self.device_path = device_path self.mount_point = mount_point self.volume_fstype = CONF.volume_fstype self.mount_options = CONF.mount_options def mount(self): if not os.path.exists(self.mount_point): operating_system.create_directory(self.mount_point, as_root=True) LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, " "volume_type:{2}, mount options:{3}".format( self.device_path, self.mount_point, self.volume_fstype, self.mount_options)) cmd = ("sudo mount -t %s -o %s %s %s" % (self.volume_fstype, self.mount_options, self.device_path, self.mount_point)) child = pexpect.spawn(cmd) child.expect(pexpect.EOF) def write_to_fstab(self): fstab_line = ("%s\t%s\t%s\t%s\t0\t0" % (self.device_path, self.mount_point, self.volume_fstype, self.mount_options)) LOG.debug("Writing new line to fstab:%s" % fstab_line) with open('/etc/fstab', "r") as fstab: fstab_content = fstab.read() with NamedTemporaryFile(delete=False) as tempfstab: tempfstab.write(fstab_content + fstab_line) utils.execute("sudo", "install", "-o", "root", "-g", "root", "-m", "644", tempfstab.name, "/etc/fstab") os.remove(tempfstab.name) trove-5.0.0/trove/guestagent/module/0000775000567000056710000000000012701410521020577 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/module/driver_manager.py0000664000567000056710000000701612701410316024144 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging import stevedore from trove.common import base_exception as exception from trove.common import cfg from trove.common.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class ModuleDriverManager(object): MODULE_DRIVER_NAMESPACE = 'trove.guestagent.module.drivers' def __init__(self): LOG.info(_('Initializing module driver manager.')) self._drivers = {} self._module_types = [mt.lower() for mt in CONF.module_types] self._load_drivers() def _load_drivers(self): manager = stevedore.enabled.EnabledExtensionManager( namespace=self.MODULE_DRIVER_NAMESPACE, check_func=self._check_extension, invoke_on_load=True, invoke_kwds={}) try: manager.map(self.add_driver_extension) except stevedore.exception.NoMatches: LOG.info(_("No module drivers loaded")) def _check_extension(self, extension): """Checks for required methods in driver objects.""" driver = extension.obj supported = False try: LOG.info(_('Loading Module driver: %s'), driver.get_type()) if driver.get_type() != driver.get_type().lower(): raise AttributeError(_("Driver 'type' must be lower-case")) LOG.debug(' description: %s', driver.get_description()) LOG.debug(' updated : %s', driver.get_updated()) required_attrs = ['apply', 'remove'] for attr in required_attrs: if not hasattr(driver, attr): raise AttributeError( _("Driver '%(type)s' missing attribute: %(attr)s") % {'type': driver.get_type(), 'attr': attr}) if driver.get_type() in self._module_types: supported = True else: LOG.info(_("Driver '%s' not supported, skipping"), driver.get_type) except AttributeError as ex: LOG.exception(_("Exception loading module driver: %s"), unicode(ex)) return supported def add_driver_extension(self, extension): # Add a module driver from the extension. # If the stevedore manager is changed to one that doesn't # check the extension driver, then it should be done manually here # by calling self._check_extension(extension) driver = extension.obj driver_type = driver.get_type() LOG.info(_('Loaded module driver: %s'), driver_type) if driver_type in self._drivers: raise exception.Error(_("Found duplicate driver: %s") % driver_type) self._drivers[driver_type] = driver def get_driver(self, driver_type): found = None if driver_type in self._drivers: found = self._drivers[driver_type] return found trove-5.0.0/trove/guestagent/module/__init__.py0000664000567000056710000000000012701410316022700 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/module/drivers/0000775000567000056710000000000012701410521022255 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/module/drivers/__init__.py0000664000567000056710000000000012701410316024356 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/module/drivers/module_driver.py0000664000567000056710000000421112701410316025467 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc
import six

from oslo_log import log as logging

from trove.common import cfg

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


@six.add_metaclass(abc.ABCMeta)
class ModuleDriver(object):
    """Base class that defines the contract for module drivers.

    Note that you don't have to derive from this class to have a valid
    driver; it is purely a convenience.
    """

    def get_type(self):
        """This is used when setting up a module in Trove, and is here for
        code clarity.  It just returns the name of the driver.
        """
        return self.get_name()

    def get_name(self):
        """Attempt to generate a usable name based on the class name. If
        overridden, must be in lower-case.
        """
        return self.__class__.__name__.lower().replace(
            'driver', '').replace(' ', '_')

    @abc.abstractmethod
    def get_description(self):
        """Description for the driver."""
        pass

    @abc.abstractmethod
    def get_updated(self):
        """Date the driver was last updated."""
        pass

    @abc.abstractmethod
    def apply(self, name, datastore, ds_version, data_file):
        """Apply the data to the guest instance. Return status and message
        as a tuple.
        """
        return False, "Not a concrete driver"

    @abc.abstractmethod
    def remove(self, name, datastore, ds_version, data_file):
        """Remove the data from the guest instance. Return status and message
        as a tuple.
        """
        return False, "Not a concrete driver"
trove-5.0.0/trove/guestagent/module/drivers/ping_driver.py0000664000567000056710000000470212701410316025144 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from datetime import date

from oslo_log import log as logging

from trove.common import cfg
from trove.common.i18n import _
from trove.common import stream_codecs
from trove.guestagent.common import operating_system
from trove.guestagent.module.drivers import module_driver

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class PingDriver(module_driver.ModuleDriver):
    """Concrete module to show implementation and functionality. Responds
    like an actual module driver, but does nothing except return the
    value of the message key in the contents file. For example, if the
    file contains 'message=Hello' then the message returned by module-apply
    will be 'Hello.'
""" def get_type(self): return 'ping' def get_description(self): return "Ping Guestagent Module Driver" def get_updated(self): return date(2016, 3, 4) def apply(self, name, datastore, ds_version, data_file): success = False message = "Message not found in contents file" try: data = operating_system.read_file( data_file, codec=stream_codecs.KeyValueCodec()) for key, value in data.items(): if 'message' == key.lower(): success = True message = value break except Exception: # assume we couldn't read the file, because there was some # issue with it (for example, it's a binary file). Just log # it and drive on. LOG.error(_("Could not extract contents from '%s' - possibly " "a binary file?") % name) return success, message def _is_binary(self, data_str): bool(data_str.translate(None, self.TEXT_CHARS)) def remove(self, name, datastore, ds_version, data_file): return True, "" trove-5.0.0/trove/guestagent/module/module_manager.py0000664000567000056710000002051112701410316024131 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import os from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import stream_codecs from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system LOG = logging.getLogger(__name__) CONF = cfg.CONF class ModuleManager(): """This is a Manager utility class (mixin) for managing module-related tasks. 
""" MODULE_APPLY_TO_ALL = 'all' MODULE_BASE_DIR = guestagent_utils.build_file_path('~', 'modules') MODULE_CONTENTS_FILENAME = 'contents.dat' MODULE_RESULT_FILENAME = 'result.json' @classmethod def get_current_timestamp(cls): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") @classmethod def apply_module(cls, driver, module_type, name, tenant, datastore, ds_version, contents, module_id, md5, auto_apply, visible): tenant = tenant or cls.MODULE_APPLY_TO_ALL datastore = datastore or cls.MODULE_APPLY_TO_ALL ds_version = ds_version or cls.MODULE_APPLY_TO_ALL module_dir = cls.build_module_dir(module_type, module_id) data_file = cls.write_module_contents(module_dir, contents, md5) applied = True message = None now = cls.get_current_timestamp() default_result = cls.build_default_result( module_type, name, tenant, datastore, ds_version, module_id, md5, auto_apply, visible, now) result = cls.read_module_result(module_dir, default_result) try: applied, message = driver.apply( name, datastore, ds_version, data_file) except Exception as ex: LOG.exception(_("Could not apply module '%s'") % name) applied = False message = ex.message finally: status = 'OK' if applied else 'ERROR' admin_only = (not visible or tenant == cls.MODULE_APPLY_TO_ALL or auto_apply) result['status'] = status result['message'] = message result['updated'] = now result['id'] = module_id result['md5'] = md5 result['tenant'] = tenant result['auto_apply'] = auto_apply result['visible'] = visible result['admin_only'] = admin_only cls.write_module_result(module_dir, result) return result @classmethod def build_module_dir(cls, module_type, module_id): sub_dir = os.path.join(module_type, module_id) module_dir = guestagent_utils.build_file_path( cls.MODULE_BASE_DIR, sub_dir) if not operating_system.exists(module_dir, is_directory=True): operating_system.create_directory(module_dir, force=True) return module_dir @classmethod def write_module_contents(cls, module_dir, contents, md5): contents_file = cls.build_contents_filename(module_dir) operating_system.write_file(contents_file, contents, codec=stream_codecs.Base64Codec(), encode=False) return contents_file @classmethod def build_contents_filename(cls, module_dir): contents_file = guestagent_utils.build_file_path( module_dir, cls.MODULE_CONTENTS_FILENAME) return contents_file @classmethod def build_default_result(cls, module_type, name, tenant, datastore, ds_version, module_id, md5, auto_apply, visible, now): admin_only = (not visible or tenant == cls.MODULE_APPLY_TO_ALL or auto_apply) result = { 'type': module_type, 'name': name, 'datastore': datastore, 'datastore_version': ds_version, 'tenant': tenant, 'id': module_id, 'md5': md5, 'status': None, 'message': None, 'created': now, 'updated': now, 'removed': None, 'auto_apply': auto_apply, 'visible': visible, 'admin_only': admin_only, 'contents': None, } return result @classmethod def read_module_result(cls, result_file, default=None): result_file = cls.get_result_filename(result_file) result = default try: result = operating_system.read_file( result_file, codec=stream_codecs.JsonCodec()) except Exception: if not result: LOG.exception(_("Could not find module result in %s") % result_file) raise return result @classmethod def get_result_filename(cls, file_or_dir): result_file = file_or_dir if operating_system.exists(file_or_dir, is_directory=True): result_file = guestagent_utils.build_file_path( file_or_dir, cls.MODULE_RESULT_FILENAME) return result_file @classmethod def write_module_result(cls, result_file, result): result_file = 
cls.get_result_filename(result_file) operating_system.write_file( result_file, result, codec=stream_codecs.JsonCodec()) @classmethod def read_module_results(cls, is_admin=False, include_contents=False): """Read all the module results on the guest and return a list of them. """ results = [] pattern = cls.MODULE_RESULT_FILENAME result_files = operating_system.list_files_in_directory( cls.MODULE_BASE_DIR, recursive=True, pattern=pattern) for result_file in result_files: result = cls.read_module_result(result_file) if (not result.get('removed') and (is_admin or result.get('visible'))): if include_contents: codec = stream_codecs.Base64Codec() if not is_admin and result.get('admin_only'): contents = ( "Must be admin to retrieve contents for module %s" % result.get('name', 'Unknown')) result['contents'] = codec.serialize(contents) else: contents_dir = os.path.dirname(result_file) contents_file = cls.build_contents_filename( contents_dir) result['contents'] = operating_system.read_file( contents_file, codec=codec, decode=False) results.append(result) return results @classmethod def remove_module(cls, driver, module_type, module_id, name, datastore, ds_version): datastore = datastore or cls.MODULE_APPLY_TO_ALL ds_version = ds_version or cls.MODULE_APPLY_TO_ALL module_dir = cls.build_module_dir(module_type, module_id) contents_file = cls.build_contents_filename(module_dir) if not operating_system.exists(cls.get_result_filename(module_dir)): raise exception.NotFound( _("Module '%s' has not been applied") % name) try: removed, message = driver.remove( name, datastore, ds_version, contents_file) cls.remove_module_result(module_dir) except Exception: LOG.exception(_("Could not remove module '%s'") % name) raise return removed, message @classmethod def remove_module_result(cls, result_file): now = cls.get_current_timestamp() result = cls.read_module_result(result_file, None) result['removed'] = now cls.write_module_result(result_file, result) trove-5.0.0/trove/guestagent/service.py0000664000567000056710000000225412701410316021331 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import routes from trove.common import wsgi class Controller(wsgi.Controller): """Base controller class.""" pass class API(wsgi.Router): """Defines the API routes.""" def __init__(self): mapper = routes.Mapper() super(API, self).__init__(mapper) self._instance_router(mapper) def _instance_router(self, mapper): resource = Controller().create_resource() path = "/guests" mapper.resource("guest", path, controller=resource) def app_factory(global_conf, **local_conf): return API() trove-5.0.0/trove/guestagent/models.py0000664000567000056710000000601112701410316021147 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
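# --- Editor's illustrative sketch, not part of the original Trove source.
# ModuleManager above tracks each applied module through a result file
# next to the module contents. A stand-alone sketch of that lifecycle
# using plain json instead of the operating_system codecs; the file name
# and timestamp format are assumptions:
def _demo_module_result_roundtrip(module_dir):
    """Write a result, read it back, then mark it removed."""
    import json
    import os
    result_file = os.path.join(module_dir, 'result.json')
    result = {'status': 'OK', 'message': None, 'removed': None}
    with open(result_file, 'w') as f:
        json.dump(result, f)
    with open(result_file) as f:
        stored = json.load(f)
    # Removal is recorded as a timestamp rather than deleting the file,
    # so list operations can filter removed modules.
    stored['removed'] = '2016-01-01 00:00:00'
    with open(result_file, 'w') as f:
        json.dump(stored, f)
    return stored
# --- End of editor's sketch.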
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime from datetime import timedelta from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.db import get_db_api from trove.db import models as dbmodels LOG = logging.getLogger(__name__) CONF = cfg.CONF AGENT_HEARTBEAT = CONF.agent_heartbeat_time def persisted_models(): return {'agent_heartbeats': AgentHeartBeat} class AgentHeartBeat(dbmodels.DatabaseModelBase): """Defines the state of a Guest Agent.""" _data_fields = ['instance_id', 'updated_at', 'guest_agent_version', 'deleted', 'deleted_at'] _table_name = 'agent_heartbeats' def __init__(self, **kwargs): super(AgentHeartBeat, self).__init__(**kwargs) @classmethod def create(cls, **values): values['id'] = utils.generate_uuid() heartbeat = cls(**values).save() if not heartbeat.is_valid(): raise exception.InvalidModelError(errors=heartbeat.errors) return heartbeat def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) self['updated_at'] = utils.utcnow() LOG.debug("Saving %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().save(self) @classmethod def find_all_by_version(cls, guest_agent_version, deleted=0): if guest_agent_version is None: raise exception.ModelNotFoundError() heartbeats = cls.find_all(guest_agent_version=guest_agent_version, deleted=deleted) if heartbeats is None or heartbeats.count() == 0: raise exception.ModelNotFoundError( guest_agent_version=guest_agent_version) return heartbeats @classmethod def find_by_instance_id(cls, instance_id): if instance_id is None: raise exception.ModelNotFoundError(instance_id=instance_id) try: return cls.find_by(instance_id=instance_id) except exception.NotFound: LOG.exception(_("Error finding instance %s") % instance_id) raise exception.ModelNotFoundError(instance_id=instance_id) @staticmethod def is_active(agent): return (datetime.now() - agent.updated_at < timedelta(seconds=AGENT_HEARTBEAT)) trove-5.0.0/trove/guestagent/strategies/0000775000567000056710000000000012701410521021464 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/backup/0000775000567000056710000000000012701410521022731 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/backup/__init__.py0000664000567000056710000000167212701410316025052 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
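# --- Editor's illustrative sketch, not part of the original Trove source.
# The liveness test used by AgentHeartBeat above: an agent is 'active'
# when its last heartbeat is newer than agent_heartbeat_time seconds.
# Note the model stores utils.utcnow(), so utcnow() is the safer clock
# to compare against; the helper name and default are assumptions:
def _demo_heartbeat_is_active(updated_at, heartbeat_time=10):
    from datetime import datetime, timedelta
    return datetime.utcnow() - updated_at < timedelta(seconds=heartbeat_time)
# --- End of editor's sketch.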
# from oslo_log import log as logging from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) def get_backup_strategy(backup_driver, ns=__name__): LOG.debug("Getting backup strategy: %s." % backup_driver) return Strategy.get_strategy(backup_driver, ns) trove-5.0.0/trove/guestagent/strategies/backup/mysql_impl.py0000664000567000056710000001021612701410316025473 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re from oslo_log import log as logging from trove.common.i18n import _ from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.datastore.mysql_common.service import ADMIN_USER_NAME from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) class MySQLDump(base.BackupRunner): """Implementation of Backup Strategy for MySQLDump.""" __strategy_name__ = 'mysqldump' @property def cmd(self): user_and_pass = ( ' --password=%(password)s -u %(user)s ' '2>/tmp/mysqldump.log' % {'password': MySqlApp.get_auth_password(), 'user': ADMIN_USER_NAME}) cmd = ('mysqldump' ' --all-databases' ' %(extra_opts)s' ' --opt' + user_and_pass) return cmd + self.zip_cmd + self.encrypt_cmd class InnoBackupEx(base.BackupRunner): """Implementation of Backup Strategy for InnoBackupEx.""" __strategy_name__ = 'innobackupex' @property def cmd(self): cmd = ('sudo innobackupex' ' --stream=xbstream' ' %(extra_opts)s ' + MySqlApp.get_data_dir() + ' 2>/tmp/innobackupex.log' ) return cmd + self.zip_cmd + self.encrypt_cmd def check_process(self): """Check the output from innobackupex for 'completed OK!'.""" LOG.debug('Checking innobackupex process output.') with open('/tmp/innobackupex.log', 'r') as backup_log: output = backup_log.read() LOG.info(output) if not output: LOG.error(_("Innobackupex log file empty.")) return False last_line = output.splitlines()[-1].strip() if not re.search('completed OK!', last_line): LOG.error(_("Innobackupex did not complete successfully.")) return False return True def metadata(self): LOG.debug('Getting metadata from backup.') meta = {} lsn = re.compile("The latest check point \(for incremental\): '(\d+)'") with open('/tmp/innobackupex.log', 'r') as backup_log: output = backup_log.read() match = lsn.search(output) if match: meta = {'lsn': match.group(1)} LOG.info(_("Metadata for backup: %s.") % str(meta)) return meta @property def filename(self): return '%s.xbstream' % self.base_filename class InnoBackupExIncremental(InnoBackupEx): """InnoBackupEx incremental backup.""" def __init__(self, *args, **kwargs): if not kwargs.get('lsn'): raise AttributeError('lsn attribute missing, bad parent?') super(InnoBackupExIncremental, self).__init__(*args, **kwargs) self.parent_location = kwargs.get('parent_location') self.parent_checksum = kwargs.get('parent_checksum') @property def cmd(self): cmd = ('sudo innobackupex' ' --stream=xbstream' ' --incremental' ' --incremental-lsn=%(lsn)s' ' 
%(extra_opts)s ' + MySqlApp.get_data_dir() + ' 2>/tmp/innobackupex.log') return cmd + self.zip_cmd + self.encrypt_cmd def metadata(self): _meta = super(InnoBackupExIncremental, self).metadata() _meta.update({ 'parent_location': self.parent_location, 'parent_checksum': self.parent_checksum, }) return _meta trove-5.0.0/trove/guestagent/strategies/backup/base.py0000664000567000056710000001037112701410316024221 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import os import signal from oslo_log import log as logging from eventlet.green import subprocess from trove.common import cfg, utils from trove.common.strategies.strategy import Strategy CONF = cfg.CONF LOG = logging.getLogger(__name__) class BackupError(Exception): """Error running the Backup Command.""" class UnknownBackupType(Exception): """Unknown backup type.""" class BackupRunner(Strategy): """Base class for Backup Strategy implementations.""" __strategy_type__ = 'backup_runner' __strategy_ns__ = 'trove.guestagent.strategies.backup' # The actual system call to run the backup cmd = None is_zipped = CONF.backup_use_gzip_compression is_encrypted = CONF.backup_use_openssl_encryption encrypt_key = CONF.backup_aes_cbc_key def __init__(self, filename, **kwargs): self.base_filename = filename self.process = None self.pid = None kwargs.update({'filename': filename}) self.command = self.cmd % kwargs super(BackupRunner, self).__init__() @property def backup_type(self): return type(self).__name__ def _run(self): LOG.debug("BackupRunner running cmd: %s", self.command) self.process = subprocess.Popen(self.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid) self.pid = self.process.pid def __enter__(self): """Start up the process.""" self._run_pre_backup() self._run() return self def __exit__(self, exc_type, exc_value, traceback): """Clean up everything.""" if exc_type is not None: return False if getattr(self, 'process', None): try: # Send a sigterm to the session leader, so that all # child processes are killed and cleaned up on terminate # (Ensures zombie processes aren't left around on a FAILURE) # https://bugs.launchpad.net/trove/+bug/1253850 os.killpg(self.process.pid, signal.SIGTERM) self.process.terminate() except OSError: # Already stopped pass utils.raise_if_process_errored(self.process, BackupError) if not self.check_process(): raise BackupError self._run_post_backup() return True def metadata(self): """Hook for subclasses to store metadata from the backup.""" return {} @property def filename(self): """Subclasses may overwrite this to declare a format (.tar).""" return self.base_filename @property def manifest(self): return "%s%s%s" % (self.filename, self.zip_manifest, self.encrypt_manifest) @property def zip_cmd(self): return ' | gzip' if self.is_zipped else '' @property def zip_manifest(self): return '.gz' if self.is_zipped else '' @property def encrypt_cmd(self): return (' | openssl enc 
-aes-256-cbc -salt -pass pass:%s' % self.encrypt_key) if self.is_encrypted else '' @property def encrypt_manifest(self): return '.enc' if self.is_encrypted else '' def check_process(self): """Hook for subclasses to check process for errors.""" return True def read(self, chunk_size): return self.process.stdout.read(chunk_size) def _run_pre_backup(self): pass def _run_post_backup(self): pass trove-5.0.0/trove/guestagent/strategies/backup/experimental/0000775000567000056710000000000012701410521025426 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/backup/experimental/redis_impl.py0000664000567000056710000000257712701410316030144 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.guestagent.datastore.experimental.redis import service from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) class RedisBackup(base.BackupRunner): """Implementation of Backup Strategy for Redis.""" __strategy_name__ = 'redisbackup' def __init__(self, filename, **kwargs): self.app = service.RedisApp() super(RedisBackup, self).__init__(filename, **kwargs) @property def cmd(self): cmd = 'sudo cat %s' % self.app.get_persistence_filepath() return cmd + self.zip_cmd + self.encrypt_cmd def _run_pre_backup(self): self.app.admin.persist_data() LOG.debug('Redis data persisted.') trove-5.0.0/trove/guestagent/strategies/backup/experimental/__init__.py0000664000567000056710000000000012701410316027527 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/backup/experimental/postgresql_impl.py0000664000567000056710000000177612701410316031241 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) class PgDump(base.BackupRunner): """Implementation of Backup Strategy for pg_dump.""" __strategy_name__ = 'pg_dump' @property def cmd(self): cmd = 'sudo -u postgres pg_dumpall ' return cmd + self.zip_cmd + self.encrypt_cmd trove-5.0.0/trove/guestagent/strategies/backup/experimental/db2_impl.py0000664000567000056710000000775012701410316027503 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp # All Rights Reserved. 
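# --- Editor's illustrative sketch, not part of the original Trove source.
# BackupRunner in the preceding base.py composes a shell pipeline and the
# matching artifact suffixes: cmd | gzip | openssl, with '.gz'/'.enc'
# appended to the manifest name in the same order. A stand-alone sketch
# of that composition (helper name and demo key are assumptions):
def _demo_backup_pipeline(base_cmd, zipped=True, encrypted=True, key='KEY'):
    cmd, suffix = base_cmd, ''
    if zipped:
        cmd += ' | gzip'
        suffix += '.gz'
    if encrypted:
        cmd += ' | openssl enc -aes-256-cbc -salt -pass pass:%s' % key
        suffix += '.enc'
    return cmd, suffix

# Example: _demo_backup_pipeline('sudo tar cPf - /var/lib/db')
# -> ('sudo tar cPf - /var/lib/db | gzip | openssl enc ...', '.gz.enc')
# --- End of editor's sketch.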
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import service
from trove.guestagent.datastore.experimental.db2 import system
from trove.guestagent.db import models
from trove.guestagent.strategies.backup import base

CONF = cfg.CONF
DB2_DBPATH = CONF.db2.mount_point
DB2_BACKUP_DIR = DB2_DBPATH + "/backup"
LOG = logging.getLogger(__name__)


class DB2Backup(base.BackupRunner):
    """Implementation of Backup Strategy for DB2."""
    __strategy_name__ = 'db2backup'

    def __init__(self, *args, **kwargs):
        self.admin = service.DB2Admin()
        super(DB2Backup, self).__init__(*args, **kwargs)

    def _run_pre_backup(self):
        """Create archival contents in the dump dir."""
        try:
            est_dump_size = self.estimate_dump_size()
            avail = operating_system.get_bytes_free_on_fs(DB2_DBPATH)
            if est_dump_size > avail:
                self.cleanup()
                raise OSError(_("Need more free space to back up the DB2 "
                                "database, estimated %(est_dump_size)s"
                                " and found %(avail)s bytes free ") %
                              {'est_dump_size': est_dump_size,
                               'avail': avail})

            operating_system.create_directory(DB2_BACKUP_DIR,
                                              system.DB2_INSTANCE_OWNER,
                                              system.DB2_INSTANCE_OWNER,
                                              as_root=True)
            service.run_command(system.QUIESCE_DB2)
            dbNames = self.list_dbnames()
            for dbName in dbNames:
                service.run_command(system.BACKUP_DB % {
                    'dbname': dbName, 'dir': DB2_BACKUP_DIR})

            service.run_command(system.UNQUIESCE_DB2)
        except exception.ProcessExecutionError as e:
            LOG.debug("Caught exception when preparing the directory")
            self.cleanup()
            raise e

    @property
    def cmd(self):
        cmd = 'sudo tar cPf - ' + DB2_BACKUP_DIR
        return cmd + self.zip_cmd + self.encrypt_cmd

    def cleanup(self):
        operating_system.remove(DB2_BACKUP_DIR, force=True, as_root=True)

    def _run_post_backup(self):
        self.cleanup()

    def list_dbnames(self):
        dbNames = []
        databases, marker = self.admin.list_databases()
        for database in databases:
            mydb = models.MySQLDatabase()
            mydb.deserialize(database)
            dbNames.append(mydb.name)
        return dbNames

    def estimate_dump_size(self):
        """
        Estimating the size of the backup based on the size of the data
        returned from the get_db_size procedure. The size of the backup
        is always going to be smaller than the size of the data.
        """
        try:
            dbs = self.list_dbnames()
            size = 0
            for dbname in dbs:
                out = service.run_command(system.GET_DB_SIZE %
                                          {'dbname': dbname})
                size = size + out
        except exception.ProcessExecutionError:
            LOG.debug("Error while trying to get db size info")
        LOG.debug("Estimated size for databases: " + str(size))
        return size
trove-5.0.0/trove/guestagent/strategies/backup/experimental/couchdb_impl.py0000664000567000056710000000251112701410316030431 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
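# --- Editor's illustrative sketch, not part of the original Trove source.
# DB2Backup above (and MongoDump below) guard the dump with a free-space
# check. A stand-alone POSIX version of that guard; the helper name is
# an assumption:
def _demo_precheck_free_space(estimated_bytes, mount_point):
    """Refuse to start a dump when the estimated size exceeds the free
    bytes on the target filesystem.
    """
    import os
    st = os.statvfs(mount_point)
    avail = st.f_bavail * st.f_frsize
    if estimated_bytes > avail:
        raise OSError("Not enough free space: need %d, have %d"
                      % (estimated_bytes, avail))
# --- End of editor's sketch.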
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.guestagent.datastore.experimental.couchdb import service from trove.guestagent.strategies.backup import base class CouchDBBackup(base.BackupRunner): __strategy_name__ = 'couchdbbackup' @property def cmd(self): """ CouchDB backup is based on a simple filesystem copy of the database files. Each database is a single fully contained append only file. For example, if a user creates a database 'foo', then a corresponding 'foo.couch' file will be created in the database directory which by default is in '/var/lib/couchdb'. """ cmd = 'sudo tar cpPf - ' + service.COUCHDB_LIB_DIR return cmd + self.zip_cmd + self.encrypt_cmd trove-5.0.0/trove/guestagent/strategies/backup/experimental/mongo_impl.py0000664000567000056710000000777412701410316030161 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 eBay Software Foundation # Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import ( service as mongo_service) from trove.guestagent.datastore.experimental.mongodb import ( system as mongo_system) from trove.guestagent.strategies.backup import base CONF = cfg.CONF LOG = logging.getLogger(__name__) MONGODB_DBPATH = CONF.mongodb.mount_point MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump" LARGE_TIMEOUT = 1200 class MongoDump(base.BackupRunner): """Implementation of Backup Strategy for MongoDump.""" __strategy_name__ = 'mongodump' def __init__(self, *args, **kwargs): self.app = mongo_service.MongoDBApp() super(MongoDump, self).__init__(*args, **kwargs) def _run_pre_backup(self): """Create archival contents in dump dir""" try: est_dump_size = self.estimate_dump_size() avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH) if est_dump_size > avail: self.cleanup() # TODO(atomic77) Though we can fully recover from this error # BackupRunner will leave the trove instance in a BACKUP state raise OSError(_("Need more free space to run mongodump, " "estimated %(est_dump_size)s" " and found %(avail)s bytes free ") % {'est_dump_size': est_dump_size, 'avail': avail}) operating_system.create_directory(MONGO_DUMP_DIR, as_root=True) operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER, mongo_system.MONGO_USER, as_root=True) # high timeout here since mongodump can take a long time utils.execute_with_timeout( 'mongodump', '--out', MONGO_DUMP_DIR, *(self.app.admin_cmd_auth_params()), run_as_root=True, root_helper='sudo', timeout=LARGE_TIMEOUT ) except exception.ProcessExecutionError as e: LOG.debug("Caught exception when creating the dump") self.cleanup() raise e @property def cmd(self): """Tars and streams the dump dir contents to the stdout """ cmd = 'sudo tar cPf - ' + MONGO_DUMP_DIR return cmd + self.zip_cmd + self.encrypt_cmd def cleanup(self): operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True) def _run_post_backup(self): self.cleanup() def estimate_dump_size(self): """ Estimate the space that the mongodump will take based on the output of db.stats().dataSize. This seems to be conservative, as the actual bson output in many cases is a fair bit smaller. """ dbs = self.app.list_all_dbs() # mongodump does not dump the content of the local database dbs.remove('local') dbstats = dict([(d, 0) for d in dbs]) for d in dbstats: dbstats[d] = self.app.db_data_size(d) LOG.debug("Estimated size for databases: " + str(dbstats)) return sum(dbstats.values()) trove-5.0.0/trove/guestagent/strategies/backup/experimental/couchbase_impl.py0000664000567000056710000000760712701410316030771 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
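# --- Editor's illustrative sketch, not part of the original Trove source.
# MongoDump below estimates the dump size by summing db.stats().dataSize
# over all databases, skipping 'local', which mongodump does not dump.
# A stand-alone sketch of that aggregation; 'db_sizes' maps database
# name to its dataSize in bytes (helper name is an assumption):
def _demo_estimate_mongo_dump_size(db_sizes):
    return sum(size for name, size in db_sizes.items() if name != 'local')

# Example: _demo_estimate_mongo_dump_size({'local': 100, 'foo': 42}) -> 42
# --- End of editor's sketch.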
# import json from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchbase import service from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) OUTFILE = '/tmp' + system.BUCKETS_JSON class CbBackup(base.BackupRunner): """ Implementation of Backup Strategy for Couchbase. """ __strategy_name__ = 'cbbackup' pre_backup_commands = [ ['rm', '-rf', system.COUCHBASE_DUMP_DIR], ['mkdir', '-p', system.COUCHBASE_DUMP_DIR], ] post_backup_commands = [ ['rm', '-rf', system.COUCHBASE_DUMP_DIR], ] @property def cmd(self): """ Creates backup dump dir, tars it up, and encrypts it. """ cmd = 'tar cpPf - ' + system.COUCHBASE_DUMP_DIR return cmd + self.zip_cmd + self.encrypt_cmd def _save_buckets_config(self, password): url = system.COUCHBASE_REST_API + '/pools/default/buckets' utils.execute_with_timeout('curl -u root:' + password + ' ' + url + ' > ' + OUTFILE, shell=True, timeout=300) def _backup(self, password): utils.execute_with_timeout('/opt/couchbase/bin/cbbackup', system.COUCHBASE_REST_API, system.COUCHBASE_DUMP_DIR, '-u', 'root', '-p', password, timeout=600) def _run_pre_backup(self): try: for cmd in self.pre_backup_commands: utils.execute_with_timeout(*cmd) root = service.CouchbaseRootAccess() pw = root.get_password() self._save_buckets_config(pw) with open(OUTFILE, "r") as f: out = f.read() if out != "[]": d = json.loads(out) all_memcached = True for i in range(len(d)): bucket_type = d[i]["bucketType"] if bucket_type != "memcached": all_memcached = False break if not all_memcached: self._backup(pw) else: LOG.info(_("All buckets are memcached. " "Skipping backup.")) operating_system.move(OUTFILE, system.COUCHBASE_DUMP_DIR) if pw != "password": # Not default password, backup generated root password operating_system.copy(system.pwd_file, system.COUCHBASE_DUMP_DIR, preserve=True, as_root=True) except exception.ProcessExecutionError as p: LOG.error(p) raise p def _run_post_backup(self): try: for cmd in self.post_backup_commands: utils.execute_with_timeout(*cmd) except exception.ProcessExecutionError as p: LOG.error(p) raise p trove-5.0.0/trove/guestagent/strategies/backup/experimental/cassandra_impl.py0000664000567000056710000001115312701410316030763 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # Copyright 2015 Tesora Inc. # All Rights Reserved.s # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
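# --- Editor's illustrative sketch, not part of the original Trove source.
# CbBackup below only runs cbbackup when at least one bucket is not of
# type 'memcached' (memcached buckets hold no persistent data to back
# up). A stand-alone sketch of that bucket check over the JSON returned
# by the Couchbase REST API; the helper name is an assumption:
def _demo_all_memcached(buckets_json):
    """Return True when every bucket in the JSON list is memcached."""
    import json
    buckets = json.loads(buckets_json)
    return bool(buckets) and all(
        b.get('bucketType') == 'memcached' for b in buckets)

# Example: _demo_all_memcached('[{"bucketType": "memcached"}]') -> True
# --- End of editor's sketch.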
from oslo_log import log as logging

from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.cassandra import service
from trove.guestagent.strategies.backup import base

LOG = logging.getLogger(__name__)


class NodetoolSnapshot(base.BackupRunner):
    """Implementation of backup using the Nodetool (http://goo.gl/QtXVsM)
    utility.
    """

    # It is recommended to include the system keyspace in the backup.
    # Keeping the system keyspace will reduce the restore time
    # by avoiding the need to rebuild indexes.
    __strategy_name__ = 'nodetoolsnapshot'
    _SNAPSHOT_EXTENSION = 'db'

    def __init__(self, filename, **kwargs):
        self._app = service.CassandraApp()
        super(NodetoolSnapshot, self).__init__(filename, **kwargs)

    def _run_pre_backup(self):
        """Take snapshot(s) for all keyspaces. Remove existing ones first if
        any.

        Snapshot(s) will be stored in the data directory tree:
        <data dir>/<keyspace>/<table>
/snapshots/ """ self._remove_snapshot(self.filename) self._snapshot_all_keyspaces(self.filename) # Commonly 'self.command' gets resolved in the base constructor, # but we can build the full command only after having taken the # keyspace snapshot(s). self.command = self._backup_cmd + self.command def _run_post_backup(self): """Remove the created snapshot(s). """ self._remove_snapshot(self.filename) def _remove_snapshot(self, snapshot_name): LOG.debug('Clearing snapshot(s) for all keyspaces with snapshot name ' '"%s".' % snapshot_name) utils.execute('nodetool', 'clearsnapshot', '-t %s' % snapshot_name) def _snapshot_all_keyspaces(self, snapshot_name): LOG.debug('Creating snapshot(s) for all keyspaces with snapshot name ' '"%s".' % snapshot_name) utils.execute('nodetool', 'snapshot', '-t %s' % snapshot_name) @property def cmd(self): return self.zip_cmd + self.encrypt_cmd @property def _backup_cmd(self): """Command to collect and package keyspace snapshot(s). """ return self._build_snapshot_package_cmd(self._app.cassandra_data_dir, self.filename) def _build_snapshot_package_cmd(self, data_dir, snapshot_name): """Collect all files for a given snapshot and build a package command for them. Transform the paths such that the backup can be restored simply by extracting the archive right to an existing data directory (i.e. place the root into the and remove the 'snapshots/' portion of the path). Attempt to preserve access modifiers on the archived files. Assert the backup is not empty as there should always be at least the system keyspace. Fail if there is nothing to backup. """ LOG.debug('Searching for all snapshot(s) with name "%s".' % snapshot_name) snapshot_files = operating_system.list_files_in_directory( data_dir, recursive=True, include_dirs=False, pattern='.*/snapshots/%s/.*\.%s' % (snapshot_name, self._SNAPSHOT_EXTENSION), as_root=True) num_snapshot_files = len(snapshot_files) LOG.debug('Found %d snapshot (*.%s) files.' % (num_snapshot_files, self._SNAPSHOT_EXTENSION)) if num_snapshot_files > 0: return ('sudo tar ' '--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"' % (snapshot_name, data_dir, '" "'.join(snapshot_files))) # There should always be at least the system keyspace snapshot. raise exception.BackupCreationError(_("No data found.")) trove-5.0.0/trove/guestagent/strategies/__init__.py0000664000567000056710000000000012701410316023565 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/replication/0000775000567000056710000000000012701410521023775 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/replication/__init__.py0000664000567000056710000000366012701410316026115 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
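# --- Editor's illustrative sketch, not part of the original Trove source.
# NodetoolSnapshot in the preceding cassandra_impl.py packages snapshot
# files with tar while stripping the 'snapshots/<name>/' path component,
# so an extract lands directly in the data directory tree. A stand-alone
# sketch of that command builder (helper name is an assumption):
def _demo_snapshot_tar_cmd(snapshot_name, data_dir, files):
    return ('sudo tar --transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"'
            % (snapshot_name, data_dir, '" "'.join(files)))
# --- End of editor's sketch.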
# from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) CONF = cfg.CONF __replication_instance = None __replication_manager = None __replication_namespace = None __replication_strategy = None def get_instance(manager): global __replication_instance global __replication_manager global __replication_namespace if not __replication_instance or manager != __replication_manager: replication_strategy = get_strategy(manager) __replication_namespace = CONF.get(manager).replication_namespace replication_strategy_cls = get_strategy_cls( replication_strategy, __replication_namespace) __replication_instance = replication_strategy_cls() __replication_manager = manager LOG.debug('Got replication instance from: %s.%s' % ( __replication_namespace, __replication_strategy)) return __replication_instance def get_strategy(manager): global __replication_strategy if not __replication_strategy or manager != __replication_manager: __replication_strategy = CONF.get(manager).replication_strategy return __replication_strategy def get_strategy_cls(replication_driver, ns=__name__): return Strategy.get_strategy(replication_driver, ns) trove-5.0.0/trove/guestagent/strategies/replication/mysql_base.py0000664000567000056710000001311212701410316026506 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
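# --- Editor's illustrative sketch, not part of the original Trove source.
# get_instance() below memoizes one replication strategy instance per
# manager name and rebuilds it only when the manager changes. The same
# pattern without the module globals and CONF lookups; names are
# assumptions:
def _demo_cached_strategy_loader():
    cache = {}

    def get_instance(manager, factory):
        # 'factory' stands in for resolving and instantiating the
        # configured strategy class for this manager.
        if manager not in cache:
            cache[manager] = factory()
        return cache[manager]

    return get_instance
# --- End of editor's sketch.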
# import abc import uuid from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common.i18n import _ from trove.common import utils from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.datastore.mysql.service import MySqlAdmin from trove.guestagent.db import models from trove.guestagent.strategies import backup from trove.guestagent.strategies.replication import base AGENT = BackupAgent() CONF = cfg.CONF REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.mysql_impl' REPL_BACKUP_STRATEGY = 'InnoBackupEx' REPL_BACKUP_INCREMENTAL_STRATEGY = 'InnoBackupExIncremental' REPL_BACKUP_RUNNER = backup.get_backup_strategy( REPL_BACKUP_STRATEGY, REPL_BACKUP_NAMESPACE) REPL_BACKUP_INCREMENTAL_RUNNER = backup.get_backup_strategy( REPL_BACKUP_INCREMENTAL_STRATEGY, REPL_BACKUP_NAMESPACE) REPL_EXTRA_OPTS = CONF.backup_runner_options.get(REPL_BACKUP_STRATEGY, '') LOG = logging.getLogger(__name__) class MysqlReplicationBase(base.Replication): """Base class for MySql Replication strategies.""" def get_master_ref(self, service, snapshot_info): master_ref = { 'host': netutils.get_my_ipv4(), 'port': service.get_port() } return master_ref def _create_replication_user(self): replication_user = None replication_password = utils.generate_random_password(16) mysql_user = models.MySQLUser() mysql_user.password = replication_password retry_count = 0 while replication_user is None: try: mysql_user.name = 'slave_' + str(uuid.uuid4())[:8] MySqlAdmin().create_user([mysql_user.serialize()]) LOG.debug("Trying to create replication user " + mysql_user.name) replication_user = { 'name': mysql_user.name, 'password': replication_password } except Exception: retry_count += 1 if retry_count > 5: LOG.error(_("Replication user retry count exceeded")) raise return replication_user def backup_runner_for_replication(self): return { 'runner': REPL_BACKUP_RUNNER, 'extra_opts': REPL_EXTRA_OPTS, 'incremental_runner': REPL_BACKUP_INCREMENTAL_RUNNER } def snapshot_for_replication(self, context, service, location, snapshot_info): snapshot_id = snapshot_info['id'] replica_number = snapshot_info.get('replica_number', 1) LOG.debug("Acquiring backup for replica number %d." % replica_number) # Only create a backup if it's the first replica if replica_number == 1: AGENT.execute_backup( context, snapshot_info, **self.backup_runner_for_replication()) else: LOG.debug("Using existing backup created for previous replica.") LOG.debug("Replication snapshot %s used for replica number %d." 
% (snapshot_id, replica_number)) replication_user = self._create_replication_user() service.grant_replication_privilege(replication_user) # With streamed InnobackupEx, the log position is in # the stream and will be decoded by the slave log_position = { 'replication_user': replication_user } return snapshot_id, log_position def enable_as_master(self, service, master_config): if not service.exists_replication_source_overrides(): service.write_replication_source_overrides(master_config) service.restart() @abc.abstractmethod def connect_to_master(self, service, snapshot): """Connects a slave to a master""" def enable_as_slave(self, service, snapshot, slave_config): try: service.write_replication_replica_overrides(slave_config) service.restart() self.connect_to_master(service, snapshot) except Exception: LOG.exception(_("Exception enabling guest as replica")) raise def detach_slave(self, service, for_failover): replica_info = service.stop_slave(for_failover) service.remove_replication_replica_overrides() service.restart() return replica_info def get_replica_context(self, service): replication_user = self._create_replication_user() service.grant_replication_privilege(replication_user) return { 'master': self.get_master_ref(service, None), 'log_position': { 'replication_user': replication_user } } def cleanup_source_on_replica_detach(self, admin_service, replica_info): admin_service.delete_user_by_name(replica_info['replication_user']) def demote_master(self, service): service.remove_replication_source_overrides() service.restart() trove-5.0.0/trove/guestagent/strategies/replication/base.py0000664000567000056710000000423212701410316025264 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
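# --- Editor's illustrative sketch, not part of the original Trove source.
# _create_replication_user above picks a random 'slave_xxxxxxxx' name and
# retries on collision, giving up after a bounded number of attempts. The
# same loop in isolation; 'create_user' stands in for the MySqlAdmin call
# and the helper name is an assumption:
def _demo_create_replication_user(create_user, max_retries=5):
    import uuid
    for attempt in range(max_retries + 1):
        name = 'slave_' + str(uuid.uuid4())[:8]
        try:
            create_user(name)
            return name
        except Exception:
            # Likely a name collision; try again with a fresh suffix.
            if attempt == max_retries:
                raise
# --- End of editor's sketch.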
# import abc import six from trove.common.strategies.strategy import Strategy @six.add_metaclass(abc.ABCMeta) class Replication(Strategy): """Base class for Replication Strategy implementation.""" __strategy_type__ = 'replication' __strategy_ns__ = 'trove.guestagent.strategies.replication' def __init__(self): super(Replication, self).__init__() @abc.abstractmethod def get_master_ref(self, service, snapshot_info): """Get reference to master site for replication strategy.""" def backup_required_for_replication(self): """Indicates whether a backup is required for replication.""" return True @abc.abstractmethod def snapshot_for_replication(self, context, service, location, snapshot_info): """Capture snapshot of master db.""" @abc.abstractmethod def enable_as_master(self, service, master_config): """Configure underlying database to act as master for replication.""" @abc.abstractmethod def enable_as_slave(self, service, snapshot, slave_config): """Configure underlying database as a slave of the given master.""" @abc.abstractmethod def detach_slave(self, service, for_failover): """Turn off replication on a slave site.""" @abc.abstractmethod def cleanup_source_on_replica_detach(self, service, replica_info): """Clean up the source on the detach of a replica.""" @abc.abstractmethod def demote_master(self, service): """Turn off replication on a master site.""" trove-5.0.0/trove/guestagent/strategies/replication/mysql_gtid.py0000664000567000056710000000345312701410316026532 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common import cfg from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.strategies.replication import mysql_base AGENT = BackupAgent() CONF = cfg.CONF LOG = logging.getLogger(__name__) class MysqlGTIDReplication(mysql_base.MysqlReplicationBase): """MySql Replication coordinated by GTIDs.""" def connect_to_master(self, service, snapshot): logging_config = snapshot['log_position'] LOG.debug("connect_to_master %s" % logging_config['replication_user']) change_master_cmd = ( "CHANGE MASTER TO MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_AUTO_POSITION=1, " "MASTER_CONNECT_RETRY=15" % { 'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'] }) service.execute_on_client(change_master_cmd) service.start_slave() trove-5.0.0/trove/guestagent/strategies/replication/mysql_binlog.py0000664000567000056710000000622612701410316027056 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
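# --- Editor's illustrative sketch, not part of the original Trove source.
# MysqlGTIDReplication below assembles a CHANGE MASTER statement where
# MASTER_AUTO_POSITION=1 lets the slave locate its own start point, so no
# binlog file/offset is needed. The same statement builder in isolation
# (helper name is an assumption):
def _demo_change_master_sql(master, repl_user):
    return ("CHANGE MASTER TO MASTER_HOST='%(host)s', "
            "MASTER_PORT=%(port)s, "
            "MASTER_USER='%(user)s', "
            "MASTER_PASSWORD='%(password)s', "
            "MASTER_AUTO_POSITION=1, "
            "MASTER_CONNECT_RETRY=15"
            % {'host': master['host'], 'port': master['port'],
               'user': repl_user['name'],
               'password': repl_user['password']})
# --- End of editor's sketch.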
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import csv from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.strategies.replication import mysql_base AGENT = BackupAgent() CONF = cfg.CONF LOG = logging.getLogger(__name__) class MysqlBinlogReplication(mysql_base.MysqlReplicationBase): """MySql Replication coordinated by binlog position.""" class UnableToDetermineBinlogPosition(exception.TroveError): message = _("Unable to determine binlog position " "(from file %(binlog_file)s).") def connect_to_master(self, service, snapshot): logging_config = snapshot['log_position'] logging_config.update(self._read_log_position()) change_master_cmd = ( "CHANGE MASTER TO MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_LOG_FILE='%(log_file)s', " "MASTER_LOG_POS=%(log_pos)s, " "MASTER_CONNECT_RETRY=15" % { 'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'], 'log_file': logging_config['log_file'], 'log_pos': logging_config['log_position'] }) service.execute_on_client(change_master_cmd) service.start_slave() def _read_log_position(self): INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) LOG.info(_("Setting read permissions on %s") % INFO_FILE) operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) LOG.info(_("Reading log position from %s") % INFO_FILE) try: with open(INFO_FILE, 'rb') as f: row = csv.reader(f, delimiter='\t', skipinitialspace=True).next() return { 'log_file': row[0], 'log_position': int(row[1]) } except (IOError, IndexError) as ex: LOG.exception(ex) raise self.UnableToDetermineBinlogPosition( {'binlog_file': INFO_FILE}) trove-5.0.0/trove/guestagent/strategies/replication/experimental/0000775000567000056710000000000012701410521026472 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/replication/experimental/redis_sync.py0000664000567000056710000000675712701410316031227 0ustar jenkinsjenkins00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
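# --- Editor's illustrative sketch, not part of the original Trove source.
# _read_log_position above parses the tab-separated xtrabackup_binlog_info
# file: the first field is the binlog file name, the second the position.
# The same parse in isolation (helper name is an assumption):
def _demo_read_binlog_position(info_file):
    import csv
    with open(info_file) as f:
        row = next(csv.reader(f, delimiter='\t', skipinitialspace=True))
    return {'log_file': row[0], 'log_position': int(row[1])}
# --- End of editor's sketch.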
# from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.guestagent.strategies.replication import base CONF = cfg.CONF LOG = logging.getLogger(__name__) class RedisSyncReplication(base.Replication): """Redis Replication strategy.""" __strategy_ns__ = 'trove.guestagent.strategies.replication.experimental' __strategy_name__ = 'RedisSyncReplication' CONF_LABEL_REPLICATION_MASTER = 'replication_master' CONF_LABEL_REPLICATION_SLAVE = 'replication_slave' def get_master_ref(self, service, snapshot_info): master_ref = { 'host': netutils.get_my_ipv4(), 'port': service.get_port(), 'requirepass': service.get_auth_password(), } return master_ref def backup_required_for_replication(self): LOG.debug('Request for replication backup: no backup required') return False def snapshot_for_replication(self, context, service, location, snapshot_info): return None, None def enable_as_master(self, service, master_config): service.configuration_manager.apply_system_override( master_config, change_id=self.CONF_LABEL_REPLICATION_MASTER) service.restart() def enable_as_slave(self, service, snapshot, slave_config): service.configuration_manager.apply_system_override( slave_config, change_id=self.CONF_LABEL_REPLICATION_SLAVE) master_info = snapshot['master'] master_host = master_info['host'] master_port = master_info['port'] connect_options = {'slaveof': [master_host, master_port]} master_passwd = master_info.get('requirepass') if master_passwd: connect_options['masterauth'] = master_passwd service.admin.config_set('masterauth', master_passwd) else: service.admin.config_set('masterauth', "") service.configuration_manager.apply_system_override( connect_options, change_id=self.CONF_LABEL_REPLICATION_SLAVE) service.admin.set_master(host=master_host, port=master_port) LOG.debug('Enabled as slave.') def detach_slave(self, service, for_failover): service.configuration_manager.remove_system_override( change_id=self.CONF_LABEL_REPLICATION_SLAVE) service.admin.set_master(host=None, port=None) service.admin.config_set('masterauth', "") return None def cleanup_source_on_replica_detach(self, service, replica_info): # Nothing needs to be done to the master when a replica goes away. pass def get_replica_context(self, service): return { 'master': self.get_master_ref(service, None), } def demote_master(self, service): service.configuration_manager.remove_system_override( change_id=self.CONF_LABEL_REPLICATION_MASTER) trove-5.0.0/trove/guestagent/strategies/replication/experimental/__init__.py0000664000567000056710000000000012701410316030573 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/replication/experimental/mariadb_gtid.py0000664000567000056710000000341212701410316031454 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
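# --- Editor's illustrative sketch, not part of the original Trove source.
# RedisSyncReplication below enslaves an instance by pushing SLAVEOF and
# masterauth settings. A client-side equivalent, assuming the third-party
# redis-py library (not used by this module, which goes through the guest
# service wrappers instead); helper name is an assumption:
def _demo_redis_make_slave(host, port, master_host, master_port,
                           passwd=None):
    import redis
    client = redis.StrictRedis(host=host, port=port)
    # Set masterauth first so the replica can authenticate to the master.
    client.config_set('masterauth', passwd or '')
    client.slaveof(master_host, master_port)
# --- End of editor's sketch.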
# from oslo_log import log as logging from trove.common import cfg from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.strategies.replication import mysql_base AGENT = BackupAgent() CONF = cfg.CONF LOG = logging.getLogger(__name__) class MariaDBGTIDReplication(mysql_base.MysqlReplicationBase): """MariaDB Replication coordinated by GTIDs.""" def connect_to_master(self, service, snapshot): logging_config = snapshot['log_position'] LOG.debug("connect_to_master %s" % logging_config['replication_user']) change_master_cmd = ( "CHANGE MASTER TO MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_USE_GTID=slave_pos" % { 'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'] }) service.execute_on_client(change_master_cmd) service.start_slave() trove-5.0.0/trove/guestagent/strategies/restore/0000775000567000056710000000000012701410521023147 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/restore/__init__.py0000664000567000056710000000160512701410316025264 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) def get_restore_strategy(restore_driver, ns=__name__): LOG.debug("Getting restore strategy: %s." % restore_driver) return Strategy.get_strategy(restore_driver, ns) trove-5.0.0/trove/guestagent/strategies/restore/mysql_impl.py0000664000567000056710000003226512701410320025714 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import glob import os import re import tempfile from oslo_log import log as logging import pexpect from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode import trove.guestagent.datastore.mysql.service as dbaas from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class MySQLRestoreMixin(object): """Common utils for restoring MySQL databases.""" RESET_ROOT_RETRY_TIMEOUT = 100 RESET_ROOT_SLEEP_INTERVAL = 10 # Reset the root password in a single transaction with 'FLUSH PRIVILEGES' # to ensure we never leave database wide open without 'grant tables'. RESET_ROOT_MYSQL_COMMANDS = ("START TRANSACTION;", "UPDATE `mysql`.`user` SET" " `password`=PASSWORD('')" " WHERE `user`='root';", "FLUSH PRIVILEGES;", "COMMIT;") # This is a suffix MySQL appends to the file name given in # the '--log-error' startup parameter. _ERROR_LOG_SUFFIX = '.err' _ERROR_MESSAGE_PATTERN = re.compile("^ERROR:\s+.+$") def mysql_is_running(self): try: utils.execute_with_timeout("/usr/bin/mysqladmin", "ping") LOG.debug("MySQL is up and running.") return True except exception.ProcessExecutionError: LOG.debug("MySQL is not running.") return False def mysql_is_not_running(self): try: utils.execute_with_timeout("/usr/bin/pgrep", "mysqld") LOG.info(_("MySQL is still running.")) return False except exception.ProcessExecutionError: LOG.debug("MySQL is not running.") return True def poll_until_then_raise(self, event, exc): try: utils.poll_until(event, sleep_time=self.RESET_ROOT_SLEEP_INTERVAL, time_out=self.RESET_ROOT_RETRY_TIMEOUT) except exception.PollTimeOut: raise exc def _start_mysqld_safe_with_init_file(self, init_file, err_log_file): child = pexpect.spawn("sudo mysqld_safe" " --skip-grant-tables" " --skip-networking" " --init-file='%s'" " --log-error='%s'" % (init_file.name, err_log_file.name) ) try: i = child.expect(['Starting mysqld daemon']) if i == 0: LOG.info(_("Starting MySQL")) except pexpect.TIMEOUT: LOG.exception(_("Got a timeout launching mysqld_safe")) finally: # There is a race condition here where we kill mysqld before # the init file been executed. We need to ensure mysqld is up. # # mysqld_safe will start even if init-file statement(s) fail. # We therefore also check for errors in the log file. 
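            # Poll for the server to come up first, then scan the error
            # log: a failed init-file statement still leaves mysqld_safe
            # running, so only the log reveals that the password reset
            # statements did not actually execute.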
self.poll_until_then_raise( self.mysql_is_running, base.RestoreError("Reset root password failed:" " mysqld did not start!")) first_err_message = self._find_first_error_message(err_log_file) if first_err_message: raise base.RestoreError("Reset root password failed: %s" % first_err_message) LOG.info(_("Root password reset successfully.")) LOG.debug("Cleaning up the temp mysqld process.") utils.execute_with_timeout("mysqladmin", "-uroot", "shutdown") LOG.debug("Polling for shutdown to complete.") try: utils.poll_until(self.mysql_is_not_running, sleep_time=self.RESET_ROOT_SLEEP_INTERVAL, time_out=self.RESET_ROOT_RETRY_TIMEOUT) LOG.debug("Database successfully shutdown") except exception.PollTimeOut: LOG.debug("Timeout shutting down database " "- performing killall on mysqld_safe.") utils.execute_with_timeout("killall", "mysqld_safe", root_helper="sudo", run_as_root=True) self.poll_until_then_raise( self.mysql_is_not_running, base.RestoreError("Reset root password failed: " "mysqld did not stop!")) def reset_root_password(self): with tempfile.NamedTemporaryFile() as init_file: operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL, as_root=True) self._writelines_one_per_line(init_file, self.RESET_ROOT_MYSQL_COMMANDS) # Do not attempt to delete the file as the 'trove' user. # The process writing into it may have assumed its ownership. # Only owners can delete temporary # files (restricted deletion). err_log_file = tempfile.NamedTemporaryFile( suffix=self._ERROR_LOG_SUFFIX, delete=False) try: self._start_mysqld_safe_with_init_file(init_file, err_log_file) finally: err_log_file.close() MySQLRestoreMixin._delete_file(err_log_file.name) def _writelines_one_per_line(self, fp, lines): fp.write(os.linesep.join(lines)) fp.flush() def _find_first_error_message(self, fp): if MySQLRestoreMixin._is_non_zero_file(fp): return MySQLRestoreMixin._find_first_pattern_match( fp, self._ERROR_MESSAGE_PATTERN ) return None @classmethod def _delete_file(self, file_path): """Force-remove a given file as root. Do not raise an exception on failure. 
""" if os.path.isfile(file_path): try: operating_system.remove(file_path, force=True, as_root=True) except Exception: LOG.exception("Could not remove file: '%s'" % file_path) @classmethod def _is_non_zero_file(self, fp): file_path = fp.name return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0) @classmethod def _find_first_pattern_match(self, fp, pattern): for line in fp: if pattern.match(line): return line return None class MySQLDump(base.RestoreRunner, MySQLRestoreMixin): """Implementation of Restore Strategy for MySQLDump.""" __strategy_name__ = 'mysqldump' base_restore_cmd = 'sudo mysql' class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin): """Implementation of Restore Strategy for InnoBackupEx.""" __strategy_name__ = 'innobackupex' base_restore_cmd = 'sudo xbstream -x -C %(restore_location)s' base_prepare_cmd = ('sudo innobackupex' ' --defaults-file=%(restore_location)s/backup-my.cnf' ' --ibbackup=xtrabackup' ' --apply-log' ' %(restore_location)s' ' 2>/tmp/innoprepare.log') def __init__(self, *args, **kwargs): super(InnoBackupEx, self).__init__(*args, **kwargs) self.prepare_cmd = self.base_prepare_cmd % kwargs self.prep_retcode = None def pre_restore(self): app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) app.stop_db() LOG.info(_("Cleaning out restore location: %s."), self.restore_location) operating_system.chmod(self.restore_location, FileMode.SET_FULL, as_root=True) utils.clean_out(self.restore_location) def _run_prepare(self): LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd) self.prep_retcode = utils.execute(self.prepare_cmd, shell=True) LOG.info(_("Innobackupex prepare finished successfully.")) def post_restore(self): self._run_prepare() operating_system.chown(self.restore_location, 'mysql', None, force=True, as_root=True) self._delete_old_binlogs() self.reset_root_password() app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) app.start_mysql() def _delete_old_binlogs(self): files = glob.glob(os.path.join(self.restore_location, "ib_logfile*")) for f in files: os.unlink(f) class InnoBackupExIncremental(InnoBackupEx): __strategy_name__ = 'innobackupexincremental' incremental_prep = ('sudo innobackupex' ' --defaults-file=%(restore_location)s/backup-my.cnf' ' --ibbackup=xtrabackup' ' --apply-log' ' --redo-only' ' %(restore_location)s' ' %(incremental_args)s' ' 2>/tmp/innoprepare.log') def __init__(self, *args, **kwargs): super(InnoBackupExIncremental, self).__init__(*args, **kwargs) self.restore_location = kwargs.get('restore_location') self.content_length = 0 def _incremental_restore_cmd(self, incremental_dir): """Return a command for a restore with a incremental location.""" args = {'restore_location': incremental_dir} return (self.decrypt_cmd + self.unzip_cmd + (self.base_restore_cmd % args)) def _incremental_prepare_cmd(self, incremental_dir): if incremental_dir is not None: incremental_arg = '--incremental-dir=%s' % incremental_dir else: incremental_arg = '' args = { 'restore_location': self.restore_location, 'incremental_args': incremental_arg, } return self.incremental_prep % args def _incremental_prepare(self, incremental_dir): prepare_cmd = self._incremental_prepare_cmd(incremental_dir) LOG.debug("Running innobackupex prepare: %s.", prepare_cmd) utils.execute(prepare_cmd, shell=True) LOG.info(_("Innobackupex prepare finished successfully.")) def _incremental_restore(self, location, checksum): """Recursively apply backups from all parents. 
If we are the parent then we restore to the restore_location and
        we apply the logs to the restore_location only.

        Otherwise if we are an incremental we restore to a subfolder to
        prevent stomping on the full restore data. Then we run the
        apply-log step with the '--incremental-dir' flag.
        """
        metadata = self.storage.load_metadata(location, checksum)
        incremental_dir = None
        if 'parent_location' in metadata:
            LOG.info(_("Restoring parent: %(parent_location)s"
                       " checksum: %(parent_checksum)s.") % metadata)
            parent_location = metadata['parent_location']
            parent_checksum = metadata['parent_checksum']
            # Restore parents recursively so backups are applied sequentially.
            self._incremental_restore(parent_location, parent_checksum)
            # For *this* backup, set the incremental_dir;
            # just use the checksum for the incremental path as it is
            # sufficiently unique /var/lib/mysql/data/
            incremental_dir = os.path.join(self.restore_location, checksum)
            operating_system.create_directory(incremental_dir, as_root=True)
            command = self._incremental_restore_cmd(incremental_dir)
        else:
            # The parent (full backup) uses the same command as the
            # InnoBackupEx super class and does not set an incremental_dir.
            command = self.restore_cmd

        self.content_length += self._unpack(location, checksum, command)
        self._incremental_prepare(incremental_dir)

        # Delete unpacked incremental backup metadata.
        if incremental_dir:
            operating_system.remove(incremental_dir, force=True, as_root=True)

    def _run_restore(self):
        """Run incremental restore.

        First grab all parents and prepare them with '--redo-only'. After
        all backups are restored, the InnoBackupEx super class post_restore
        method is called to do the final prepare with '--apply-log'.
        """
        self._incremental_restore(self.location, self.checksum)
        return self.content_length
trove-5.0.0/trove/guestagent/strategies/restore/base.py0000664000567000056710000000666612701410316024443 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
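# A restore is a shell pipeline fed from the backup storage stream: an
# optional 'openssl enc -d' decryption stage, then an optional 'gzip -d'
# stage, then the datastore-specific base_restore_cmd supplied by each
# concrete runner.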
# from eventlet.green import subprocess from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.strategy import Strategy from trove.common import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF CHUNK_SIZE = CONF.backup_chunk_size BACKUP_USE_GZIP = CONF.backup_use_gzip_compression BACKUP_USE_OPENSSL = CONF.backup_use_openssl_encryption BACKUP_DECRYPT_KEY = CONF.backup_aes_cbc_key class RestoreError(Exception): """Error running the Backup Command.""" class RestoreRunner(Strategy): """Base class for Restore Strategy implementations.""" """Restore a database from a previous backup.""" __strategy_type__ = 'restore_runner' __strategy_ns__ = 'trove.guestagent.strategies.restore' # The actual system calls to run the restore and prepare restore_cmd = None # The backup format type restore_type = None # Decryption Parameters is_zipped = BACKUP_USE_GZIP is_encrypted = BACKUP_USE_OPENSSL decrypt_key = BACKUP_DECRYPT_KEY def __init__(self, storage, **kwargs): self.storage = storage self.location = kwargs.pop('location') self.checksum = kwargs.pop('checksum') self.restore_location = kwargs.get('restore_location') self.restore_cmd = (self.decrypt_cmd + self.unzip_cmd + (self.base_restore_cmd % kwargs)) super(RestoreRunner, self).__init__() def pre_restore(self): """Hook that is called before the restore command.""" pass def post_restore(self): """Hook that is called after the restore command.""" pass def restore(self): self.pre_restore() content_length = self._run_restore() self.post_restore() return content_length def _run_restore(self): return self._unpack(self.location, self.checksum, self.restore_cmd) def _unpack(self, location, checksum, command): stream = self.storage.load(location, checksum) process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE) content_length = 0 for chunk in stream: process.stdin.write(chunk) content_length += len(chunk) process.stdin.close() utils.raise_if_process_errored(process, RestoreError) LOG.debug("Restored %s bytes from stream." % content_length) return content_length @property def decrypt_cmd(self): if self.is_encrypted: return ('openssl enc -d -aes-256-cbc -salt -pass pass:%s | ' % self.decrypt_key) else: return '' @property def unzip_cmd(self): return 'gzip -d -c | ' if self.is_zipped else '' trove-5.0.0/trove/guestagent/strategies/restore/experimental/0000775000567000056710000000000012701410521025644 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/restore/experimental/redis_impl.py0000664000567000056710000000631112701410316030350 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_log import log as logging from trove.common.i18n import _ from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.redis import service from trove.guestagent.datastore.experimental.redis import system from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class RedisBackup(base.RestoreRunner): """Implementation of Restore Strategy for Redis.""" __strategy_name__ = 'redisbackup' CONF_LABEL_AOF_TEMP_OFF = 'restore_aof_temp_off' INFO_PERSISTENCE_SECTION = 'persistence' def __init__(self, storage, **kwargs): self.app = service.RedisApp() self.restore_location = self.app.get_persistence_filepath() self.base_restore_cmd = 'tee %s' % self.restore_location self.aof_set = self.app.is_appendonly_enabled() self.aof_off_cfg = {'appendonly': 'no'} kwargs.update({'restore_location': self.restore_location}) super(RedisBackup, self).__init__(storage, **kwargs) def pre_restore(self): self.app.stop_db() LOG.info(_("Removing old persistence file: %s."), self.restore_location) operating_system.remove(self.restore_location, force=True, as_root=True) dir = os.path.dirname(self.restore_location) operating_system.create_directory(dir, as_root=True) operating_system.chmod(dir, FileMode.SET_FULL, as_root=True) # IF AOF is set, we need to turn it off temporarily if self.aof_set: self.app.configuration_manager.apply_system_override( self.aof_off_cfg, change_id=self.CONF_LABEL_AOF_TEMP_OFF) def post_restore(self): operating_system.chown(self.restore_location, system.REDIS_OWNER, system.REDIS_OWNER, as_root=True) self.app.start_db() # IF AOF was set, we need to put back the original file if self.aof_set: self.app.admin.wait_until('loading', '0', section=self.INFO_PERSISTENCE_SECTION) self.app.admin.execute('BGREWRITEAOF') self.app.admin.wait_until('aof_rewrite_in_progress', '0', section=self.INFO_PERSISTENCE_SECTION) self.app.stop_db() self.app.configuration_manager.remove_system_override( change_id=self.CONF_LABEL_AOF_TEMP_OFF) self.app.start_db() trove-5.0.0/trove/guestagent/strategies/restore/experimental/__init__.py0000664000567000056710000000000012701410316027745 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/strategies/restore/experimental/postgresql_impl.py0000664000567000056710000000570012701410316031446 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
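# The pg_dump runner below overrides the default restore error handling
# because psql writes benign messages to stderr during a normal restore
# (e.g. the ignorable 'ERROR: role "postgres" already exists').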
import re

from eventlet.green import subprocess
from oslo_log import log as logging

from trove.guestagent.strategies.restore import base

LOG = logging.getLogger(__name__)


class PgDump(base.RestoreRunner):
    """Implementation of Restore Strategy for pg_dump."""
    __strategy_name__ = 'pg_dump'
    base_restore_cmd = 'sudo -u postgres psql '

    IGNORED_ERROR_PATTERNS = [
        re.compile("ERROR:\s*role \"postgres\" already exists"),
    ]

    def restore(self):
        """We are overriding the base class behavior
        to perform custom error handling.
        """
        self.pre_restore()
        content_length = self._execute_postgres_restore()
        self.post_restore()
        return content_length

    def _execute_postgres_restore(self):
        # PostgreSQL outputs a few benign messages to the stderr stream
        # during a normal restore procedure.
        # We need to watch for those and avoid raising
        # an exception in response.
        # Message 'ERROR: role "postgres" already exists'
        # is expected and does not pose any problems to the restore operation.
        stream = self.storage.load(self.location, self.checksum)
        process = subprocess.Popen(self.restore_cmd, shell=True,
                                   stdin=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        content_length = 0
        for chunk in stream:
            process.stdin.write(chunk)
            content_length += len(chunk)
        process.stdin.close()
        self._handle_errors(process)
        LOG.debug("Restored %s bytes from stream." % content_length)

        return content_length

    def _handle_errors(self, process):
        # Handle messages in the error stream of a given process.
        # Raise an exception if the stream is not empty and
        # does not match the expected message sequence.
        try:
            err = process.stderr.read()
            # Empty error stream is always accepted as valid
            # for future compatibility.
            if err:
                for message in err.splitlines(False):
                    if not any(regex.match(message)
                               for regex in self.IGNORED_ERROR_PATTERNS):
                        raise base.RestoreError(message)
        except OSError:
            pass
trove-5.0.0/trove/guestagent/strategies/restore/experimental/db2_impl.py0000664000567000056710000000412212701410316027707 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.db2 import service from trove.guestagent.datastore.experimental.db2 import system from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) CONF = cfg.CONF DB2_DBPATH = CONF.db2.mount_point DB2_BACKUP_DIR = DB2_DBPATH + "/backup" class DB2Backup(base.RestoreRunner): """Implementation of Restore Strategy for DB2.""" __strategy_name__ = 'db2backup' base_restore_cmd = 'sudo tar xPf -' def __init__(self, *args, **kwargs): super(DB2Backup, self).__init__(*args, **kwargs) self.appStatus = service.DB2AppStatus() self.app = service.DB2App(self.appStatus) self.admin = service.DB2Admin() self.restore_location = DB2_BACKUP_DIR def post_restore(self): """ Restore from the directory that we untarred into """ out, err = utils.execute_with_timeout(system.GET_DB_NAMES, shell=True) dbNames = out.split() for dbName in dbNames: service.run_command(system.RESTORE_DB % {'dbname': dbName, 'dir': DB2_BACKUP_DIR}) LOG.info(_("Cleaning out restore location post: %s."), DB2_BACKUP_DIR) operating_system.remove(DB2_BACKUP_DIR, force=True, as_root=True) trove-5.0.0/trove/guestagent/strategies/restore/experimental/couchdb_impl.py0000664000567000056710000000302412701410316030647 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchdb import service from trove.guestagent.strategies.restore import base class CouchDBBackup(base.RestoreRunner): __strategy_name__ = 'couchdbbackup' base_restore_cmd = 'sudo tar xPf -' def __init__(self, *args, **kwargs): self.appStatus = service.CouchDBAppStatus() self.app = service.CouchDBApp(self.appStatus) super(CouchDBBackup, self).__init__(*args, **kwargs) def post_restore(self): """ To restore from backup, all we need to do is untar the compressed database files into the database directory and change its ownership. """ operating_system.chown(service.COUCHDB_LIB_DIR, 'couchdb', 'couchdb', as_root=True) self.app.restart() trove-5.0.0/trove/guestagent/strategies/restore/experimental/mongo_impl.py0000664000567000056710000000353312701410316030364 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 eBay Software Foundation # Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging
from oslo_utils import netutils

from trove.common import cfg
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.mongodb import (
    service as mongo_service)
from trove.guestagent.strategies.restore import base

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IP = netutils.get_my_ipv4()
LARGE_TIMEOUT = 1200
MONGODB_DBPATH = CONF.mongodb.mount_point
MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump"


class MongoDump(base.RestoreRunner):
    __strategy_name__ = 'mongodump'
    base_restore_cmd = 'sudo tar xPf -'

    def __init__(self, *args, **kwargs):
        super(MongoDump, self).__init__(*args, **kwargs)
        self.app = mongo_service.MongoDBApp()

    def post_restore(self):
        """
        Restore from the directory that we untarred into
        """
        params = self.app.admin_cmd_auth_params()
        params.append(MONGO_DUMP_DIR)
        utils.execute_with_timeout('mongorestore', *params,
                                   timeout=LARGE_TIMEOUT)

        operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True)
trove-5.0.0/trove/guestagent/strategies/restore/experimental/couchbase_impl.py0000664000567000056710000002260212701410316031177 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import json
import os.path
import time

from oslo_log import log as logging

from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent import dbaas
from trove.guestagent.strategies.restore import base


LOG = logging.getLogger(__name__)


class CbBackup(base.RestoreRunner):
    """
    Implementation of Restore Strategy for Couchbase.
    """
    __strategy_name__ = 'cbbackup'
    base_restore_cmd = 'sudo tar xpPf -'

    def __init__(self, *args, **kwargs):
        super(CbBackup, self).__init__(*args, **kwargs)

    def pre_restore(self):
        try:
            operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True)
        except exception.ProcessExecutionError as p:
            LOG.error(p)
            raise p

    def post_restore(self):
        try:
            # Root enabled for the backup
            pwd_file = system.COUCHBASE_DUMP_DIR + system.SECRET_KEY
            if os.path.exists(pwd_file):
                with open(pwd_file, "r") as f:
                    pw = f.read().rstrip("\n")
                    root = service.CouchbaseRootAccess()
                    root.set_password(pw)

            # Get current root password
            root = service.CouchbaseRootAccess()
            root_pwd = root.get_password()

            # Iterate through each bucket config
            buckets_json = system.COUCHBASE_DUMP_DIR + system.BUCKETS_JSON
            with open(buckets_json, "r") as f:
                out = f.read()
                if out == "[]":
                    # No buckets or data to restore. Done.
return d = json.loads(out) for i in range(len(d)): bucket_name = d[i]["name"] bucket_type = d[i]["bucketType"] if bucket_type == "membase": bucket_type = "couchbase" ram = int(dbaas.to_mb(d[i]["quota"]["ram"])) auth_type = d[i]["authType"] password = d[i]["saslPassword"] port = d[i]["proxyPort"] replica_number = d[i]["replicaNumber"] replica_index = 1 if d[i]["replicaIndex"] else 0 threads = d[i]["threadsNumber"] flush = 1 if "flush" in d[i]["controllers"] else 0 # cbrestore requires you to manually create dest buckets create_bucket_cmd = ('curl -X POST -u root:' + root_pwd + ' -d name="' + bucket_name + '"' + ' -d bucketType="' + bucket_type + '"' + ' -d ramQuotaMB="' + str(ram) + '"' + ' -d authType="' + auth_type + '"' + ' -d saslPassword="' + password + '"' + ' -d proxyPort="' + str(port) + '"' + ' -d replicaNumber="' + str(replica_number) + '"' + ' -d replicaIndex="' + str(replica_index) + '"' + ' -d threadsNumber="' + str(threads) + '"' + ' -d flushEnabled="' + str(flush) + '" ' + system.COUCHBASE_REST_API + '/pools/default/buckets') utils.execute_with_timeout(create_bucket_cmd, shell=True, timeout=300) if bucket_type == "memcached": continue # Wait for couchbase (membase) bucket creation to complete # (follows same logic as --wait for couchbase-cli) timeout_in_seconds = 120 start = time.time() bucket_exist = False while ((time.time() - start) <= timeout_in_seconds and not bucket_exist): url = (system.COUCHBASE_REST_API + '/pools/default/buckets/') outfile = system.COUCHBASE_DUMP_DIR + '/buckets.all' utils.execute_with_timeout('curl -u root:' + root_pwd + ' ' + url + ' > ' + outfile, shell=True, timeout=300) with open(outfile, "r") as file: out = file.read() buckets = json.loads(out) for bucket in buckets: if bucket["name"] == bucket_name: bucket_exist = True break if not bucket_exist: time.sleep(2) if not bucket_exist: raise base.RestoreError("Failed to create bucket '%s' " "within %s seconds" % (bucket_name, timeout_in_seconds)) # Query status # (follows same logic as --wait for couchbase-cli) healthy = False while ((time.time() - start) <= timeout_in_seconds): url = (system.COUCHBASE_REST_API + '/pools/default/buckets/' + bucket_name) outfile = system.COUCHBASE_DUMP_DIR + '/' + bucket_name utils.execute_with_timeout('curl -u root:' + root_pwd + ' ' + url + ' > ' + outfile, shell=True, timeout=300) all_node_ready = True with open(outfile, "r") as file: out = file.read() bucket = json.loads(out) for node in bucket["nodes"]: if node["status"] != "healthy": all_node_ready = False break if not all_node_ready: time.sleep(2) else: healthy = True break if not healthy: raise base.RestoreError("Bucket '%s' is created but " "not ready to use within %s " "seconds" % (bucket_name, timeout_in_seconds)) # Restore restore_cmd = ('/opt/couchbase/bin/cbrestore ' + system.COUCHBASE_DUMP_DIR + ' ' + system.COUCHBASE_REST_API + ' --bucket-source=' + bucket_name + ' --bucket-destination=' + bucket_name + ' -u root' + ' -p ' + root_pwd) try: utils.execute_with_timeout(restore_cmd, shell=True, timeout=300) except exception.ProcessExecutionError as p: # cbrestore fails or hangs at times: # http://www.couchbase.com/issues/browse/MB-10832 # Retrying typically works LOG.error(p) LOG.error(_("cbrestore failed. 
Retrying...")) utils.execute_with_timeout(restore_cmd, shell=True, timeout=300) except exception.ProcessExecutionError as p: LOG.error(p) raise base.RestoreError("Couchbase restore failed.") trove-5.0.0/trove/guestagent/strategies/restore/experimental/cassandra_impl.py0000664000567000056710000000500112701410316031174 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.cassandra import service from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class NodetoolSnapshot(base.RestoreRunner): """Implementation of restore using the Nodetool (http://goo.gl/QtXVsM) utility. """ __strategy_name__ = 'nodetoolsnapshot' def __init__(self, storage, **kwargs): self._app = service.CassandraApp() kwargs.update({'restore_location': self._app.cassandra_data_dir}) super(NodetoolSnapshot, self).__init__(storage, **kwargs) def pre_restore(self): """Prepare the data directory for restored files. The directory itself is not included in the backup archive (i.e. the archive is rooted inside the data directory). This is to make sure we can always restore an old backup even if the standard guest agent data directory changes. """ LOG.debug('Initializing a data directory.') operating_system.create_directory( self.restore_location, user=self._app.cassandra_owner, group=self._app.cassandra_owner, force=True, as_root=True) def post_restore(self): """Updated ownership on the restored files. """ LOG.debug('Updating ownership of the restored files.') operating_system.chown( self.restore_location, self._app.cassandra_owner, self._app.cassandra_owner, recursive=True, force=True, as_root=True) @property def base_restore_cmd(self): """Command to extract a backup archive into a given location. Attempt to preserve access modifiers on the archived files. """ return 'sudo tar -xpPf - -C "%(restore_location)s"' trove-5.0.0/trove/guestagent/common/0000775000567000056710000000000012701410521020602 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/common/__init__.py0000664000567000056710000000000012701410316022703 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/common/operating_system.py0000664000567000056710000006623012701410320024554 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import inspect import operator import os import re import stat import tempfile from functools import reduce from oslo_concurrency.processutils import UnknownArgumentError from trove.common import exception from trove.common.i18n import _ from trove.common.stream_codecs import IdentityCodec from trove.common import utils REDHAT = 'redhat' DEBIAN = 'debian' SUSE = 'suse' def read_file(path, codec=IdentityCodec(), as_root=False, decode=True): """ Read a file into a Python data structure digestible by 'write_file'. :param path: Path to the read config file. :type path: string :param codec: A codec used to transform the data. :type codec: StreamCodec :param as_root: Execute as root. :type as_root: boolean :param decode: Should the codec decode the data. :type decode: boolean :returns: A dictionary of key-value pairs. :raises: :class:`UnprocessableEntity` if file doesn't exist. :raises: :class:`UnprocessableEntity` if codec not given. """ if path and exists(path, is_directory=False, as_root=as_root): if as_root: return _read_file_as_root(path, codec, decode=decode) with open(path, 'rb') as fp: if decode: return codec.deserialize(fp.read()) return codec.serialize(fp.read()) raise exception.UnprocessableEntity(_("File does not exist: %s") % path) def exists(path, is_directory=False, as_root=False): """Check a given path exists. :param path Path to be checked. :type path string :param is_directory: Check that the path exists and is a directory. Check for a regular file otherwise. :type is_directory: boolean :param as_root: Execute as root. :type as_root: boolean """ found = (not is_directory and os.path.isfile(path) or (is_directory and os.path.isdir(path))) # Only check as root if we can't see it as the regular user, since # this is more expensive if not found and as_root: test_flag = '-d' if is_directory else '-f' cmd = 'test %s %s && echo 1 || echo 0' % (test_flag, path) stdout, _ = utils.execute_with_timeout( cmd, shell=True, check_exit_code=False, run_as_root=True, root_helper='sudo') found = bool(int(stdout)) return found def _read_file_as_root(path, codec, decode=True): """Read a file as root. :param path Path to the written file. :type path string :param codec: A codec used to transform the data. :type codec: StreamCodec :param decode: Should the codec decode the data. :type decode: boolean """ with tempfile.NamedTemporaryFile() as fp: copy(path, fp.name, force=True, as_root=True) chmod(fp.name, FileMode.ADD_READ_ALL(), as_root=True) if decode: return codec.deserialize(fp.read()) return codec.serialize(fp.read()) def write_file(path, data, codec=IdentityCodec(), as_root=False, encode=True): """Write data into file using a given codec. Overwrite any existing contents. The written file can be read back into its original form by 'read_file'. :param path Path to the written config file. :type path string :param data: An object representing the file contents. :type data: object :param codec: A codec used to transform the data. :type codec: StreamCodec :param as_root: Execute as root. :type as_root: boolean :param encode: Should the codec encode the data. :type encode: boolean :raises: :class:`UnprocessableEntity` if path not given. 
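    A minimal usage sketch (assumes the JsonCodec from
    trove.common.stream_codecs)::

        from trove.common.stream_codecs import JsonCodec

        write_file('/etc/trove/test.json', {'key': 'value'},
                   codec=JsonCodec(), as_root=True)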
""" if path: if as_root: _write_file_as_root(path, data, codec, encode=encode) else: with open(path, 'wb', 0) as fp: if encode: fp.write(codec.serialize(data)) else: fp.write(codec.deserialize(data)) else: raise exception.UnprocessableEntity(_("Invalid path: %s") % path) def _write_file_as_root(path, data, codec, encode=True): """Write a file as root. Overwrite any existing contents. :param path Path to the written file. :type path string :param data: An object representing the file contents. :type data: StreamCodec :param codec: A codec used to transform the data. :type codec: StreamCodec :param encode: Should the codec encode the data. :type encode: boolean """ # The files gets removed automatically once the managing object goes # out of scope. with tempfile.NamedTemporaryFile('wb', 0, delete=False) as fp: if encode: fp.write(codec.serialize(data)) else: fp.write(codec.deserialize(data)) fp.close() # Release the resource before proceeding. copy(fp.name, path, force=True, as_root=True) class FileMode(object): """ Represent file permissions (or 'modes') that can be applied on a filesystem path by functions such as 'chmod'. The way the modes get applied is generally controlled by the operation ('reset', 'add', 'remove') group to which they belong. All modes are represented as octal numbers. Modes are combined in a 'bitwise OR' (|) operation. Multiple modes belonging to a single operation are combined into a net value for that operation which can be retrieved by one of the 'get_*_mode' methods. Objects of this class are compared by the net values of their individual operations. :seealso: chmod :param reset: List of (octal) modes that will be set, other bits will be cleared. :type reset: list :param add: List of (octal) modes that will be added to the current mode. :type add: list :param remove: List of (octal) modes that will be removed from the current mode. :type remove: list """ @classmethod def SET_ALL_RWX(cls): return cls(reset=[stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO]) # =0777 @classmethod def SET_FULL(cls): return cls.SET_ALL_RWX() @classmethod def SET_GRP_RW_OTH_R(cls): return cls(reset=[stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH]) # =0064 @classmethod def SET_USR_RO(cls): return cls(reset=[stat.S_IRUSR]) # =0400 @classmethod def SET_USR_RW(cls): return cls(reset=[stat.S_IRUSR | stat.S_IWUSR]) # =0600 @classmethod def ADD_ALL_R(cls): return cls(add=[stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH]) # +0444 @classmethod def ADD_READ_ALL(cls): return cls.ADD_ALL_R() @classmethod def ADD_USR_RW_GRP_RW(cls): return cls(add=[stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP]) # +0660 @classmethod def ADD_USR_RW_GRP_RW_OTH_R(cls): return cls(add=[stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH]) # +0664 @classmethod def ADD_GRP_RW(cls): return cls(add=[stat.S_IRGRP | stat.S_IWGRP]) # +0060 @classmethod def ADD_GRP_RX(cls): return cls(add=[stat.S_IRGRP | stat.S_IXGRP]) # +0050 @classmethod def ADD_GRP_RX_OTH_RX(cls): return cls(add=[stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH]) # +0055 def __init__(self, reset=None, add=None, remove=None): self._reset = list(reset) if reset is not None else [] self._add = list(add) if add is not None else [] self._remove = list(remove) if remove is not None else [] def get_reset_mode(self): """Get the net (combined) mode that will be set. """ return self._combine_modes(self._reset) def get_add_mode(self): """Get the net (combined) mode that will be added. 
""" return self._combine_modes(self._add) def get_remove_mode(self): """Get the net (combined) mode that will be removed. """ return self._combine_modes(self._remove) def _combine_modes(self, modes): return reduce(operator.or_, modes) if modes else None def has_any(self): """Check if any modes are specified. """ return bool(self._reset or self._add or self._remove) def __hash__(self): return hash((self.get_reset_mode(), self.get_add_mode(), self.get_remove_mode())) def __eq__(self, other): if other and isinstance(other, FileMode): if other is self: return True return (other.get_reset_mode() == self.get_reset_mode() and other.get_add_mode() == self.get_add_mode() and other.get_remove_mode() == self.get_remove_mode()) return False def __repr__(self): args = [] if self._reset: args.append('reset=[{:03o}]'.format(self.get_reset_mode())) if self._add: args.append('add=[{:03o}]'.format(self.get_add_mode())) if self._remove: args.append('remove=[{:03o}]'.format(self.get_remove_mode())) return 'Modes({:s})'.format(', '.join(args)) def get_os(): if os.path.isfile("/etc/redhat-release"): return REDHAT elif os.path.isfile("/etc/SuSE-release"): return SUSE else: return DEBIAN def file_discovery(file_candidates): for file in file_candidates: if os.path.isfile(file): return file return '' def start_service(service_candidates): _execute_service_command(service_candidates, 'cmd_start') def stop_service(service_candidates): _execute_service_command(service_candidates, 'cmd_stop') def enable_service_on_boot(service_candidates): _execute_service_command(service_candidates, 'cmd_enable') def disable_service_on_boot(service_candidates): _execute_service_command(service_candidates, 'cmd_disable') def _execute_service_command(service_candidates, command_key): """ :param service_candidates List of possible system service names. :type service_candidates list :param command_key One of the actions returned by 'service_discovery'. :type command_key string :raises: :class:`UnprocessableEntity` if no candidate names given. :raises: :class:`RuntimeError` if command not found. """ if service_candidates: service = service_discovery(service_candidates) if command_key in service: utils.execute_with_timeout(service[command_key], shell=True) else: raise RuntimeError(_("Service control command not available: %s") % command_key) else: raise exception.UnprocessableEntity(_("Candidate service names not " "specified.")) def service_discovery(service_candidates): """ This function discovers how to start, stop, enable and disable services in the current environment. "service_candidates" is an array with possible system service names. Works for upstart, systemd, sysvinit. 
""" result = {} for service in service_candidates: result['service'] = service # check upstart if os.path.isfile("/etc/init/%s.conf" % service): result['type'] = 'upstart' # upstart returns error code when service already started/stopped result['cmd_start'] = "sudo start %s || true" % service result['cmd_stop'] = "sudo stop %s || true" % service result['cmd_enable'] = ("sudo sed -i '/^manual$/d' " "/etc/init/%s.conf" % service) result['cmd_disable'] = ("sudo sh -c 'echo manual >> " "/etc/init/%s.conf'" % service) break # check sysvinit if os.path.isfile("/etc/init.d/%s" % service): result['type'] = 'sysvinit' result['cmd_start'] = "sudo service %s start" % service result['cmd_stop'] = "sudo service %s stop" % service if os.path.isfile("/usr/sbin/update-rc.d"): result['cmd_enable'] = "sudo update-rc.d %s defaults; sudo " \ "update-rc.d %s enable" % (service, service) result['cmd_disable'] = "sudo update-rc.d %s defaults; sudo " \ "update-rc.d %s disable" % (service, service) elif os.path.isfile("/sbin/chkconfig"): result['cmd_enable'] = "sudo chkconfig %s on" % service result['cmd_disable'] = "sudo chkconfig %s off" % service break # check systemd service_path = "/lib/systemd/system/%s.service" % service if os.path.isfile(service_path): result['type'] = 'systemd' result['cmd_start'] = "sudo systemctl start %s" % service result['cmd_stop'] = "sudo systemctl stop %s" % service # currently "systemctl enable" doesn't work for symlinked units # as described in https://bugzilla.redhat.com/1014311, therefore # replacing a symlink with its real path if os.path.islink(service_path): real_path = os.path.realpath(service_path) unit_file_name = os.path.basename(real_path) result['cmd_enable'] = ("sudo systemctl enable %s" % unit_file_name) result['cmd_disable'] = ("sudo systemctl disable %s" % unit_file_name) else: result['cmd_enable'] = "sudo systemctl enable %s" % service result['cmd_disable'] = "sudo systemctl disable %s" % service break return result def create_directory(dir_path, user=None, group=None, force=True, **kwargs): """Create a given directory and update its ownership (recursively) to the given user and group if any. seealso:: _execute_shell_cmd for valid optional keyword arguments. :param dir_path: Path to the created directory. :type dir_path: string :param user: Owner. :type user: string :param group: Group. :type group: string :param force: No error if existing, make parent directories as needed. :type force: boolean :raises: :class:`UnprocessableEntity` if dir_path not given. """ if dir_path: _create_directory(dir_path, force, **kwargs) if user or group: chown(dir_path, user, group, **kwargs) else: raise exception.UnprocessableEntity( _("Cannot create a blank directory.")) def chown(path, user, group, recursive=True, force=False, **kwargs): """Changes the owner and group of a given file. seealso:: _execute_shell_cmd for valid optional keyword arguments. :param path: Path to the modified file. :type path: string :param user: Owner. :type user: string :param group: Group. :type group: string :param recursive: Operate on files and directories recursively. :type recursive: boolean :param force: Suppress most error messages. :type force: boolean :raises: :class:`UnprocessableEntity` if path not given. :raises: :class:`UnprocessableEntity` if owner/group not given. 
""" if not path: raise exception.UnprocessableEntity( _("Cannot change ownership of a blank file or directory.")) if not user and not group: raise exception.UnprocessableEntity( _("Please specify owner or group, or both.")) owner_group_modifier = _build_user_group_pair(user, group) options = (('f', force), ('R', recursive)) _execute_shell_cmd('chown', options, owner_group_modifier, path, **kwargs) def _build_user_group_pair(user, group): return "%s:%s" % tuple((v if v else '') for v in (user, group)) def _create_directory(dir_path, force=True, **kwargs): """Create a given directory. :param dir_path: Path to the created directory. :type dir_path: string :param force: No error if existing, make parent directories as needed. :type force: boolean """ options = (('p', force),) _execute_shell_cmd('mkdir', options, dir_path, **kwargs) def chmod(path, mode, recursive=True, force=False, **kwargs): """Changes the mode of a given file. :seealso: Modes for more information on the representation of modes. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param path: Path to the modified file. :type path: string :param mode: File permissions (modes). The modes will be applied in the following order: reset (=), add (+), remove (-) :type mode: FileMode :param recursive: Operate on files and directories recursively. :type recursive: boolean :param force: Suppress most error messages. :type force: boolean :raises: :class:`UnprocessableEntity` if path not given. :raises: :class:`UnprocessableEntity` if no mode given. """ if path: options = (('f', force), ('R', recursive)) shell_modes = _build_shell_chmod_mode(mode) _execute_shell_cmd('chmod', options, shell_modes, path, **kwargs) else: raise exception.UnprocessableEntity( _("Cannot change mode of a blank file.")) def change_user_group(user, group, append=True, add_group=True, **kwargs): """Adds a user to groups by using the usermod linux command with -a and -G options. seealso:: _execute_shell_cmd for valid optional keyword arguments. :param user: Username. :type user: string :param group: Group names. :type group: comma separated string :param append: Adds user to a group. :type append: boolean :param add_group: Lists the groups that the user is a member of. While adding a new groups to an existing user with '-G' option alone, will remove all existing groups that user belongs. Therefore, always add the '-a' (append) with '-G' option to add or append new groups. :type add_group: boolean :raises: :class:`UnprocessableEntity` if user or group not given. """ if not user: raise exception.UnprocessableEntity(_("Missing user.")) elif not group: raise exception.UnprocessableEntity(_("Missing group.")) options = (('a', append), ('G', add_group)) _execute_shell_cmd('usermod', options, group, user, **kwargs) def _build_shell_chmod_mode(mode): """ Build a shell representation of given mode. :seealso: Modes for more information on the representation of modes. :param mode: File permissions (modes). :type mode: FileModes :raises: :class:`UnprocessableEntity` if no mode given. :returns: Following string for any non-empty modes: '=,+,-' """ # Handle methods passed in as constant fields. 
if inspect.ismethod(mode): mode = mode() if mode and mode.has_any(): text_modes = (('=', mode.get_reset_mode()), ('+', mode.get_add_mode()), ('-', mode.get_remove_mode())) return ','.join( ['{0:s}{1:03o}'.format(item[0], item[1]) for item in text_modes if item[1]]) else: raise exception.UnprocessableEntity(_("No file mode specified.")) def remove(path, force=False, recursive=True, **kwargs): """Remove a given file or directory. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param path: Path to the removed file. :type path: string :param force: Ignore nonexistent files. :type force: boolean :param recursive: Remove directories and their contents recursively. :type recursive: boolean :raises: :class:`UnprocessableEntity` if path not given. """ if path: options = (('f', force), ('R', recursive)) _execute_shell_cmd('rm', options, path, **kwargs) else: raise exception.UnprocessableEntity(_("Cannot remove a blank file.")) def move(source, destination, force=False, **kwargs): """Move a given file or directory to a new location. Move attempts to preserve the original ownership, permissions and timestamps. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param source: Path to the source location. :type source: string :param destination: Path to the destination location. :type destination: string :param force: Do not prompt before overwriting. :type force: boolean :raises: :class:`UnprocessableEntity` if source or destination not given. """ if not source: raise exception.UnprocessableEntity(_("Missing source path.")) elif not destination: raise exception.UnprocessableEntity(_("Missing destination path.")) options = (('f', force),) _execute_shell_cmd('mv', options, source, destination, **kwargs) def copy(source, destination, force=False, preserve=False, recursive=True, **kwargs): """Copy a given file or directory to another location. Copy does NOT attempt to preserve ownership, permissions and timestamps unless the 'preserve' option is enabled. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param source: Path to the source location. :type source: string :param destination: Path to the destination location. :type destination: string :param force: If an existing destination file cannot be opened, remove it and try again. :type force: boolean :param preserve: Preserve mode, ownership and timestamps. :type preserve: boolean :param recursive: Copy directories recursively. :type recursive: boolean :raises: :class:`UnprocessableEntity` if source or destination not given. """ if not source: raise exception.UnprocessableEntity(_("Missing source path.")) elif not destination: raise exception.UnprocessableEntity(_("Missing destination path.")) options = (('f', force), ('p', preserve), ('R', recursive)) _execute_shell_cmd('cp', options, source, destination, **kwargs) def get_bytes_free_on_fs(path): """ Returns the number of bytes free for the filesystem that path is on """ v = os.statvfs(path) return v.f_bsize * v.f_bavail def list_files_in_directory(root_dir, recursive=False, pattern=None, include_dirs=False, as_root=False): """ Return absolute paths to all files in a given root directory. :param root_dir Path to the root directory. :type root_dir string :param recursive Also descend into sub-directories if True. :type recursive boolean :param pattern Return only names matching the pattern. :type pattern string :param include_dirs Include paths to individual sub-directories. 
:type include_dirs boolean """ if as_root: cmd_args = [root_dir, '-noleaf'] if not recursive: cmd_args.extend(['-maxdepth', '0']) if not include_dirs: cmd_args.extend(['-type', 'f']) if pattern: cmd_args.extend(['-regextype', 'posix-extended', '-regex', os.path.join('.*', pattern) + '$']) files = _execute_shell_cmd('find', [], *cmd_args, as_root=True) return {fp for fp in files.splitlines()} return {os.path.abspath(os.path.join(root, name)) for (root, dirs, files) in os.walk(root_dir, topdown=True) if recursive or (root == root_dir) for name in (files + (dirs if include_dirs else [])) if not pattern or re.match(pattern, name)} def _execute_shell_cmd(cmd, options, *args, **kwargs): """Execute a given shell command passing it given options (flags) and arguments. Takes optional keyword arguments: :param as_root: Execute as root. :type as_root: boolean :param timeout: Number of seconds if specified, default if not. There is no timeout if set to None. :type timeout: integer :raises: class:`UnknownArgumentError` if passed unknown args. """ exec_args = {} if kwargs.pop('as_root', False): exec_args['run_as_root'] = True exec_args['root_helper'] = 'sudo' if 'timeout' in kwargs: exec_args['timeout'] = kwargs.pop('timeout') if kwargs: raise UnknownArgumentError(_("Got unknown keyword args: %r") % kwargs) cmd_flags = _build_command_options(options) cmd_args = cmd_flags + list(args) stdout, stderr = utils.execute_with_timeout(cmd, *cmd_args, **exec_args) return stdout def _build_command_options(options): """Build a list of flags from given pairs (option, is_enabled). Each option is prefixed with a single '-'. Include only options for which is_enabled=True. """ return ['-' + item[0] for item in options if item[1]] trove-5.0.0/trove/guestagent/common/guestagent_utils.py0000664000567000056710000000705212701410316024550 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import re def update_dict(updates, target): """Recursively update a target dictionary with given updates. Updates are provided as a dictionary of key-value pairs where a value can also be a nested dictionary in which case its key is treated as a sub-section of the outer key. If a list value is encountered the update is applied iteratively on all its items. :returns: Will always return a dictionary of results (may be empty). """ if target is None: target = {} if isinstance(target, list): for index, item in enumerate(target): target[index] = update_dict(updates, item) return target if updates is not None: for k, v in updates.iteritems(): if isinstance(v, collections.Mapping): target[k] = update_dict(v, target.get(k, {})) else: target[k] = updates[k] return target def expand_dict(target, namespace_sep='.'): """Expand a flat dict to a nested one. This is an inverse of 'flatten_dict'. 
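    The following flat dict:
        {'ns1.ns2a.ns3a': True, 'ns1.ns2b': 10}
    would be expanded to:
        {'ns1': {'ns2a': {'ns3a': True}, 'ns2b': 10}}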
:seealso: flatten_dict """ nested = {} for k, v in target.items(): sub = nested keys = k.split(namespace_sep) for key in keys[:-1]: sub = sub.setdefault(key, {}) sub[keys[-1]] = v return nested def flatten_dict(target, namespace_sep='.'): """Flatten a nested dict. Return a one-level dict with all sub-level keys joined by a namespace separator. The following nested dict: {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}} would be flattened to: {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10} """ def flatten(target, keys, namespace_sep): flattened = {} if isinstance(target, collections.Mapping): for k, v in target.items(): flattened.update( flatten(v, keys + [k], namespace_sep)) else: ns = namespace_sep.join(keys) flattened[ns] = target return flattened return flatten(target, [], namespace_sep) def build_file_path(base_dir, base_name, *extensions): """Build a path to a file in a given directory. The file may have an extension(s). :returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3' """ file_name = os.extsep.join([base_name] + list(extensions)) return os.path.expanduser(os.path.join(base_dir, file_name)) def to_bytes(value): """Convert numbers with a byte suffix to bytes. """ if isinstance(value, basestring): pattern = re.compile('^(\d+)([K,M,G]{1})$') match = pattern.match(value) if match: value = match.group(1) suffix = match.group(2) factor = { 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, }[suffix] return str(int(round(factor * float(value)))) return value trove-5.0.0/trove/guestagent/common/sql_query.py0000664000567000056710000002737712701410316023222 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Intermediary class for building SQL queries for use by the guest agent. Do not hard-code strings into the guest agent; use this module to build them for you. 
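Example (an illustrative sketch; the column, table and clause values are
assumed, based on the Query class defined below):

    str(Query(columns=['User'], tables=['mysql.user'],
              where=["Host != 'localhost'"]))

renders the statement:

    SELECT User FROM mysql.user WHERE Host != 'localhost';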
""" class Query(object): def __init__(self, columns=None, tables=None, where=None, order=None, group=None, limit=None): self.columns = columns or [] self.tables = tables or [] self.where = where or [] self.order = order or [] self.group = group or [] self.limit = limit def __repr__(self): return str(self) @property def _columns(self): if not self.columns: return "SELECT *" return "SELECT %s" % (", ".join(self.columns)) @property def _tables(self): return "FROM %s" % (", ".join(self.tables)) @property def _where(self): if not self.where: return "" return "WHERE %s" % (" AND ".join(self.where)) @property def _order(self): if not self.order: return "" return "ORDER BY %s" % (", ".join(self.order)) @property def _group_by(self): if not self.group: return "" return "GROUP BY %s" % (", ".join(self.group)) @property def _limit(self): if not self.limit: return "" return "LIMIT %s" % str(self.limit) def __str__(self): query = [ self._columns, self._tables, self._where, self._order, self._group_by, self._limit, ] query = [q for q in query if q] return " ".join(query) + ";" class Grant(object): PERMISSIONS = ["ALL", "ALL PRIVILEGES", "ALTER ROUTINE", "ALTER", "CREATE ROUTINE", "CREATE TEMPORARY TABLES", "CREATE USER", "CREATE VIEW", "CREATE", "DELETE", "DROP", "EVENT", "EXECUTE", "FILE", "INDEX", "INSERT", "LOCK TABLES", "PROCESS", "REFERENCES", "RELOAD", "REPLICATION CLIENT", "REPLICATION SLAVE", "SELECT", "SHOW DATABASES", "SHOW VIEW", "SHUTDOWN", "SUPER", "TRIGGER", "UPDATE", "USAGE", ] def __init__(self, permissions=None, database=None, table=None, user=None, host=None, clear=None, hashed=None, grant_option=False): self.permissions = permissions or [] self.database = database self.table = table self.user = user self.host = host self.clear = clear self.hashed = hashed self.grant_option = grant_option def __repr__(self): return str(self) @property def _permissions(self): if not self.permissions: return "USAGE" if "ALL" in self.permissions: return "ALL PRIVILEGES" if "ALL PRIVILEGES" in self.permissions: return "ALL PRIVILEGES" filtered = [perm for perm in set(self.permissions) if perm in self.PERMISSIONS] return ", ".join(sorted(filtered)) @property def _database(self): if not self.database: return "*" return "`%s`" % self.database @property def _table(self): if self.table: return "'%s'" % self.table return "*" @property def _user(self): return self.user or "" @property def _identity(self): if self.clear: return "IDENTIFIED BY '%s'" % self.clear if self.hashed: return "IDENTIFIED BY PASSWORD '%s'" % self.hashed return "" @property def _host(self): return self.host or "%" @property def _user_host(self): return "`%s`@`%s`" % (self._user, self._host) @property def _what(self): # Permissions to be granted to the user. return "GRANT %s" % self._permissions @property def _where(self): # Database and table to which the user is granted permissions. return "ON %s.%s" % (self._database, self._table) @property def _whom(self): # User and host to be granted permission. Optionally, password, too. 
whom = [("TO %s" % self._user_host), self._identity, ] whom = [w for w in whom if w] return " ".join(whom) @property def _with(self): clauses = [] if self.grant_option: clauses.append("GRANT OPTION") if not clauses: return "" return "WITH %s" % ", ".join(clauses) def __str__(self): query = [self._what, self._where, self._whom, self._with, ] query = [q for q in query if q] return " ".join(query) + ";" class Revoke(Grant): def __init__(self, permissions=None, database=None, table=None, user=None, host=None, clear=None, hashed=None): self.permissions = permissions or [] self.database = database self.table = table self.user = user self.host = host self.clear = clear self.hashed = hashed def __str__(self): query = [self._what, self._where, self._whom, ] query = [q for q in query if q] return " ".join(query) + ";" @property def _permissions(self): if not self.permissions: return "ALL" if "ALL" in self.permissions: return "ALL" if "ALL PRIVILEGES" in self.permissions: return "ALL" filtered = [perm for perm in self.permissions if perm in self.PERMISSIONS] return ", ".join(sorted(filtered)) @property def _what(self): # Permissions to be revoked from the user. return "REVOKE %s" % self._permissions @property def _whom(self): # User and host from whom to revoke permission. # Optionally, password, too. whom = [("FROM %s" % self._user_host), self._identity, ] whom = [w for w in whom if w] return " ".join(whom) class CreateDatabase(object): def __init__(self, database, charset=None, collate=None): self.database = database self.charset = charset self.collate = collate def __repr__(self): return str(self) @property def _charset(self): if not self.charset: return "" return "CHARACTER SET = '%s'" % self.charset @property def _collate(self): if not self.collate: return "" return "COLLATE = '%s'" % self.collate def __str__(self): query = [("CREATE DATABASE IF NOT EXISTS `%s`" % self.database), self._charset, self._collate, ] query = [q for q in query if q] return " ".join(query) + ";" class DropDatabase(object): def __init__(self, database): self.database = database def __repr__(self): return str(self) def __str__(self): return "DROP DATABASE `%s`;" % self.database class CreateUser(object): def __init__(self, user, host=None, clear=None, hashed=None): self.user = user self.host = host self.clear = clear # A clear password self.hashed = hashed # A hashed password def __repr__(self): return str(self) @property def keyArgs(self): return {'user': self.user, 'host': self._host, } @property def _host(self): if not self.host: return "%" return self.host @property def _identity(self): if self.clear: return "IDENTIFIED BY '%s'" % self.clear if self.hashed: return "IDENTIFIED BY PASSWORD '%s'" % self.hashed return "" def __str__(self): query = ["CREATE USER :user@:host", self._identity, ] query = [q for q in query if q] return " ".join(query) + ";" class UpdateUser(object): def __init__(self, user, host=None, clear=None, new_user=None, new_host=None): self.user = user self.host = host self.clear = clear self.new_user = new_user self.new_host = new_host def __repr__(self): return str(self) @property def _set_password(self): if self.clear: return "Password=PASSWORD('%s')" % self.clear @property def _set_user(self): if self.new_user: return "User='%s'" % self.new_user @property def _set_host(self): if self.new_host: return "Host='%s'" % self.new_host @property def _host(self): if not self.host: return "%" return self.host @property def _set_attrs(self): sets = [self._set_user, self._set_host, self._set_password, ] sets = 
[s for s in sets if s] sets = ', '.join(sets) return 'SET %s' % sets @property def _where(self): clauses = [] if self.user: clauses.append("User = '%s'" % self.user) if self.host: clauses.append("Host = '%s'" % self._host) if not clauses: return "" return "WHERE %s" % " AND ".join(clauses) def __str__(self): query = ["UPDATE mysql.user", self._set_attrs, self._where, ] query = [q for q in query if q] return " ".join(query) + ";" class DropUser(object): def __init__(self, user, host='%'): self.user = user self.host = host def __repr__(self): return str(self) def __str__(self): return "DROP USER `%s`@`%s`;" % (self.user, self.host) class SetServerVariable(object): def __init__(self, key, value): self.key = key self.value = value def __repr__(self): return str(self) def __str__(self): if self.value is True: return "SET GLOBAL %s=%s" % (self.key, 1) elif self.value is False: return "SET GLOBAL %s=%s" % (self.key, 0) elif self.value is None: return "SET GLOBAL %s" % (self.key) elif isinstance(self.value, str): return "SET GLOBAL %s='%s'" % (self.key, self.value) else: return "SET GLOBAL %s=%s" % (self.key, self.value) # Miscellaneous queries that need no parameters. FLUSH = "FLUSH PRIVILEGES;" ROOT_ENABLED = ("SELECT User FROM mysql.user " "WHERE User = 'root' AND Host != 'localhost';") REMOVE_ANON = "DELETE FROM mysql.user WHERE User = '';" REMOVE_ROOT = ("DELETE FROM mysql.user " "WHERE User = 'root' AND Host != 'localhost';") trove-5.0.0/trove/guestagent/common/timeutils.py0000664000567000056710000000022712701410316023176 0ustar jenkinsjenkins00000000000000from datetime import datetime from oslo_utils import timeutils def float_utcnow(): return float(datetime.strftime(timeutils.utcnow(), "%s.%f")) trove-5.0.0/trove/guestagent/common/configuration.py0000664000567000056710000005152512701410316024035 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import os import re import six from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode class ConfigurationManager(object): """ ConfigurationManager is responsible for management of datastore configuration. Its base functionality includes reading and writing configuration files. It is responsible for validating user inputs and requests. When supplied an override strategy it allows the user to manage configuration overrides as well. """ # Configuration group names. The names determine the order in which the # groups get applied. System group should get applied over the user group. USER_GROUP = '20-user' SYSTEM_GROUP = '50-system' DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides' DEFAULT_CHANGE_ID = 'common' def __init__(self, base_config_path, owner, group, codec, requires_root=False, override_strategy=None): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration files. 
:type owner string :param group Group of the configuration files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the manager requires superuser privileges. :type requires_root boolean :param override_strategy Strategy used to manage configuration overrides (e.g. ImportOverrideStrategy). Defaults to OneFileOverrideStrategy if None. This strategy should be compatible with virtually any datastore. It is recommended that each datastore define its strategy explicitly to avoid upgrade compatibility issues in case the default implementation changes in the future. :type override_strategy ConfigurationOverrideStrategy """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root self._value_cache = None if not override_strategy: # Use OneFile strategy by default. Store the revisions in a # sub-directory at the location of the configuration file. revision_dir = guestagent_utils.build_file_path( os.path.dirname(base_config_path), self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self._override_strategy = OneFileOverrideStrategy(revision_dir) else: self._override_strategy = override_strategy self._override_strategy.configure( base_config_path, owner, group, codec, requires_root) def get_value(self, key, default=None): """Return the current value at a given key or 'default'. """ if self._value_cache is None: self._refresh_cache() return self._value_cache.get(key, default) def parse_configuration(self): """Read contents of the configuration file (applying overrides if any) and parse it into a dict. :returns: Configuration file as a Python dict. """ base_options = operating_system.read_file( self._base_config_path, codec=self._codec, as_root=self._requires_root) updates = self._override_strategy.parse_updates() guestagent_utils.update_dict(updates, base_options) return base_options def save_configuration(self, options): """Write given contents to the base configuration file. Remove all existing overrides (both system and user). :param options Contents of the configuration file. :type options string or dict """ if isinstance(options, dict): # Serialize a dict of options for writing. self.save_configuration(self._codec.serialize(options)) else: self._override_strategy.remove(self.USER_GROUP) self._override_strategy.remove(self.SYSTEM_GROUP) operating_system.write_file( self._base_config_path, options, as_root=self._requires_root) operating_system.chown( self._base_config_path, self._owner, self._group, as_root=self._requires_root) operating_system.chmod( self._base_config_path, FileMode.ADD_READ_ALL, as_root=self._requires_root) self._refresh_cache() def has_system_override(self, change_id): """Return whether a given 'system' change exists. """ return self._override_strategy.exists(self.SYSTEM_GROUP, change_id) def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID): """Apply a 'system' change to the configuration. System overrides are always applied after all user changes so that they override any user-defined setting. :param options Configuration changes. :type options string or dict """ self._apply_override(self.SYSTEM_GROUP, change_id, options) def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID): """Apply a 'user' change to the configuration. The 'system' values will be re-applied over this override. :param options Configuration changes. :type options string or dict """ self._apply_override(self.USER_GROUP, change_id, options)
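# An illustrative usage sketch (the path, ownership and codec are assumed;
# IniCodec stands in for a StreamCodec implementation such as the one in
# trove.common.stream_codecs):
#     manager = ConfigurationManager('/etc/mysql/my.cnf', 'mysql', 'mysql',
#                                    IniCodec(), requires_root=True)
#     manager.apply_user_override({'mysqld': {'max_connections': '100'}})
#     manager.get_value('mysqld')  # reflects the override until removed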
def get_user_override(self, change_id=DEFAULT_CHANGE_ID): """Get the user overrides.""" return self._override_strategy.get(self.USER_GROUP, change_id) def _apply_override(self, group_name, change_id, options): if not isinstance(options, dict): # Deserialize the options into a dict if not already. self._apply_override( group_name, change_id, self._codec.deserialize(options)) else: self._override_strategy.apply(group_name, change_id, options) self._refresh_cache() def remove_system_override(self, change_id=DEFAULT_CHANGE_ID): """Revert a 'system' configuration change. """ self._remove_override(self.SYSTEM_GROUP, change_id) def remove_user_override(self, change_id=DEFAULT_CHANGE_ID): """Revert a 'user' configuration change. """ self._remove_override(self.USER_GROUP, change_id) def _remove_override(self, group_name, change_id): self._override_strategy.remove(group_name, change_id) self._refresh_cache() def _refresh_cache(self): self._value_cache = self.parse_configuration() @six.add_metaclass(abc.ABCMeta) class ConfigurationOverrideStrategy(object): """ConfigurationOverrideStrategy handles configuration files. The strategy provides functionality to enumerate, apply and remove configuration overrides. """ @abc.abstractmethod def configure(self, *args, **kwargs): """Configure this strategy. A strategy needs to be configured before it can be used. It would typically be configured by the ConfigurationManager. """ @abc.abstractmethod def exists(self, group_name, change_id): """Return whether a given revision exists. """ @abc.abstractmethod def apply(self, group_name, change_id, options): """Apply given options on the most current configuration revision. Update if a file with the same id already exists. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string :param options Configuration changes. :type options dict """ @abc.abstractmethod def remove(self, group_name, change_id=None): """Rollback a given configuration override. Remove the whole group if 'change_id' is None. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string """ @abc.abstractmethod def get(self, group_name, change_id=None): """Return the contents of a given configuration override. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string """ def parse_updates(self): """Return all updates applied to the base revision as a single dict. Return an empty dict if the base file is always the most current version of configuration. :returns: Updates to the base revision as a Python dict. """ return {} class ImportOverrideStrategy(ConfigurationOverrideStrategy): """Import strategy keeps overrides in separate files that get imported into the base configuration file which never changes itself. An override file is simply deleted when the override is removed. We keep two sets of override files in a separate directory. - User overrides - configuration overrides applied by the user via the Trove API. - System overrides - 'internal' configuration changes applied by the guestagent. The name format of override files is: '<group name>-<n>-<change id>.<revision ext>' where the 'set prefix' of the group name is used to order user/system sets, 'n' is an index used to keep track of the order in which overrides within their set got applied. """
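# An illustrative example (the 'cnf' revision extension is assumed): the
# first 'user' override applied under the default change id 'common' would
# be stored as '20-user-001-common.cnf'.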
FILE_NAME_PATTERN = '%s-([0-9]+)-%s\.%s$' def __init__(self, revision_dir, revision_ext): """ :param revision_dir Path to the directory for import files. :type revision_dir string :param revision_ext Extension of revision files. :type revision_ext string """ self._revision_dir = revision_dir self._revision_ext = revision_ext def configure(self, base_config_path, owner, group, codec, requires_root): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration and revision files. :type owner string :param group Group of the configuration and revision files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the strategy requires superuser privileges. :type requires_root boolean """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root def exists(self, group_name, change_id): return self._find_revision_file(group_name, change_id) is not None def apply(self, group_name, change_id, options): self._initialize_import_directory() revision_file = self._find_revision_file(group_name, change_id) if revision_file is None: # Create a new file. last_revision_index = self._get_last_file_index(group_name) revision_file = guestagent_utils.build_file_path( self._revision_dir, '%s-%03d-%s' % (group_name, last_revision_index + 1, change_id), self._revision_ext) else: # Update the existing file. current = operating_system.read_file( revision_file, codec=self._codec, as_root=self._requires_root) options = guestagent_utils.update_dict(options, current) operating_system.write_file( revision_file, options, codec=self._codec, as_root=self._requires_root) operating_system.chown( revision_file, self._owner, self._group, as_root=self._requires_root) operating_system.chmod( revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root) def _initialize_import_directory(self): """Lazy-initialize the directory for imported revision files. """ if not os.path.exists(self._revision_dir): operating_system.create_directory( self._revision_dir, user=self._owner, group=self._group, force=True, as_root=self._requires_root) def remove(self, group_name, change_id=None): removed = set() if change_id: # Remove a given file. revision_file = self._find_revision_file(group_name, change_id) if revision_file: removed.add(revision_file) else: # Remove the entire group. removed = self._collect_revision_files(group_name) for path in removed: operating_system.remove(path, force=True, as_root=self._requires_root) def get(self, group_name, change_id): revision_file = self._find_revision_file(group_name, change_id) return operating_system.read_file(revision_file, codec=self._codec, as_root=self._requires_root) def parse_updates(self): parsed_options = {} for path in self._collect_revision_files(): options = operating_system.read_file(path, codec=self._codec, as_root=self._requires_root) guestagent_utils.update_dict(options, parsed_options) return parsed_options @property def has_revisions(self): """Return True if there currently are any revision files.
""" return (operating_system.exists( self._revision_dir, is_directory=True, as_root=self._requires_root) and (len(self._collect_revision_files()) > 0)) def _get_last_file_index(self, group_name): """Get the index of the most current file in a given group. """ current_files = self._collect_revision_files(group_name) if current_files: name_pattern = self._build_rev_name_pattern(group_name=group_name) last_file_name = os.path.basename(current_files[-1]) last_index_match = re.match(name_pattern, last_file_name) if last_index_match: return int(last_index_match.group(1)) return 0 def _collect_revision_files(self, group_name='.+'): """Collect and return a sorted list of paths to existing revision files. The files should be sorted in the same order in which they were applied. """ name_pattern = self._build_rev_name_pattern(group_name=group_name) return sorted(operating_system.list_files_in_directory( self._revision_dir, recursive=True, pattern=name_pattern, as_root=self._requires_root)) def _find_revision_file(self, group_name, change_id): name_pattern = self._build_rev_name_pattern(group_name, change_id) found = operating_system.list_files_in_directory( self._revision_dir, recursive=True, pattern=name_pattern, as_root=self._requires_root) return next(iter(found), None) def _build_rev_name_pattern(self, group_name='.+', change_id='.+'): return self.FILE_NAME_PATTERN % (group_name, change_id, self._revision_ext) class OneFileOverrideStrategy(ConfigurationOverrideStrategy): """This is a strategy for datastores that do not support multiple configuration files. It uses the Import Strategy to keep the overrides internally. When an override is applied or removed a new configuration file is generated by applying all changes on a saved-off base revision. """ BASE_REVISION_NAME = 'base' REVISION_EXT = 'rev' def __init__(self, revision_dir): """ :param revision_dir Path to the directory for import files. :type revision_dir string """ self._revision_dir = revision_dir self._import_strategy = ImportOverrideStrategy(revision_dir, self.REVISION_EXT) def configure(self, base_config_path, owner, group, codec, requires_root): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration and revision files. :type owner string :param group Group of the configuration and revision files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the strategy requires superuser privileges. :type requires_root boolean """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root self._base_revision_file = guestagent_utils.build_file_path( self._revision_dir, self.BASE_REVISION_NAME, self.REVISION_EXT) self._import_strategy.configure( base_config_path, owner, group, codec, requires_root) def exists(self, group_name, change_id): return self._import_strategy.exists(group_name, change_id) def apply(self, group_name, change_id, options): self._import_strategy.apply(group_name, change_id, options) self._regenerate_base_configuration() def remove(self, group_name, change_id=None): if self._import_strategy.has_revisions: self._import_strategy.remove(group_name, change_id=change_id) self._regenerate_base_configuration() if not self._import_strategy.has_revisions: # The base revision file is no longer needed if there are no # overrides. 
It will be regenerated based on the current # configuration file on the first 'apply()'. operating_system.remove(self._base_revision_file, force=True, as_root=self._requires_root) def get(self, group_name, change_id): return self._import_strategy.get(group_name, change_id) def _regenerate_base_configuration(self): """Gather all configuration changes and apply them in order on the base revision. Write the results to the configuration file. """ if not os.path.exists(self._base_revision_file): # Initialize the file with the current configuration contents if it # does not exist. operating_system.copy( self._base_config_path, self._base_revision_file, force=True, preserve=True, as_root=self._requires_root) base_revision = operating_system.read_file( self._base_revision_file, codec=self._codec, as_root=self._requires_root) changes = self._import_strategy.parse_updates() updated_revision = guestagent_utils.update_dict(changes, base_revision) operating_system.write_file( self._base_config_path, updated_revision, codec=self._codec, as_root=self._requires_root) trove-5.0.0/trove/guestagent/db/0000775000567000056710000000000012701410521017677 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/db/__init__.py0000664000567000056710000000000012701410316022000 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/db/models.py0000664000567000056710000010472212701410316021544 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import re import string import netaddr from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils CONF = cfg.CONF class Base(object): def serialize(self): return self.__dict__ def deserialize(self, o): self.__dict__ = o @classmethod def _validate_dict(cls, value): reqs = cls._dict_requirements() return (isinstance(value, dict) and all(key in value for key in reqs)) @classmethod @abc.abstractmethod def _dict_requirements(cls): """Get the dictionary requirements for a user created via deserialization. :returns: List of required dictionary keys. """ class DatastoreSchema(Base): """Represents a database schema.""" def __init__(self): self._name = None self._collate = None self._character_set = None @classmethod def deserialize_schema(cls, value): if not cls._validate_dict(value): raise ValueError(_("Bad dictionary. Keys: %(keys)s. " "Required: %(reqs)s") % ({'keys': value.keys(), 'reqs': cls._dict_requirements()})) schema = cls(deserializing=True) schema.deserialize(value) return schema @property def name(self): return self._name @name.setter def name(self, value): self._validate_schema_name(value) self._name = value @property def collate(self): return self._collate @property def character_set(self): return self._character_set def _validate_schema_name(self, value): """Perform validations on a given schema name. :param value: Validated schema name. :type value: string :raises: ValueError On validation errors. 
""" if self._max_schema_name_length and (len(value) > self._max_schema_name_length): raise ValueError(_("Schema name '%(name)s' is too long. " "Max length = %(max_length)d.") % {'name': value, 'max_length': self._max_schema_name_length}) elif not self._is_valid_schema_name(value): raise ValueError(_("'%s' is not a valid schema name.") % value) @abc.abstractproperty def _max_schema_name_length(self): """Return the maximum valid schema name length if any. :returns: Maximum schema name length or None if unlimited. """ @abc.abstractmethod def _is_valid_schema_name(self, value): """Validate a given schema name. :param value: Validated schema name. :type value: string :returns: TRUE if valid, FALSE otherwise. """ @classmethod @abc.abstractmethod def _dict_requirements(cls): """Get the dictionary requirements for a user created via deserialization. :returns: List of required dictionary keys. """ class MongoDBSchema(DatastoreSchema): """Represents the MongoDB schema and its associated properties. MongoDB database names are limited to 128 characters, alphanumeric and - and _ only. """ name_regex = re.compile(r'^[a-zA-Z0-9_\-]+$') def __init__(self, name=None, deserializing=False): super(MongoDBSchema, self).__init__() # need one or the other, not both, not none (!= ~ XOR) if not (bool(deserializing) != bool(name)): raise ValueError(_("Bad args. name: %(name)s, " "deserializing %(deser)s.") % ({'name': bool(name), 'deser': bool(deserializing)})) if not deserializing: self.name = name @property def _max_schema_name_length(self): return 64 def _is_valid_schema_name(self, value): # check against the invalid character set from # http://docs.mongodb.org/manual/reference/limits return not any(c in value for c in '/\. "$') @classmethod def _dict_requirements(cls): return ['_name'] class CassandraSchema(DatastoreSchema): """Represents a Cassandra schema and its associated properties. Keyspace names are 32 or fewer alpha-numeric characters and underscores, the first of which is an alpha character. """ def __init__(self, name=None, deserializing=False): super(CassandraSchema, self).__init__() if not (bool(deserializing) != bool(name)): raise ValueError(_("Bad args. name: %(name)s, " "deserializing %(deser)s.") % ({'name': bool(name), 'deser': bool(deserializing)})) if not deserializing: self.name = name @property def _max_schema_name_length(self): return 32 def _is_valid_schema_name(self, value): return True @classmethod def _dict_requirements(cls): return ['_name'] class CouchDBSchema(DatastoreSchema): '''Represents the CouchDB schema and its associated properties. The database name must consist of one or more of the following characters and the name must begin with a lowercase letter. - Lowercase characters (a-z) - Digits (0-9) - Any of the characters _, $, (, ), +, -, and / ''' name_regex = re.compile(r'^[a-z][a-z0-9_$()+/-]*$') def __init__(self, name=None, deserializing=False): super(CouchDBSchema, self).__init__() self._ignore_dbs = cfg.get_ignored_dbs() # need one or the other, not both, not none (!= ~ XOR) if not (bool(deserializing) != bool(name)): raise ValueError(_("Bad args. 
name: %(name)s, " "deserializing %(deser)s.") % ({'name': bool(name), 'deser': bool(deserializing)})) if not deserializing: self.name = name @property def _max_schema_name_length(self): return None def _is_valid_schema_name(self, value): # https://wiki.apache.org/couchdb/HTTP_database_API if value.lower() in self._ignore_dbs: return False if re.match(r'^[a-z]*$', value[0]): return True else: return False @classmethod def _dict_requirements(cls): return ['_name'] class MySQLDatabase(Base): """Represents a Database and its properties.""" # Defaults __charset__ = "utf8" __collation__ = "utf8_general_ci" dbname = re.compile("^[A-Za-z0-9_-]+[\s\?\#\@]*[A-Za-z0-9_-]+$") # Complete list of acceptable values charset = {"big5": ["big5_chinese_ci", "big5_bin"], "dec8": ["dec8_swedish_ci", "dec8_bin"], "cp850": ["cp850_general_ci", "cp850_bin"], "hp8": ["hp8_english_ci", "hp8_bin"], "koi8r": ["koi8r_general_ci", "koi8r_bin"], "latin1": ["latin1_swedish_ci", "latin1_german1_ci", "latin1_danish_ci", "latin1_german2_ci", "latin1_bin", "latin1_general_ci", "latin1_general_cs", "latin1_spanish_ci"], "latin2": ["latin2_general_ci", "latin2_czech_cs", "latin2_hungarian_ci", "latin2_croatian_ci", "latin2_bin"], "swe7": ["swe7_swedish_ci", "swe7_bin"], "ascii": ["ascii_general_ci", "ascii_bin"], "ujis": ["ujis_japanese_ci", "ujis_bin"], "sjis": ["sjis_japanese_ci", "sjis_bin"], "hebrew": ["hebrew_general_ci", "hebrew_bin"], "tis620": ["tis620_thai_ci", "tis620_bin"], "euckr": ["euckr_korean_ci", "euckr_bin"], "koi8u": ["koi8u_general_ci", "koi8u_bin"], "gb2312": ["gb2312_chinese_ci", "gb2312_bin"], "greek": ["greek_general_ci", "greek_bin"], "cp1250": ["cp1250_general_ci", "cp1250_czech_cs", "cp1250_croatian_ci", "cp1250_bin", "cp1250_polish_ci"], "gbk": ["gbk_chinese_ci", "gbk_bin"], "latin5": ["latin5_turkish_ci", "latin5_bin"], "armscii8": ["armscii8_general_ci", "armscii8_bin"], "utf8": ["utf8_general_ci", "utf8_bin", "utf8_unicode_ci", "utf8_icelandic_ci", "utf8_latvian_ci", "utf8_romanian_ci", "utf8_slovenian_ci", "utf8_polish_ci", "utf8_estonian_ci", "utf8_spanish_ci", "utf8_swedish_ci", "utf8_turkish_ci", "utf8_czech_ci", "utf8_danish_ci", "utf8_lithuanian_ci", "utf8_slovak_ci", "utf8_spanish2_ci", "utf8_roman_ci", "utf8_persian_ci", "utf8_esperanto_ci", "utf8_hungarian_ci"], "ucs2": ["ucs2_general_ci", "ucs2_bin", "ucs2_unicode_ci", "ucs2_icelandic_ci", "ucs2_latvian_ci", "ucs2_romanian_ci", "ucs2_slovenian_ci", "ucs2_polish_ci", "ucs2_estonian_ci", "ucs2_spanish_ci", "ucs2_swedish_ci", "ucs2_turkish_ci", "ucs2_czech_ci", "ucs2_danish_ci", "ucs2_lithuanian_ci", "ucs2_slovak_ci", "ucs2_spanish2_ci", "ucs2_roman_ci", "ucs2_persian_ci", "ucs2_esperanto_ci", "ucs2_hungarian_ci"], "cp866": ["cp866_general_ci", "cp866_bin"], "keybcs2": ["keybcs2_general_ci", "keybcs2_bin"], "macce": ["macce_general_ci", "macce_bin"], "macroman": ["macroman_general_ci", "macroman_bin"], "cp852": ["cp852_general_ci", "cp852_bin"], "latin7": ["latin7_general_ci", "latin7_estonian_cs", "latin7_general_cs", "latin7_bin"], "cp1251": ["cp1251_general_ci", "cp1251_bulgarian_ci", "cp1251_ukrainian_ci", "cp1251_bin", "cp1251_general_cs"], "cp1256": ["cp1256_general_ci", "cp1256_bin"], "cp1257": ["cp1257_general_ci", "cp1257_lithuanian_ci", "cp1257_bin"], "binary": ["binary"], "geostd8": ["geostd8_general_ci", "geostd8_bin"], "cp932": ["cp932_japanese_ci", "cp932_bin"], "eucjpms": ["eucjpms_japanese_ci", "eucjpms_bin"]} collation = {"big5_chinese_ci": "big5", "big5_bin": "big5", "dec8_swedish_ci": "dec8", "dec8_bin": "dec8", 
"cp850_general_ci": "cp850", "cp850_bin": "cp850", "hp8_english_ci": "hp8", "hp8_bin": "hp8", "koi8r_general_ci": "koi8r", "koi8r_bin": "koi8r", "latin1_german1_ci": "latin1", "latin1_swedish_ci": "latin1", "latin1_danish_ci": "latin1", "latin1_german2_ci": "latin1", "latin1_bin": "latin1", "latin1_general_ci": "latin1", "latin1_general_cs": "latin1", "latin1_spanish_ci": "latin1", "latin2_czech_cs": "latin2", "latin2_general_ci": "latin2", "latin2_hungarian_ci": "latin2", "latin2_croatian_ci": "latin2", "latin2_bin": "latin2", "swe7_swedish_ci": "swe7", "swe7_bin": "swe7", "ascii_general_ci": "ascii", "ascii_bin": "ascii", "ujis_japanese_ci": "ujis", "ujis_bin": "ujis", "sjis_japanese_ci": "sjis", "sjis_bin": "sjis", "hebrew_general_ci": "hebrew", "hebrew_bin": "hebrew", "tis620_thai_ci": "tis620", "tis620_bin": "tis620", "euckr_korean_ci": "euckr", "euckr_bin": "euckr", "koi8u_general_ci": "koi8u", "koi8u_bin": "koi8u", "gb2312_chinese_ci": "gb2312", "gb2312_bin": "gb2312", "greek_general_ci": "greek", "greek_bin": "greek", "cp1250_general_ci": "cp1250", "cp1250_czech_cs": "cp1250", "cp1250_croatian_ci": "cp1250", "cp1250_bin": "cp1250", "cp1250_polish_ci": "cp1250", "gbk_chinese_ci": "gbk", "gbk_bin": "gbk", "latin5_turkish_ci": "latin5", "latin5_bin": "latin5", "armscii8_general_ci": "armscii8", "armscii8_bin": "armscii8", "utf8_general_ci": "utf8", "utf8_bin": "utf8", "utf8_unicode_ci": "utf8", "utf8_icelandic_ci": "utf8", "utf8_latvian_ci": "utf8", "utf8_romanian_ci": "utf8", "utf8_slovenian_ci": "utf8", "utf8_polish_ci": "utf8", "utf8_estonian_ci": "utf8", "utf8_spanish_ci": "utf8", "utf8_swedish_ci": "utf8", "utf8_turkish_ci": "utf8", "utf8_czech_ci": "utf8", "utf8_danish_ci": "utf8", "utf8_lithuanian_ci": "utf8", "utf8_slovak_ci": "utf8", "utf8_spanish2_ci": "utf8", "utf8_roman_ci": "utf8", "utf8_persian_ci": "utf8", "utf8_esperanto_ci": "utf8", "utf8_hungarian_ci": "utf8", "ucs2_general_ci": "ucs2", "ucs2_bin": "ucs2", "ucs2_unicode_ci": "ucs2", "ucs2_icelandic_ci": "ucs2", "ucs2_latvian_ci": "ucs2", "ucs2_romanian_ci": "ucs2", "ucs2_slovenian_ci": "ucs2", "ucs2_polish_ci": "ucs2", "ucs2_estonian_ci": "ucs2", "ucs2_spanish_ci": "ucs2", "ucs2_swedish_ci": "ucs2", "ucs2_turkish_ci": "ucs2", "ucs2_czech_ci": "ucs2", "ucs2_danish_ci": "ucs2", "ucs2_lithuanian_ci": "ucs2", "ucs2_slovak_ci": "ucs2", "ucs2_spanish2_ci": "ucs2", "ucs2_roman_ci": "ucs2", "ucs2_persian_ci": "ucs2", "ucs2_esperanto_ci": "ucs2", "ucs2_hungarian_ci": "ucs2", "cp866_general_ci": "cp866", "cp866_bin": "cp866", "keybcs2_general_ci": "keybcs2", "keybcs2_bin": "keybcs2", "macce_general_ci": "macce", "macce_bin": "macce", "macroman_general_ci": "macroman", "macroman_bin": "macroman", "cp852_general_ci": "cp852", "cp852_bin": "cp852", "latin7_estonian_cs": "latin7", "latin7_general_ci": "latin7", "latin7_general_cs": "latin7", "latin7_bin": "latin7", "cp1251_bulgarian_ci": "cp1251", "cp1251_ukrainian_ci": "cp1251", "cp1251_bin": "cp1251", "cp1251_general_ci": "cp1251", "cp1251_general_cs": "cp1251", "cp1256_general_ci": "cp1256", "cp1256_bin": "cp1256", "cp1257_lithuanian_ci": "cp1257", "cp1257_bin": "cp1257", "cp1257_general_ci": "cp1257", "binary": "binary", "geostd8_general_ci": "geostd8", "geostd8_bin": "geostd8", "cp932_japanese_ci": "cp932", "cp932_bin": "cp932", "eucjpms_japanese_ci": "eucjpms", "eucjpms_bin": "eucjpms"} def __init__(self): self._name = None self._collate = None self._character_set = None self._ignore_dbs = cfg.get_ignored_dbs() @property def name(self): return self._name def _is_valid(self, 
value): return value.lower() not in self._ignore_dbs @name.setter def name(self, value): self._name = value @property def collate(self): """Get the appropriate collate value.""" if not self._collate and not self._character_set: return self.__collation__ elif not self._collate: return self.charset[self._character_set][0] else: return self._collate @collate.setter def collate(self, value): """Validate the collation and set it.""" if not value: pass elif self._character_set: if value not in self.charset[self._character_set]: msg = (_("%(val)s not a valid collation for charset %(char)s.") % {'val': value, 'char': self._character_set}) raise ValueError(msg) self._collate = value else: if value not in self.collation: raise ValueError(_("'%s' not a valid collation.") % value) self._collate = value self._character_set = self.collation[value] @property def character_set(self): """Get the appropriate character set value.""" if not self._character_set: return self.__charset__ else: return self._character_set @character_set.setter def character_set(self, value): """Validate the character set and set it.""" if not value: pass elif value not in self.charset: raise ValueError(_("'%s' not a valid character set.") % value) else: self._character_set = value class ValidatedMySQLDatabase(MySQLDatabase): @MySQLDatabase.name.setter def name(self, value): if any([not value, not self._is_valid(value), not self.dbname.match(value), string.find("%r" % value, "\\") != -1]): raise ValueError(_("'%s' is not a valid database name.") % value) elif len(value) > 64: msg = _("Database name '%s' is too long. Max length = 64.") raise ValueError(msg % value) else: self._name = value class DatastoreUser(Base): """Represents a datastore user.""" _HOSTNAME_WILDCARD = '%' def __init__(self): self._name = None self._password = None self._host = None self._databases = [] @classmethod def deserialize_user(cls, value): if not cls._validate_dict(value): raise ValueError(_("Bad dictionary. Keys: %(keys)s. " "Required: %(reqs)s") % ({'keys': value.keys(), 'reqs': cls._dict_requirements()})) user = cls(deserializing=True) user.deserialize(value) return user @property def name(self): return self._name @name.setter def name(self, value): self._validate_user_name(value) self._name = value @property def password(self): return self._password @password.setter def password(self, value): if self._is_valid_password(value): self._password = value else: raise ValueError(_("'%s' is not a valid password.") % value) @property def databases(self): return self._databases @databases.setter def databases(self, value): mydb = self._build_database_schema(value) self._databases.append(mydb.serialize()) @property def host(self): if self._host is None: return self._HOSTNAME_WILDCARD return self._host @host.setter def host(self, value): if self._is_valid_host_name(value): self._host = value else: raise ValueError(_("'%s' is not a valid hostname.") % value) @abc.abstractmethod def _build_database_schema(self, name): """Build a schema for this user. :type name: string :type character_set: string :type collate: string """ def _validate_user_name(self, value): """Perform validations on a given user name. :param value: Validated user name. :type value: string :raises: ValueError On validation errors. """ if self._max_username_length and (len(value) > self._max_username_length): raise ValueError(_("User name '%(name)s' is too long. 
" "Max length = %(max_length)d.") % {'name': value, 'max_length': self._max_username_length}) elif not self._is_valid_name(value): raise ValueError(_("'%s' is not a valid user name.") % value) @abc.abstractproperty def _max_username_length(self): """Return the maximum valid user name length if any. :returns: Maximum user name length or None if unlimited. """ @abc.abstractmethod def _is_valid_name(self, value): """Validate a given user name. :param value: User name to be validated. :type value: string :returns: TRUE if valid, FALSE otherwise. """ @abc.abstractmethod def _is_valid_host_name(self, value): """Validate a given host name. :param value: Host name to be validated. :type value: string :returns: TRUE if valid, FALSE otherwise. """ @abc.abstractmethod def _is_valid_password(self, value): """Validate a given password. :param value: Password to be validated. :type value: string :returns: TRUE if valid, FALSE otherwise. """ @classmethod @abc.abstractmethod def _dict_requirements(cls): """Get the dictionary requirements for a user created via deserialization. :returns: List of required dictionary keys. """ class MongoDBUser(DatastoreUser): """Represents a MongoDB user and its associated properties. MongoDB users are identified using their namd and database. Trove stores this as . """ def __init__(self, name=None, password=None, deserializing=False): super(MongoDBUser, self).__init__() self._name = None self._username = None self._database = None self._roles = [] # need only one of: deserializing, name, or (name and password) if ((not (bool(deserializing) != bool(name))) or (bool(deserializing) and bool(password))): raise ValueError(_("Bad args. name: %(name)s, " "password %(pass)s, " "deserializing %(deser)s.") % ({'name': bool(name), 'pass': bool(password), 'deser': bool(deserializing)})) if not deserializing: self.name = name self.password = password @property def username(self): return self._username @username.setter def username(self, value): self._update_name(username=value) @property def database(self): return MongoDBSchema.deserialize_schema(self._database) @database.setter def database(self, value): self._update_name(database=value) @property def name(self): return self._name @name.setter def name(self, value): self._update_name(name=value) def _update_name(self, name=None, username=None, database=None): """Keep the name, username, and database values in sync.""" if name: (database, username) = self._parse_name(name) if not (database and username): missing = 'username' if self.database else 'database' raise ValueError(_("MongoDB user's name missing %s.") % missing) else: if username: if not self.database: raise ValueError(_('MongoDB user missing database.')) database = self.database.name else: # database if not self.username: raise ValueError(_('MongoDB user missing username.')) username = self.username name = '%s.%s' % (database, username) self._name = name self._username = username self._database = self._build_database_schema(database).serialize() @property def roles(self): return self._roles @roles.setter def roles(self, value): if isinstance(value, list): for role in value: self._add_role(role) else: self._add_role(value) def revoke_role(self, role): if role in self.roles: self._roles.remove(role) def _init_roles(self): if '_roles' not in self.__dict__: self._roles = [] for db in self._databases: self._roles.append({'db': db['_name'], 'role': 'readWrite'}) @classmethod def deserialize_user(cls, value): user = super(MongoDBUser, cls).deserialize_user(value) user.name = 
def _build_database_schema(self, name): return MongoDBSchema(name) @staticmethod def _parse_name(value): """The name will be '<database>.<username>', so split it.""" parts = value.split('.', 1) if len(parts) != 2: raise exception.BadRequest(_( 'MongoDB user name "%s" not in <database>.<username> format.' ) % value) return parts[0], parts[1] @property def _max_username_length(self): return None def _is_valid_name(self, value): return True def _is_valid_host_name(self, value): return True def _is_valid_password(self, value): return True def _add_role(self, value): if not self._is_valid_role(value): raise ValueError(_('Role %s is invalid.') % value) self._roles.append(value) if value['role'] == 'readWrite': self.databases = value['db'] def _is_valid_role(self, value): if not isinstance(value, dict): return False if not {'db', 'role'} == set(value): return False return True @classmethod def _dict_requirements(cls): return ['_name'] class CassandraUser(DatastoreUser): """Represents a Cassandra user and its associated properties.""" def __init__(self, name=None, password=None, deserializing=False): super(CassandraUser, self).__init__() if ((not (bool(deserializing) != bool(name))) or (bool(deserializing) and bool(password))): raise ValueError(_("Bad args. name: %(name)s, " "password %(pass)s, " "deserializing %(deser)s.") % ({'name': bool(name), 'pass': bool(password), 'deser': bool(deserializing)})) if not deserializing: self.name = name self.password = password def _build_database_schema(self, name): return CassandraSchema(name) @property def _max_username_length(self): return 65535 def _is_valid_name(self, value): return True def _is_valid_host_name(self, value): return True def _is_valid_password(self, value): return True @classmethod def _dict_requirements(cls): return ['_name'] class CouchDBUser(DatastoreUser): """Represents a CouchDB user and its associated properties.""" def __init__(self): self._name = None self._host = None self._password = None self._databases = [] self._ignore_users = cfg.get_ignored_users() def _is_valid(self, value): return True @property def name(self): return self._name @name.setter def name(self, value): if not self._is_valid(value): raise ValueError(_("'%s' is not a valid user name.") % value) else: self._name = value @property def password(self): return self._password @password.setter def password(self, value): if not self._is_valid(value): raise ValueError(_("'%s' is not a valid password.") % value) else: self._password = value @property def databases(self): return self._databases @databases.setter def databases(self, value): mydb = ValidatedMySQLDatabase() mydb.name = value self._databases.append(mydb.serialize()) @property def host(self): if self._host is None: return '%' return self._host @host.setter def host(self, value): if not self._is_valid_host_name(value): raise ValueError(_("'%s' is not a valid hostname.") % value) else: self._host = value class MySQLUser(Base): """Represents a MySQL User and its associated properties.""" not_supported_chars = re.compile("^\s|\s$|'|\"|;|`|,|/|\\\\") def __init__(self): self._name = None self._host = None self._password = None self._databases = [] self._ignore_users = cfg.get_ignored_users() def _is_valid(self, value): if (not value or self.not_supported_chars.search(value) or string.find("%r" % value, "\\") != -1): return False else: return True def _is_valid_user_name(self, value): if (self._is_valid(value) and value.lower() not in self._ignore_users): return True return False def _is_valid_host_name(self, value):
if value in [None, "%"]: # % is MySQL shorthand for "everywhere". Always permitted. # Null host defaults to % anyway. return True if CONF.hostname_require_valid_ip: try: # '%' works as a MySQL wildcard, but it is not a valid # part of an IPAddress netaddr.IPAddress(value.replace('%', '1')) except (ValueError, netaddr.AddrFormatError): return False else: return True else: # If it wasn't required, anything else goes. return True @property def name(self): return self._name @name.setter def name(self, value): if not self._is_valid_user_name(value): raise ValueError(_("'%s' is not a valid user name.") % value) elif len(value) > 16: raise ValueError(_("User name '%s' is too long. Max length = 16.") % value) else: self._name = value @property def password(self): return self._password @password.setter def password(self, value): if not self._is_valid(value): raise ValueError(_("'%s' is not a valid password.") % value) else: self._password = value @property def databases(self): return self._databases @databases.setter def databases(self, value): mydb = ValidatedMySQLDatabase() mydb.name = value self._databases.append(mydb.serialize()) @property def host(self): if self._host is None: return '%' return self._host @host.setter def host(self, value): if not self._is_valid_host_name(value): raise ValueError(_("'%s' is not a valid hostname.") % value) else: self._host = value class RootUser(MySQLUser): """Overrides _ignore_users from the MySQLUser class.""" def __init__(self): self._ignore_users = [] class MySQLRootUser(RootUser): """Represents the MySQL root user.""" def __init__(self, password=None): super(MySQLRootUser, self).__init__() self._name = "root" self._host = "%" if password is None: self._password = utils.generate_random_password() else: self._password = password class CassandraRootUser(CassandraUser): """Represents the Cassandra default superuser.""" def __init__(self, password=None, *args, **kwargs): if password is None: password = utils.generate_random_password() super(CassandraRootUser, self).__init__("cassandra", password=password, *args, **kwargs) trove-5.0.0/trove/guestagent/datastore/0000775000567000056710000000000012701410521021300 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/__init__.py0000664000567000056710000000000012701410316023401 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/mysql_common/0000775000567000056710000000000012701410521024015 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/mysql_common/__init__.py0000664000567000056710000000000012701410316026116 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/mysql_common/manager.py0000664000567000056710000003652312701410316026014 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import os from oslo_log import log as logging from trove.common import cfg from trove.common import configurations from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.common import operating_system from trove.guestagent.datastore import manager from trove.guestagent.datastore.mysql_common import service from trove.guestagent import guest_log from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class MySqlManager(manager.Manager): def __init__(self, mysql_app, mysql_app_status, mysql_admin, manager_name='mysql'): super(MySqlManager, self).__init__(manager_name) self._mysql_app = mysql_app self._mysql_app_status = mysql_app_status self._mysql_admin = mysql_admin self.volume_do_not_start_on_reboot = False @property def mysql_app(self): return self._mysql_app @property def mysql_app_status(self): return self._mysql_app_status @property def mysql_admin(self): return self._mysql_admin @property def status(self): return self.mysql_app_status.get() @property def configuration_manager(self): return self.mysql_app( self.mysql_app_status.get()).configuration_manager @property def datastore_log_defs(self): owner = 'mysql' datastore_dir = self.mysql_app.get_data_dir() server_section = configurations.MySQLConfParser.SERVER_CONF_SECTION long_query_time = CONF.get(self.manager).get( 'guest_log_long_query_time') / 1000 general_log_file = self.build_log_file_name( self.GUEST_LOG_DEFS_GENERAL_LABEL, owner, datastore_dir=datastore_dir) error_log_file = self.validate_log_file('/var/log/mysqld.log', owner) slow_query_log_file = self.build_log_file_name( self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL, owner, datastore_dir=datastore_dir) return { self.GUEST_LOG_DEFS_GENERAL_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: general_log_file, self.GUEST_LOG_SECTION_LABEL: server_section, self.GUEST_LOG_ENABLE_LABEL: { 'general_log': 'on', 'general_log_file': general_log_file, 'log_output': 'file', }, self.GUEST_LOG_DISABLE_LABEL: { 'general_log': 'off', }, }, self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: slow_query_log_file, self.GUEST_LOG_SECTION_LABEL: server_section, self.GUEST_LOG_ENABLE_LABEL: { 'slow_query_log': 'on', 'slow_query_log_file': slow_query_log_file, 'long_query_time': long_query_time, }, self.GUEST_LOG_DISABLE_LABEL: { 'slow_query_log': 'off', }, }, self.GUEST_LOG_DEFS_ERROR_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: error_log_file, }, } def change_passwords(self, context, users): with EndNotification(context): self.mysql_admin().change_passwords(users) def update_attributes(self, context, username, hostname, user_attrs): with EndNotification(context): self.mysql_admin().update_attributes( username, hostname, user_attrs) def reset_configuration(self, context, configuration): app = self.mysql_app(self.mysql_app_status.get()) app.reset_configuration(configuration) def create_database(self, context, databases): with EndNotification(context): return self.mysql_admin().create_database(databases) def create_user(self, context, users): with EndNotification(context): self.mysql_admin().create_user(users) def delete_database(self, context, database): with 
EndNotification(context): return self.mysql_admin().delete_database(database) def delete_user(self, context, user): with EndNotification(context): self.mysql_admin().delete_user(user) def get_user(self, context, username, hostname): return self.mysql_admin().get_user(username, hostname) def grant_access(self, context, username, hostname, databases): return self.mysql_admin().grant_access(username, hostname, databases) def revoke_access(self, context, username, hostname, database): return self.mysql_admin().revoke_access(username, hostname, database) def list_access(self, context, username, hostname): return self.mysql_admin().list_access(username, hostname) def list_databases(self, context, limit=None, marker=None, include_marker=False): return self.mysql_admin().list_databases(limit, marker, include_marker) def list_users(self, context, limit=None, marker=None, include_marker=False): return self.mysql_admin().list_users(limit, marker, include_marker) def enable_root(self, context): return self.mysql_admin().enable_root() def enable_root_with_password(self, context, root_password=None): return self.mysql_admin().enable_root(root_password) def is_root_enabled(self, context): return self.mysql_admin().is_root_enabled() def disable_root(self, context): return self.mysql_admin().disable_root() def _perform_restore(self, backup_info, context, restore_location, app): LOG.info(_("Restoring database from backup %s.") % backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception(_("Error performing restore from backup %s.") % backup_info['id']) app.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info(_("Restored database successfully.")) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" app = self.mysql_app(self.mysql_app_status.get()) app.install_if_needed(packages) if device_path: # stop and do not update database app.stop_db( do_not_start_on_reboot=self.volume_do_not_start_on_reboot) device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): # rsync existing data to a "data" sub-directory # on the new volume device.migrate_data(mount_point, target_subdir="data") # mount the volume device.mount(mount_point) operating_system.chown(mount_point, service.MYSQL_OWNER, service.MYSQL_OWNER, recursive=False, as_root=True) LOG.debug("Mounted the volume at %s." % mount_point) # We need to temporarily update the default my.cnf so that # mysql will start after the volume is mounted. Later on it # will be changed based on the config template # (see MySqlApp.secure()) and restart. 
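# (Editor's note -- illustrative addition, not upstream code: assuming the
# IniCodec-backed ConfigurationManager wired up in mysql_common/service.py,
# the set_data_dir() call below amounts to writing a system override like
#
#     [mysqld]
#     datadir = /var/lib/mysql/data
#
# into the conf.d include directory, so that mysqld starts against the
# freshly mounted volume.)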
app.set_data_dir(mount_point + '/data') app.start_mysql() if backup_info: self._perform_restore(backup_info, context, mount_point + "/data", app) LOG.debug("Securing MySQL now.") app.secure(config_contents) enable_root_on_restore = (backup_info and self.mysql_admin().is_root_enabled()) if enable_root_on_restore: app.secure_root(secure_remote_root=False) self.mysql_app_status.get().report_root(context, 'root') else: app.secure_root(secure_remote_root=True) if snapshot: self.attach_replica(context, snapshot, snapshot['config']) def restart(self, context): app = self.mysql_app(self.mysql_app_status.get()) app.restart() def start_db_with_conf_changes(self, context, config_contents): app = self.mysql_app(self.mysql_app_status.get()) app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): app = self.mysql_app(self.mysql_app_status.get()) app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def create_backup(self, context, backup_info): """ Entry point for initiating a backup for this guest agent's db instance. The call currently blocks until the backup is complete or errors. If device_path is specified, it will be mounted at a point specified in configuration. :param backup_info: a dictionary containing the db instance id of the backup task, location, type, and other data. """ with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): app = self.mysql_app(self.mysql_app_status.get()) if remove: app.remove_overrides() app.update_overrides(overrides) def apply_overrides(self, context, overrides): LOG.debug("Applying overrides (%s)." % overrides) app = self.mysql_app(self.mysql_app_status.get()) app.apply_overrides(overrides) def backup_required_for_replication(self, context): return self.replication.backup_required_for_replication() def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") app = self.mysql_app(self.mysql_app_status.get()) self.replication.enable_as_master(app, replica_source_config) snapshot_id, log_position = self.replication.snapshot_for_replication( context, app, None, snapshot_info) volume_stats = self.get_filesystem_stats(context, None) replication_snapshot = { 'dataset': { 'datastore_manager': self.manager, 'dataset_size': volume_stats.get('used', 0.0), 'volume_size': volume_stats.get('total', 0.0), 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': self.replication.get_master_ref(app, snapshot_info), 'log_position': log_position } return replication_snapshot def enable_as_master(self, context, replica_source_config): LOG.debug("Calling enable_as_master.") app = self.mysql_app(self.mysql_app_status.get()) self.replication.enable_as_master(app, replica_source_config) # DEPRECATED: Maintain for API Compatibility def get_txn_count(self, context): LOG.debug("Calling get_txn_count") return self.mysql_app(self.mysql_app_status.get()).get_txn_count() def get_last_txn(self, context): LOG.debug("Calling get_last_txn") return self.mysql_app(self.mysql_app_status.get()).get_last_txn() def get_latest_txn_id(self, context): LOG.debug("Calling get_latest_txn_id.") return self.mysql_app(self.mysql_app_status.get()).get_latest_txn_id() def wait_for_txn(self, context, txn): LOG.debug("Calling wait_for_txn.") self.mysql_app(self.mysql_app_status.get()).wait_for_txn(txn) def detach_replica(self, context, for_failover=False): LOG.debug("Detaching 
replica.") app = self.mysql_app(self.mysql_app_status.get()) replica_info = self.replication.detach_slave(app, for_failover) return replica_info def get_replica_context(self, context): LOG.debug("Getting replica context.") app = self.mysql_app(self.mysql_app_status.get()) replica_info = self.replication.get_replica_context(app) return replica_info def _validate_slave_for_replication(self, context, replica_info): if replica_info['replication_strategy'] != self.replication_strategy: raise exception.IncompatibleReplicationStrategy( replica_info.update({ 'guest_strategy': self.replication_strategy })) volume_stats = self.get_filesystem_stats(context, None) if (volume_stats.get('total', 0.0) < replica_info['dataset']['dataset_size']): raise exception.InsufficientSpaceForReplica( replica_info.update({ 'slave_volume_size': volume_stats.get('total', 0.0) })) def attach_replica(self, context, replica_info, slave_config): LOG.debug("Attaching replica.") app = self.mysql_app(self.mysql_app_status.get()) try: if 'replication_strategy' in replica_info: self._validate_slave_for_replication(context, replica_info) self.replication.enable_as_slave(app, replica_info, slave_config) except Exception: LOG.exception("Error enabling replication.") app.status.set_status(rd_instance.ServiceStatuses.FAILED) raise def make_read_only(self, context, read_only): LOG.debug("Executing make_read_only(%s)" % read_only) app = self.mysql_app(self.mysql_app_status.get()) app.make_read_only(read_only) def cleanup_source_on_replica_detach(self, context, replica_info): LOG.debug("Cleaning up the source on the detach of a replica.") self.replication.cleanup_source_on_replica_detach(self.mysql_admin(), replica_info) def demote_replication_master(self, context): LOG.debug("Demoting replication master.") app = self.mysql_app(self.mysql_app_status.get()) self.replication.demote_master(app) trove-5.0.0/trove/guestagent/datastore/mysql_common/service.py0000664000567000056710000012460612701410316026042 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from collections import defaultdict import os import re import six import uuid from oslo_log import log as logging import sqlalchemy from sqlalchemy import exc from sqlalchemy import interfaces from sqlalchemy.sql.expression import text from trove.common import cfg from trove.common.configurations import MySQLConfParser from trove.common import exception from trove.common.exception import PollTimeOut from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import IniCodec from trove.common import utils as utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common import sql_query from trove.guestagent.datastore import service from trove.guestagent.db import models from trove.guestagent import pkg ADMIN_USER_NAME = "os_admin" LOG = logging.getLogger(__name__) FLUSH = text(sql_query.FLUSH) ENGINE = None DATADIR = None PREPARING = False UUID = False TMP_MYCNF = "/tmp/my.cnf.tmp" MYSQL_BASE_DIR = "/var/lib/mysql" CONF = cfg.CONF INCLUDE_MARKER_OPERATORS = { True: ">=", False: ">" } OS_NAME = operating_system.get_os() MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf", operating_system.DEBIAN: "/etc/mysql/my.cnf", operating_system.SUSE: "/etc/my.cnf"}[OS_NAME] MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"] MYSQL_OWNER = 'mysql' CNF_EXT = 'cnf' CNF_INCLUDE_DIR = '/etc/mysql/conf.d/' CNF_MASTER = 'master-replication' CNF_SLAVE = 'slave-replication' # Create a package impl packager = pkg.Package() def clear_expired_password(): """ Some mysql installations generate a random root password and save it in /root/.mysql_secret; this password is expired and should be changed by a client that supports expired passwords. 
""" LOG.debug("Removing expired password.") secret_file = "/root/.mysql_secret" try: out, err = utils.execute("cat", secret_file, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: LOG.exception(_("/root/.mysql_secret does not exist.")) return m = re.match('# The random password set for the root user at .*: (.*)', out) if m: try: out, err = utils.execute("mysqladmin", "-p%s" % m.group(1), "password", "", run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: LOG.exception(_("Cannot change mysql password.")) return operating_system.remove(secret_file, force=True, as_root=True) LOG.debug("Expired password removed.") def load_mysqld_options(): # find mysqld bin for bin in MYSQL_BIN_CANDIDATES: if os.path.isfile(bin): mysqld_bin = bin break else: return {} try: out, err = utils.execute(mysqld_bin, "--print-defaults", run_as_root=True, root_helper="sudo") arglist = re.split("\n", out)[1].split() args = defaultdict(list) for item in arglist: if "=" in item: key, value = item.split("=", 1) args[key.lstrip("--")].append(value) else: args[item.lstrip("--")].append(None) return args except exception.ProcessExecutionError: return {} class BaseMySqlAppStatus(service.BaseDbStatus): @classmethod def get(cls): if not cls._instance: cls._instance = BaseMySqlAppStatus() return cls._instance def _get_actual_db_status(self): try: out, err = utils.execute_with_timeout( "/usr/bin/mysqladmin", "ping", run_as_root=True, root_helper="sudo", log_output_on_error=True) LOG.info(_("MySQL Service Status is RUNNING.")) return rd_instance.ServiceStatuses.RUNNING except exception.ProcessExecutionError: LOG.exception(_("Failed to get database status.")) try: out, err = utils.execute_with_timeout("/bin/ps", "-C", "mysqld", "h") pid = out.split()[0] # TODO(rnirmal): Need to create new statuses for instances # where the mysql service is up, but unresponsive LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.') % {'pid': pid}) return rd_instance.ServiceStatuses.BLOCKED except exception.ProcessExecutionError: LOG.exception(_("Process execution failed.")) mysql_args = load_mysqld_options() pid_file = mysql_args.get('pid_file', ['/var/run/mysqld/mysqld.pid'])[0] if os.path.exists(pid_file): LOG.info(_("MySQL Service Status is CRASHED.")) return rd_instance.ServiceStatuses.CRASHED else: LOG.info(_("MySQL Service Status is SHUTDOWN.")) return rd_instance.ServiceStatuses.SHUTDOWN class BaseLocalSqlClient(object): """A sqlalchemy wrapper to manage transactions.""" def __init__(self, engine, use_flush=True): self.engine = engine self.use_flush = use_flush def __enter__(self): self.conn = self.engine.connect() self.trans = self.conn.begin() return self.conn def __exit__(self, type, value, traceback): if self.trans: if type is not None: # An error occurred self.trans.rollback() else: if self.use_flush: self.conn.execute(FLUSH) self.trans.commit() self.conn.close() def execute(self, t, **kwargs): try: return self.conn.execute(t, kwargs) except Exception: self.trans.rollback() self.trans = None raise @six.add_metaclass(abc.ABCMeta) class BaseMySqlAdmin(object): """Handles administrative tasks on the MySQL database.""" def __init__(self, local_sql_client, mysql_root_access, mysql_app): self._local_sql_client = local_sql_client self._mysql_root_access = mysql_root_access self._mysql_app = mysql_app(local_sql_client) @property def local_sql_client(self): return self._local_sql_client @property def mysql_root_access(self): return self._mysql_root_access @property def mysql_app(self): return 
self._mysql_app def _associate_dbs(self, user): """Internal. Given a MySQLUser, populate its databases attribute.""" LOG.debug("Associating dbs to user %s at %s." % (user.name, user.host)) with self.local_sql_client(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ["grantee", "table_schema"] q.tables = ["information_schema.SCHEMA_PRIVILEGES"] q.group = ["grantee", "table_schema"] q.where = ["privilege_type != 'USAGE'"] t = text(str(q)) db_result = client.execute(t) for db in db_result: LOG.debug("\t db: %s." % db) if db['grantee'] == "'%s'@'%s'" % (user.name, user.host): mysql_db = models.MySQLDatabase() mysql_db.name = db['table_schema'] user.databases.append(mysql_db.serialize()) def change_passwords(self, users): """Change the passwords of one or more existing users.""" LOG.debug("Changing the password of some users.") with self.local_sql_client(self.mysql_app.get_engine()) as client: for item in users: LOG.debug("Changing password for user %s." % item) user_dict = {'_name': item['name'], '_host': item['host'], '_password': item['password']} user = models.MySQLUser() user.deserialize(user_dict) LOG.debug("\tDeserialized: %s." % user.__dict__) uu = sql_query.UpdateUser(user.name, host=user.host, clear=user.password) t = text(str(uu)) client.execute(t) def update_attributes(self, username, hostname, user_attrs): """Change the attributes of an existing user.""" LOG.debug("Changing user attributes for user %s." % username) user = self._get_user(username, hostname) db_access = set() grantee = set() with self.local_sql_client(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ["grantee", "table_schema"] q.tables = ["information_schema.SCHEMA_PRIVILEGES"] q.group = ["grantee", "table_schema"] q.where = ["privilege_type != 'USAGE'"] t = text(str(q)) db_result = client.execute(t) for db in db_result: grantee.add(db['grantee']) if db['grantee'] == "'%s'@'%s'" % (user.name, user.host): db_name = db['table_schema'] db_access.add(db_name) with self.local_sql_client(self.mysql_app.get_engine()) as client: uu = sql_query.UpdateUser(user.name, host=user.host, clear=user_attrs.get('password'), new_user=user_attrs.get('name'), new_host=user_attrs.get('host')) t = text(str(uu)) client.execute(t) uname = user_attrs.get('name') or username host = user_attrs.get('host') or hostname find_user = "'%s'@'%s'" % (uname, host) if find_user not in grantee: self.grant_access(uname, host, db_access) def create_database(self, databases): """Create the list of specified databases.""" with self.local_sql_client(self.mysql_app.get_engine()) as client: for item in databases: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(item) cd = sql_query.CreateDatabase(mydb.name, mydb.character_set, mydb.collate) t = text(str(cd)) client.execute(t) def create_user(self, users): """Create users and grant them privileges for the specified databases. 
""" with self.local_sql_client(self.mysql_app.get_engine()) as client: for item in users: user = models.MySQLUser() user.deserialize(item) # TODO(cp16net):Should users be allowed to create users # 'os_admin' or 'debian-sys-maint' g = sql_query.Grant(user=user.name, host=user.host, clear=user.password) t = text(str(g)) client.execute(t) for database in user.databases: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(database) g = sql_query.Grant(permissions='ALL', database=mydb.name, user=user.name, host=user.host, clear=user.password) t = text(str(g)) client.execute(t) def delete_database(self, database): """Delete the specified database.""" with self.local_sql_client(self.mysql_app.get_engine()) as client: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(database) dd = sql_query.DropDatabase(mydb.name) t = text(str(dd)) client.execute(t) def delete_user(self, user): """Delete the specified user.""" mysql_user = models.MySQLUser() mysql_user.deserialize(user) self.delete_user_by_name(mysql_user.name, mysql_user.host) def delete_user_by_name(self, name, host='%'): with self.local_sql_client(self.mysql_app.get_engine()) as client: du = sql_query.DropUser(name, host=host) t = text(str(du)) LOG.debug("delete_user_by_name: %s", t) client.execute(t) def get_user(self, username, hostname): user = self._get_user(username, hostname) if not user: return None return user.serialize() def _get_user(self, username, hostname): """Return a single user matching the criteria.""" user = models.MySQLUser() try: user.name = username # Could possibly throw a BadRequest here. except ValueError as ve: LOG.exception(_("Error Getting user information")) raise exception.BadRequest(_("Username %(user)s is not valid" ": %(reason)s") % {'user': username, 'reason': ve.message} ) with self.local_sql_client(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ['User', 'Host', 'Password'] q.tables = ['mysql.user'] q.where = ["Host != 'localhost'", "User = '%s'" % username, "Host = '%s'" % hostname] q.order = ['User', 'Host'] t = text(str(q)) result = client.execute(t).fetchall() LOG.debug("Getting user information %s." % result) if len(result) != 1: return None found_user = result[0] user.password = found_user['Password'] user.host = found_user['Host'] self._associate_dbs(user) return user def grant_access(self, username, hostname, databases): """Grant a user permission to use a given database.""" user = self._get_user(username, hostname) mydb = models.ValidatedMySQLDatabase() with self.local_sql_client(self.mysql_app.get_engine()) as client: for database in databases: try: mydb.name = database except ValueError: LOG.exception(_("Error granting access")) raise exception.BadRequest(_( "Grant access to %s is not allowed") % database) g = sql_query.Grant(permissions='ALL', database=mydb.name, user=user.name, host=user.host, hashed=user.password) t = text(str(g)) client.execute(t) def is_root_enabled(self): """Return True if root access is enabled; False otherwise.""" LOG.debug("Class type of mysql_root_access is %s " % self.mysql_root_access) return self.mysql_root_access.is_root_enabled() def enable_root(self, root_password=None): """Enable the root user global access and/or reset the root password. 
""" return self.mysql_root_access.enable_root(root_password) def disable_root(self): """Disable the root user global access """ return self.mysql_root_access.disable_root() def list_databases(self, limit=None, marker=None, include_marker=False): """List databases the user created on this mysql instance.""" LOG.debug("---Listing Databases---") ignored_database_names = "'%s'" % "', '".join(cfg.get_ignored_dbs()) LOG.debug("The following database names are on ignore list and will " "be omitted from the listing: %s" % ignored_database_names) databases = [] with self.local_sql_client(self.mysql_app.get_engine()) as client: # If you have an external volume mounted at /var/lib/mysql # the lost+found directory will show up in mysql as a database # which will create errors if you try to do any database ops # on it. So we remove it here if it exists. q = sql_query.Query() q.columns = [ 'schema_name as name', 'default_character_set_name as charset', 'default_collation_name as collation', ] q.tables = ['information_schema.schemata'] q.where = ["schema_name NOT IN (" + ignored_database_names + ")"] q.order = ['schema_name ASC'] if limit: q.limit = limit + 1 if marker: q.where.append("schema_name %s '%s'" % (INCLUDE_MARKER_OPERATORS[include_marker], marker)) t = text(str(q)) database_names = client.execute(t) next_marker = None LOG.debug("database_names = %r." % database_names) for count, database in enumerate(database_names): if count >= limit: break LOG.debug("database = %s." % str(database)) mysql_db = models.MySQLDatabase() mysql_db.name = database[0] next_marker = mysql_db.name mysql_db.character_set = database[1] mysql_db.collate = database[2] databases.append(mysql_db.serialize()) LOG.debug("databases = " + str(databases)) if database_names.rowcount <= limit: next_marker = None return databases, next_marker def list_users(self, limit=None, marker=None, include_marker=False): """List users that have access to the database.""" ''' SELECT User, Host, Marker FROM (SELECT User, Host, CONCAT(User, '@', Host) as Marker FROM mysql.user ORDER BY 1, 2) as innerquery WHERE Marker > :marker ORDER BY Marker LIMIT :limit; ''' LOG.debug("---Listing Users---") ignored_user_names = "'%s'" % "', '".join(cfg.get_ignored_users()) LOG.debug("The following user names are on ignore list and will " "be omitted from the listing: %s" % ignored_user_names) users = [] with self.local_sql_client(self.mysql_app.get_engine()) as client: mysql_user = models.MySQLUser() iq = sql_query.Query() # Inner query. iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"] iq.tables = ['mysql.user'] iq.order = ['User', 'Host'] innerquery = str(iq).rstrip(';') oq = sql_query.Query() # Outer query. 
oq.columns = ['User', 'Host', 'Marker'] oq.tables = ['(%s) as innerquery' % innerquery] oq.where = [ "Host != 'localhost'", "User NOT IN (" + ignored_user_names + ")"] oq.order = ['Marker'] if marker: oq.where.append("Marker %s '%s'" % (INCLUDE_MARKER_OPERATORS[include_marker], marker)) if limit: oq.limit = limit + 1 t = text(str(oq)) result = client.execute(t) next_marker = None LOG.debug("result = " + str(result)) for count, row in enumerate(result): if count >= limit: break LOG.debug("user = " + str(row)) mysql_user = models.MySQLUser() mysql_user.name = row['User'] mysql_user.host = row['Host'] self._associate_dbs(mysql_user) next_marker = row['Marker'] users.append(mysql_user.serialize()) if result.rowcount <= limit: next_marker = None LOG.debug("users = " + str(users)) return users, next_marker def revoke_access(self, username, hostname, database): """Revoke a user's permission to use a given database.""" user = self._get_user(username, hostname) with self.local_sql_client(self.mysql_app.get_engine()) as client: r = sql_query.Revoke(database=database, user=user.name, host=user.host) t = text(str(r)) client.execute(t) def list_access(self, username, hostname): """Show all the databases to which the user has more than USAGE granted. """ user = self._get_user(username, hostname) return user.databases class BaseKeepAliveConnection(interfaces.PoolListener): """ A connection pool listener that ensures live connections are returned from the connection pool at checkout. This alleviates the problem of MySQL connections timing out. """ def checkout(self, dbapi_con, con_record, con_proxy): """Event triggered when a connection is checked out from the pool.""" try: try: dbapi_con.ping(False) except TypeError: dbapi_con.ping() except dbapi_con.OperationalError as ex: if ex.args[0] in (2006, 2013, 2014, 2045, 2055): raise exc.DisconnectionError() else: raise @six.add_metaclass(abc.ABCMeta) class BaseMySqlApp(object): """Prepares DBaaS on a Guest container.""" TIME_OUT = 1000 CFG_CODEC = IniCodec() @property def local_sql_client(self): return self._local_sql_client @property def keep_alive_connection_cls(self): return self._keep_alive_connection_cls @property def mysql_service(self): MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"] return operating_system.service_discovery(MYSQL_SERVICE_CANDIDATES) configuration_manager = ConfigurationManager( MYSQL_CONFIG, MYSQL_OWNER, MYSQL_OWNER, CFG_CODEC, requires_root=True, override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT)) def get_engine(self): """Create the default engine with the updated admin user.""" # TODO(rnirmal):Based on permission issues being resolved we may revert # url = URL(drivername='mysql', host='localhost', # query={'read_default_file': '/etc/mysql/my.cnf'}) global ENGINE if ENGINE: return ENGINE pwd = self.get_auth_password() ENGINE = sqlalchemy.create_engine("mysql://%s:%s@localhost:3306" % (ADMIN_USER_NAME, pwd.strip()), pool_recycle=7200, echo=CONF.sql_query_logging, listeners=[ self.keep_alive_connection_cls()] ) return ENGINE @classmethod def get_auth_password(cls): auth_config = operating_system.read_file( cls.get_client_auth_file(), codec=cls.CFG_CODEC) return auth_config['client']['password'] @classmethod def get_data_dir(cls): return cls.configuration_manager.get_value( MySQLConfParser.SERVER_CONF_SECTION).get('datadir') @classmethod def set_data_dir(cls, value): cls.configuration_manager.apply_system_override( {MySQLConfParser.SERVER_CONF_SECTION: {'datadir': value}}) @classmethod def 
get_client_auth_file(self): return guestagent_utils.build_file_path("~", ".my.cnf") def __init__(self, status, local_sql_client, keep_alive_connection_cls): """By default login with root no password for initial setup.""" self.state_change_wait_time = CONF.state_change_wait_time self.status = status self._local_sql_client = local_sql_client self._keep_alive_connection_cls = keep_alive_connection_cls def _create_admin_user(self, client, password): """ Create a os_admin user with a random password with all privileges similar to the root user. """ localhost = "localhost" g = sql_query.Grant(permissions='ALL', user=ADMIN_USER_NAME, host=localhost, grant_option=True, clear=password) t = text(str(g)) client.execute(t) @staticmethod def _generate_root_password(client): """Generate and set a random root password and forget about it.""" localhost = "localhost" uu = sql_query.UpdateUser("root", host=localhost, clear=utils.generate_random_password()) t = text(str(uu)) client.execute(t) def install_if_needed(self, packages): """Prepare the guest machine with a secure mysql server installation. """ LOG.info(_("Preparing Guest as MySQL Server.")) if not packager.pkg_is_installed(packages): LOG.debug("Installing MySQL server.") self._clear_mysql_config() # set blank password on pkg configuration stage pkg_opts = {'root_password': '', 'root_password_again': ''} packager.pkg_install(packages, pkg_opts, self.TIME_OUT) self._create_mysql_confd_dir() LOG.info(_("Finished installing MySQL server.")) self.start_mysql() def secure(self, config_contents): LOG.info(_("Generating admin password.")) admin_password = utils.generate_random_password() clear_expired_password() engine = sqlalchemy.create_engine("mysql://root:@localhost:3306", echo=True) with self.local_sql_client(engine) as client: self._remove_anonymous_user(client) self._create_admin_user(client, admin_password) self.stop_db() self._reset_configuration(config_contents, admin_password) self.start_mysql() LOG.debug("MySQL secure complete.") def _reset_configuration(self, configuration, admin_password=None): if not admin_password: # Take the current admin password from the base configuration file # if not given. admin_password = self.get_auth_password() self.configuration_manager.save_configuration(configuration) self._save_authentication_properties(admin_password) self.wipe_ib_logfiles() def _save_authentication_properties(self, admin_password): client_sect = {'client': {'user': ADMIN_USER_NAME, 'password': admin_password, 'host': '127.0.0.1'}} operating_system.write_file(self.get_client_auth_file(), client_sect, codec=self.CFG_CODEC) def secure_root(self, secure_remote_root=True): with self.local_sql_client(self.get_engine()) as client: LOG.info(_("Preserving root access from restore.")) self._generate_root_password(client) if secure_remote_root: self._remove_remote_root_access(client) def _clear_mysql_config(self): """Clear old configs, which can be incompatible with new version.""" LOG.debug("Clearing old MySQL config.") random_uuid = str(uuid.uuid4()) configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"] for config in configs: try: old_conf_backup = "%s_%s" % (config, random_uuid) operating_system.move(config, old_conf_backup, as_root=True) LOG.debug("%s saved to %s_%s." % (config, config, random_uuid)) except exception.ProcessExecutionError: pass def _create_mysql_confd_dir(self): conf_dir = "/etc/mysql/conf.d" LOG.debug("Creating %s." 
% conf_dir) operating_system.create_directory(conf_dir, as_root=True) def _enable_mysql_on_boot(self): LOG.debug("Enabling MySQL on boot.") try: utils.execute_with_timeout(self.mysql_service['cmd_enable'], shell=True) except KeyError: LOG.exception(_("Error enabling MySQL start on boot.")) raise RuntimeError("Service is not discovered.") def _disable_mysql_on_boot(self): try: utils.execute_with_timeout(self.mysql_service['cmd_disable'], shell=True) except KeyError: LOG.exception(_("Error disabling MySQL start on boot.")) raise RuntimeError("Service is not discovered.") def stop_db(self, update_db=False, do_not_start_on_reboot=False): LOG.info(_("Stopping MySQL.")) if do_not_start_on_reboot: self._disable_mysql_on_boot() try: utils.execute_with_timeout(self.mysql_service['cmd_stop'], shell=True) except KeyError: LOG.exception(_("Error stopping MySQL.")) raise RuntimeError("Service is not discovered.") if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db): LOG.error(_("Could not stop MySQL.")) self.status.end_restart() raise RuntimeError("Could not stop MySQL!") def _remove_anonymous_user(self, client): t = text(sql_query.REMOVE_ANON) client.execute(t) def _remove_remote_root_access(self, client): t = text(sql_query.REMOVE_ROOT) client.execute(t) def restart(self): try: self.status.begin_restart() self.stop_db() self.start_mysql() finally: self.status.end_restart() def update_overrides(self, overrides): self._apply_user_overrides(overrides) def _apply_user_overrides(self, overrides): # All user-defined values go to the server section of the configuration # file. if overrides: self.configuration_manager.apply_user_override( {MySQLConfParser.SERVER_CONF_SECTION: overrides}) def apply_overrides(self, overrides): LOG.debug("Applying overrides to MySQL.") with self.local_sql_client(self.get_engine()) as client: LOG.debug("Updating override values in running MySQL.") for k, v in overrides.iteritems(): byte_value = guestagent_utils.to_bytes(v) q = sql_query.SetServerVariable(key=k, value=byte_value) t = text(str(q)) try: client.execute(t) except exc.OperationalError: output = {'key': k, 'value': byte_value} LOG.exception(_("Unable to set %(key)s with value " "%(value)s.") % output) def make_read_only(self, read_only): with self.local_sql_client(self.get_engine()) as client: q = "set global read_only = %s" % read_only client.execute(text(str(q))) def wipe_ib_logfiles(self): """Destroys the ib_logfiles. If for some reason the selected log size in the conf changes from the current size of the files, MySQL will fail to start, so we delete the files to be safe. """ LOG.info(_("Wiping ib_logfiles.")) for index in range(2): try: # On restarts, sometimes these are wiped. So it can be a race # to have MySQL start up before it's restarted and these have # to be deleted. That's why it's ok if they aren't found and # that is why we use the "force" option to "remove". 
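# (Editor's note -- illustrative addition, not upstream code: for a data
# directory of /var/lib/mysql/data this removes ib_logfile0 and
# ib_logfile1; InnoDB then recreates the redo logs at the configured
# innodb_log_file_size on the next start.)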
operating_system.remove("%s/ib_logfile%d" % (self.get_data_dir(), index), force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception("Could not delete logfile.") raise def remove_overrides(self): self.configuration_manager.remove_user_override() def _remove_replication_overrides(self, cnf_file): LOG.info(_("Removing replication configuration file.")) if os.path.exists(cnf_file): operating_system.remove(cnf_file, as_root=True) def exists_replication_source_overrides(self): return self.configuration_manager.has_system_override(CNF_MASTER) def write_replication_source_overrides(self, overrideValues): self.configuration_manager.apply_system_override(overrideValues, CNF_MASTER) def write_replication_replica_overrides(self, overrideValues): self.configuration_manager.apply_system_override(overrideValues, CNF_SLAVE) def remove_replication_source_overrides(self): self.configuration_manager.remove_system_override(CNF_MASTER) def remove_replication_replica_overrides(self): self.configuration_manager.remove_system_override(CNF_SLAVE) def grant_replication_privilege(self, replication_user): LOG.info(_("Granting Replication Slave privilege.")) LOG.debug("grant_replication_privilege: %s" % replication_user) with self.local_sql_client(self.get_engine()) as client: g = sql_query.Grant(permissions=['REPLICATION SLAVE'], user=replication_user['name'], clear=replication_user['password']) t = text(str(g)) client.execute(t) def get_port(self): with self.local_sql_client(self.get_engine()) as client: result = client.execute('SELECT @@port').first() return result[0] def get_binlog_position(self): with self.local_sql_client(self.get_engine()) as client: result = client.execute('SHOW MASTER STATUS').first() binlog_position = { 'log_file': result['File'], 'position': result['Position'] } return binlog_position def execute_on_client(self, sql_statement): LOG.debug("Executing SQL: %s" % sql_statement) with self.local_sql_client(self.get_engine()) as client: return client.execute(sql_statement) def start_slave(self): LOG.info(_("Starting slave replication.")) with self.local_sql_client(self.get_engine()) as client: client.execute('START SLAVE') self._wait_for_slave_status("ON", client, 60) def stop_slave(self, for_failover): replication_user = None LOG.info(_("Stopping slave replication.")) with self.local_sql_client(self.get_engine()) as client: result = client.execute('SHOW SLAVE STATUS') replication_user = result.first()['Master_User'] client.execute('STOP SLAVE') client.execute('RESET SLAVE ALL') self._wait_for_slave_status("OFF", client, 30) if not for_failover: client.execute('DROP USER ' + replication_user) return { 'replication_user': replication_user } def stop_master(self): LOG.info(_("Stopping replication master.")) with self.local_sql_client(self.get_engine()) as client: client.execute('RESET MASTER') def _wait_for_slave_status(self, status, client, max_time): def verify_slave_status(): actual_status = client.execute( "SHOW GLOBAL STATUS like 'slave_running'").first()[1] return actual_status.upper() == status.upper() LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status) try: utils.poll_until(verify_slave_status, sleep_time=3, time_out=max_time) LOG.info(_("Replication is now %s.") % status.lower()) except PollTimeOut: raise RuntimeError( _("Replication is not %(status)s after %(max)d seconds.") % { 'status': status.lower(), 'max': max_time}) def start_mysql(self, update_db=False, disable_on_boot=False, timeout=120): LOG.info(_("Starting MySQL.")) # This is the site of all the 
trouble in the restart tests. # Essentially what happens is that mysql start fails, but does not # die. It is then impossible to kill the original, so if disable_on_boot: self._disable_mysql_on_boot() else: self._enable_mysql_on_boot() try: utils.execute_with_timeout(self.mysql_service['cmd_start'], shell=True, timeout=timeout) except KeyError: raise RuntimeError("Service is not discovered.") except exception.ProcessExecutionError: # it seems mysql (percona, at least) might come back with [Fail] # but actually come up ok. we're looking into the timing issue on # parallel, but for now, we'd like to give it one more chance to # come up. so regardless of the execute_with_timeout() response, # we'll assume mysql comes up and check its status for a while. pass if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.RUNNING, self.state_change_wait_time, update_db): LOG.error(_("Start up of MySQL failed.")) # If it won't start, but won't die either, kill it by hand so we # don't let a rogue process wander around. try: utils.execute_with_timeout("sudo", "pkill", "-9", "mysql") except exception.ProcessExecutionError: LOG.exception(_("Error killing stalled MySQL start command.")) # There's nothing more we can do... self.status.end_restart() raise RuntimeError("Could not start MySQL!") def start_db_with_conf_changes(self, config_contents): LOG.info(_("Starting MySQL with conf changes.")) LOG.debug("Inside the guest - Status is_running = (%s)." % self.status.is_running) if self.status.is_running: LOG.error(_("Cannot execute start_db_with_conf_changes because " "MySQL state == %s.") % self.status) raise RuntimeError("MySQL not stopped.") LOG.info(_("Resetting configuration.")) self._reset_configuration(config_contents) self.start_mysql(True) def reset_configuration(self, configuration): config_contents = configuration['config_contents'] LOG.info(_("Resetting configuration.")) self._reset_configuration(config_contents) def reset_admin_password(self, admin_password): """Replace the password in the my.cnf file.""" # grant the new admin password with self.local_sql_client(self.get_engine()) as client: self._create_admin_user(client, admin_password) # reset the ENGINE because the password could have changed global ENGINE ENGINE = None self._save_authentication_properties(admin_password) class BaseMySqlRootAccess(object): def __init__(self, local_sql_client, mysql_app): self._local_sql_client = local_sql_client self._mysql_app = mysql_app @property def mysql_app(self): return self._mysql_app @property def local_sql_client(self): return self._local_sql_client def is_root_enabled(self): """Return True if root access is enabled; False otherwise.""" with self.local_sql_client(self.mysql_app.get_engine()) as client: t = text(sql_query.ROOT_ENABLED) result = client.execute(t) LOG.debug("Found %s with remote root access." % result.rowcount) return result.rowcount != 0 def enable_root(self, root_password=None): """Enable the root user global access and/or reset the root password. 
""" user = models.MySQLRootUser(root_password) with self.local_sql_client(self.mysql_app.get_engine()) as client: try: cu = sql_query.CreateUser(user.name, host=user.host) t = text(str(cu)) client.execute(t, **cu.keyArgs) except exc.OperationalError as err: # Ignore, user is already created, just reset the password # TODO(rnirmal): More fine grained error checking later on LOG.debug(err) with self.local_sql_client(self.mysql_app.get_engine()) as client: uu = sql_query.UpdateUser(user.name, host=user.host, clear=user.password) t = text(str(uu)) client.execute(t) LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s." % (CONF.root_grant, CONF.root_grant_option)) g = sql_query.Grant(permissions=CONF.root_grant, user=user.name, host=user.host, grant_option=CONF.root_grant_option, clear=user.password) t = text(str(g)) client.execute(t) return user.serialize() def disable_root(self): """Disable the root user global access """ with self.local_sql_client(self.mysql_app.get_engine()) as client: client.execute(text(sql_query.REMOVE_ROOT)) trove-5.0.0/trove/guestagent/datastore/manager.py0000664000567000056710000010707112701410316023274 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc from oslo_config import cfg as oslo_cfg from oslo_log import log as logging from oslo_service import periodic_task from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance from trove.common.notification import EndNotification from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent import dbaas from trove.guestagent import guest_log from trove.guestagent.module import driver_manager from trove.guestagent.module import module_manager from trove.guestagent.strategies import replication as repl_strategy from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(periodic_task.PeriodicTasks): """This is the base class for all datastore managers. Over time, common functionality should be pulled back here from the existing managers. 
""" GUEST_LOG_TYPE_LABEL = 'type' GUEST_LOG_USER_LABEL = 'user' GUEST_LOG_FILE_LABEL = 'file' GUEST_LOG_SECTION_LABEL = 'section' GUEST_LOG_ENABLE_LABEL = 'enable' GUEST_LOG_DISABLE_LABEL = 'disable' GUEST_LOG_RESTART_LABEL = 'restart' GUEST_LOG_BASE_DIR = '/var/log/trove' GUEST_LOG_DATASTORE_DIRNAME = 'datastore' GUEST_LOG_DEFS_GUEST_LABEL = 'guest' GUEST_LOG_DEFS_GENERAL_LABEL = 'general' GUEST_LOG_DEFS_ERROR_LABEL = 'error' GUEST_LOG_DEFS_SLOW_QUERY_LABEL = 'slow_query' def __init__(self, manager_name): super(Manager, self).__init__(CONF) # Manager properties self.__manager_name = manager_name self.__manager = None self.__prepare_error = False # Guest log self._guest_log_context = None self._guest_log_loaded_context = None self._guest_log_cache = None self._guest_log_defs = None # Module self.module_driver_manager = driver_manager.ModuleDriverManager() @property def manager_name(self): """This returns the passed-in name of the manager.""" return self.__manager_name @property def manager(self): """This returns the name of the manager.""" if not self.__manager: self.__manager = CONF.datastore_manager or self.__manager_name return self.__manager @property def prepare_error(self): return self.__prepare_error @prepare_error.setter def prepare_error(self, prepare_error): self.__prepare_error = prepare_error @property def replication(self): """If the datastore supports replication, return an instance of the strategy. """ try: return repl_strategy.get_instance(self.manager) except Exception as ex: LOG.debug("Cannot get replication instance for '%s': %s" % ( self.manager, ex.message)) return None @property def replication_strategy(self): """If the datastore supports replication, return the strategy.""" try: return repl_strategy.get_strategy(self.manager) except Exception as ex: LOG.debug("Cannot get replication strategy for '%s': %s" % ( self.manager, ex.message)) return None @abc.abstractproperty def status(self): """This should return an instance of a status class that has been inherited from datastore.service.BaseDbStatus. Each datastore must implement this property. """ return None @property def configuration_manager(self): """If the datastore supports the new-style configuration manager, it should override this to return it. """ return None @property def datastore_log_defs(self): """Any datastore-specific log files should be overridden in this dict by the corresponding Manager class. Format of a dict entry: 'name_of_log': {self.GUEST_LOG_TYPE_LABEL: Specified by the Enum in guest_log.LogType, self.GUEST_LOG_USER_LABEL: User that owns the file, self.GUEST_LOG_FILE_LABEL: Path on filesystem where the log resides, self.GUEST_LOG_SECTION_LABEL: Section where to put config (if ini style) self.GUEST_LOG_ENABLE_LABEL: { Dict of config_group settings to enable log}, self.GUEST_LOG_DISABLE_LABEL: { Dict of config_group settings to disable log}, See guestagent_log_defs for an example. """ return {} @property def guestagent_log_defs(self): """These are log files that should be available on every Trove instance. 
By definition, these should be of type LogType.SYS """ log_dir = CONF.get('log_dir', '/var/log/trove/') log_file = CONF.get('log_file', 'trove-guestagent.log') guestagent_log = guestagent_utils.build_file_path(log_dir, log_file) return { self.GUEST_LOG_DEFS_GUEST_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS, self.GUEST_LOG_USER_LABEL: None, self.GUEST_LOG_FILE_LABEL: guestagent_log, }, } @property def guest_log_defs(self): """Return all the guest log defs.""" if not self._guest_log_defs: self._guest_log_defs = dict(self.datastore_log_defs) self._guest_log_defs.update(self.guestagent_log_defs) return self._guest_log_defs @property def guest_log_context(self): return self._guest_log_context @guest_log_context.setter def guest_log_context(self, context): self._guest_log_context = context @property def guest_log_cache(self): """Make sure the guest_log_cache is loaded and return it.""" self._refresh_guest_log_cache() return self._guest_log_cache def _refresh_guest_log_cache(self): if self._guest_log_cache: # Replace the context if it's changed if self._guest_log_loaded_context != self.guest_log_context: for log_name in self._guest_log_cache.keys(): self._guest_log_cache[log_name].context = ( self.guest_log_context) else: # Load the initial cache self._guest_log_cache = {} if self.guest_log_context: gl_defs = self.guest_log_defs try: exposed_logs = CONF.get(self.manager).get( 'guest_log_exposed_logs') except oslo_cfg.NoSuchOptError: exposed_logs = '' LOG.debug("Available log defs: %s" % ",".join(gl_defs.keys())) exposed_logs = exposed_logs.lower().replace(',', ' ').split() LOG.debug("Exposing log defs: %s" % ",".join(exposed_logs)) expose_all = 'all' in exposed_logs for log_name in gl_defs.keys(): gl_def = gl_defs[log_name] exposed = expose_all or log_name in exposed_logs LOG.debug("Building guest log '%s' from def: %s " "(exposed: %s)" % (log_name, gl_def, exposed)) self._guest_log_cache[log_name] = guest_log.GuestLog( self.guest_log_context, log_name, gl_def[self.GUEST_LOG_TYPE_LABEL], gl_def[self.GUEST_LOG_USER_LABEL], gl_def[self.GUEST_LOG_FILE_LABEL], exposed) self._guest_log_loaded_context = self.guest_log_context ################ # Status related ################ @periodic_task.periodic_task def update_status(self, context): """Update the status of the trove instance. It is decorated with perodic_task so it is called automatically. """ LOG.debug("Update status called.") self.status.update() def rpc_ping(self, context): LOG.debug("Responding to RPC ping.") return True ################# # Prepare related ################# def prepare(self, context, packages, databases, memory_mb, users, device_path=None, mount_point=None, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): """Set up datastore on a Guest Instance.""" with EndNotification(context, instance_id=CONF.guest_id): self._prepare(context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot, modules) def _prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot, modules): LOG.info(_("Starting datastore prepare for '%s'.") % self.manager) self.status.begin_install() post_processing = True if cluster_config else False try: # Since all module handling is common, don't pass it down to the # individual 'do_prepare' methods. 
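# (Editor's note -- illustrative addition, not upstream code: the ordering
# below is do_prepare() first, then status.end_install() in the finally
# block, which publishes the ACTIVE/INSTANCE_READY/ERROR state; the module
# application and database/user creation that follow are best-effort and
# only logged on failure, while an error in post_prepare() is logged and
# re-raised.)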
self.do_prepare(context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) if overrides: LOG.info(_("Applying user-specified configuration " "(called from 'prepare').")) self.apply_overrides_on_prepare(context, overrides) except Exception as ex: self.prepare_error = True LOG.exception(_("An error occurred preparing datastore: %s") % ex.message) raise finally: LOG.info(_("Ending datastore prepare for '%s'.") % self.manager) self.status.end_install(error_occurred=self.prepare_error, post_processing=post_processing) # At this point critical 'prepare' work is done and the instance # is now in the correct 'ACTIVE', 'INSTANCE_READY' or 'ERROR' state. # Of course, if an error has occurred, none of the code that follows # will run. LOG.info(_("Completed setup of '%s' datastore successfully.") % self.manager) # The following block performs additional instance initialization. # Failures will be recorded, but won't stop the provisioning # or change the instance state. try: if modules: LOG.info(_("Applying modules (called from 'prepare').")) self.module_apply(context, modules) LOG.info(_('Module apply completed.')) except Exception as ex: LOG.exception(_("An error occurred applying modules: " "%s") % ex.message) # The following block performs single-instance initialization. # Failures will be recorded, but won't stop the provisioning # or change the instance state. if not cluster_config: try: if databases: LOG.info(_("Creating databases (called from 'prepare').")) self.create_database(context, databases) LOG.info(_('Databases created successfully.')) if users: LOG.info(_("Creating users (called from 'prepare').")) self.create_user(context, users) LOG.info(_('Users created successfully.')) except Exception as ex: LOG.exception(_("An error occurred creating databases/users: " "%s") % ex.message) # We only enable root automatically if not restoring a backup # that may already have root enabled, in which case we keep it # unchanged. if root_password and not backup_info: try: LOG.info(_("Enabling root user (with password).")) self.enable_root_on_prepare(context, root_password) LOG.info(_('Root enabled successfully.')) except Exception as ex: LOG.exception(_("An error occurred enabling root user: " "%s") % ex.message) try: LOG.info(_("Calling post_prepare for '%s' datastore.") % self.manager) self.post_prepare(context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) LOG.info(_("Post prepare for '%s' datastore completed.") % self.manager) except Exception as ex: LOG.exception(_("An error occurred in post prepare: %s") % ex.message) raise def apply_overrides_on_prepare(self, context, overrides): self.update_overrides(context, overrides) self.restart(context) def enable_root_on_prepare(self, context, root_password): self.enable_root_with_password(context, root_password) @abc.abstractmethod def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare when the Trove instance first comes online. 'Prepare' is the first rpc message passed from the task manager. do_prepare handles all the base configuration of the instance and is where the actual work is done. 
Once this method completes, the datastore is considered either 'ready' for use (or for final connections to other datastores) or in an 'error' state, and the status is changed accordingly. Each datastore must implement this method. """ pass def post_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called after prepare has completed successfully. Processing done here should be limited to things that will not affect the actual 'running' status of the datastore (for example, creating databases and users, although these are now handled automatically). Any exceptions are caught, logged and rethrown, however no status changes are made and the end-user will not be informed of the error. """ LOG.info(_('No post_prepare work has been defined.')) pass ################# # Service related ################# @abc.abstractmethod def restart(self, context): """Restart the database service.""" pass ##################### # File System related ##################### def get_filesystem_stats(self, context, fs_path): """Gets the filesystem stats for the path given.""" # TODO(peterstac) - note that fs_path is not used in this method. mount_point = CONF.get(self.manager).mount_point LOG.debug("Getting file system stats for '%s'" % mount_point) return dbaas.get_filesystem_volume_stats(mount_point) def mount_volume(self, context, device_path=None, mount_point=None): LOG.debug("Mounting the device %s at the mount point %s." % (device_path, mount_point)) device = volume.VolumeDevice(device_path) device.mount(mount_point, write_to_fstab=False) def unmount_volume(self, context, device_path=None, mount_point=None): LOG.debug("Unmounting the device %s from the mount point %s." % (device_path, mount_point)) device = volume.VolumeDevice(device_path) device.unmount(mount_point) def resize_fs(self, context, device_path=None, mount_point=None): LOG.debug("Resizing the filesystem at %s." % mount_point) device = volume.VolumeDevice(device_path) device.resize_fs(mount_point) ############### # Configuration ############### def reset_configuration(self, context, configuration): """The default implementation should be sufficient if a configuration_manager is provided. Even if one is not, this method needs to be implemented to allow the rollback of flavor-resize on the guestagent side. """ LOG.debug("Resetting configuration.") if self.configuration_manager: config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) ################# # Cluster related ################# def cluster_complete(self, context): LOG.debug("Cluster creation complete, starting status checks.") self.status.end_install() ############# # Log related ############# def guest_log_list(self, context): LOG.info(_("Getting list of guest logs.")) self.guest_log_context = context gl_cache = self.guest_log_cache result = filter(None, [gl_cache[log_name].show() if gl_cache[log_name].exposed else None for log_name in gl_cache.keys()]) LOG.info(_("Returning list of logs: %s") % result) return result def guest_log_action(self, context, log_name, enable, disable, publish, discard): if enable and disable: raise exception.BadRequest("Cannot enable and disable log '%s'." 
% log_name) # Enable if we are publishing, unless told to disable if publish and not disable: enable = True LOG.info(_("Processing guest log '%(log)s' " "(enable=%(en)s, disable=%(dis)s, " "publish=%(pub)s, discard=%(disc)s).") % {'log': log_name, 'en': enable, 'dis': disable, 'pub': publish, 'disc': discard}) self.guest_log_context = context gl_cache = self.guest_log_cache if log_name in gl_cache: if ((gl_cache[log_name].type == guest_log.LogType.SYS) and not publish): if enable or disable: if enable: action_text = "enable" else: action_text = "disable" raise exception.BadRequest("Cannot %s a SYSTEM log ('%s')." % (action_text, log_name)) if gl_cache[log_name].type == guest_log.LogType.USER: requires_change = ( (gl_cache[log_name].enabled and disable) or (not gl_cache[log_name].enabled and enable)) if requires_change: restart_required = self.guest_log_enable( context, log_name, disable) if restart_required: self.set_guest_log_status( guest_log.LogStatus.Restart_Required, log_name) gl_cache[log_name].enabled = enable log_details = gl_cache[log_name].show() if discard: log_details = gl_cache[log_name].discard_log() if publish: log_details = gl_cache[log_name].publish_log() LOG.info(_("Details for log '%(log)s': %(det)s") % {'log': log_name, 'det': log_details}) return log_details raise exception.NotFound("Log '%s' is not defined." % log_name) def guest_log_enable(self, context, log_name, disable): """This method can be overridden by datastore implementations to facilitate enabling and disabling USER type logs. If the logs can be enabled with simple configuration group changes, however, the code here will probably suffice. Must return whether the datastore needs to be restarted in order for the logging to begin. """ restart_required = False verb = ("Disabling" if disable else "Enabling") if self.configuration_manager: LOG.debug("%s log '%s'" % (verb, log_name)) gl_def = self.guest_log_defs[log_name] enable_cfg_label = "%s_%s_log" % (self.GUEST_LOG_ENABLE_LABEL, log_name) disable_cfg_label = "%s_%s_log" % (self.GUEST_LOG_DISABLE_LABEL, log_name) restart_required = gl_def.get(self.GUEST_LOG_RESTART_LABEL, restart_required) if disable: self._apply_log_overrides( context, enable_cfg_label, disable_cfg_label, gl_def.get(self.GUEST_LOG_DISABLE_LABEL), gl_def.get(self.GUEST_LOG_SECTION_LABEL), restart_required) else: self._apply_log_overrides( context, disable_cfg_label, enable_cfg_label, gl_def.get(self.GUEST_LOG_ENABLE_LABEL), gl_def.get(self.GUEST_LOG_SECTION_LABEL), restart_required) else: msg = (_("%(verb)s log '%(log)s' not supported - " "no configuration manager defined!") % {'verb': verb, 'log': log_name}) LOG.error(msg) raise exception.GuestError(msg) return restart_required def _apply_log_overrides(self, context, remove_label, apply_label, cfg_values, section_label, restart_required): self.configuration_manager.remove_system_override( change_id=remove_label) if cfg_values: config_man_values = cfg_values if section_label: config_man_values = {section_label: cfg_values} self.configuration_manager.apply_system_override( config_man_values, change_id=apply_label) if restart_required: self.status.set_status(instance.ServiceStatuses.RESTART_REQUIRED) else: self.apply_overrides(context, cfg_values) def set_guest_log_status(self, status, log_name=None): """Sets the status of log_name to 'status' - if log_name is not provided, sets the status on all logs. 
""" gl_cache = self.guest_log_cache names = [log_name] if not log_name or log_name not in gl_cache: names = gl_cache.keys() for name in names: # If we're already in restart mode and we're asked to set the # status to restart, assume enable/disable has been flipped # without a restart and set the status to restart done if (gl_cache[name].status == guest_log.LogStatus.Restart_Required and status == guest_log.LogStatus.Restart_Required): gl_cache[name].status = guest_log.LogStatus.Restart_Completed else: gl_cache[name].status = status def build_log_file_name(self, log_name, owner, datastore_dir=None): """Build a log file name based on the log_name and make sure the directories exist and are accessible by owner. """ if datastore_dir is None: base_dir = self.GUEST_LOG_BASE_DIR if not operating_system.exists(base_dir, is_directory=True): operating_system.create_directory( base_dir, user=owner, group=owner, force=True, as_root=True) datastore_dir = guestagent_utils.build_file_path( base_dir, self.GUEST_LOG_DATASTORE_DIRNAME) if not operating_system.exists(datastore_dir, is_directory=True): operating_system.create_directory( datastore_dir, user=owner, group=owner, force=True, as_root=True) log_file_name = guestagent_utils.build_file_path( datastore_dir, '%s-%s.log' % (self.manager, log_name)) return self.validate_log_file(log_file_name, owner) def validate_log_file(self, log_file, owner): """Make sure the log file exists and is accessible by owner. """ if not operating_system.exists(log_file, as_root=True): operating_system.write_file(log_file, '', as_root=True) operating_system.chown(log_file, user=owner, group=owner, as_root=True) operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R, as_root=True) LOG.debug("Set log file '%s' as readable" % log_file) return log_file ################ # Module related ################ def module_list(self, context, include_contents=False): LOG.info(_("Getting list of modules.")) results = module_manager.ModuleManager.read_module_results( is_admin=context.is_admin, include_contents=include_contents) LOG.info(_("Returning list of modules: %s") % results) return results def module_apply(self, context, modules=None): LOG.info(_("Applying modules.")) results = [] for module_data in modules: module = module_data['module'] id = module.get('id', None) module_type = module.get('type', None) name = module.get('name', None) tenant = module.get('tenant', None) datastore = module.get('datastore', None) ds_version = module.get('datastore_version', None) contents = module.get('contents', None) md5 = module.get('md5', None) auto_apply = module.get('auto_apply', True) visible = module.get('visible', True) if not name: raise AttributeError(_("Module name not specified")) if not contents: raise AttributeError(_("Module contents not specified")) driver = self.module_driver_manager.get_driver(module_type) if not driver: raise exception.ModuleTypeNotFound( _("No driver implemented for module type '%s'") % module_type) result = module_manager.ModuleManager.apply_module( driver, module_type, name, tenant, datastore, ds_version, contents, id, md5, auto_apply, visible) results.append(result) LOG.info(_("Returning list of modules: %s") % results) return results def module_remove(self, context, module=None): LOG.info(_("Removing module.")) module = module['module'] id = module.get('id', None) module_type = module.get('type', None) name = module.get('name', None) datastore = module.get('datastore', None) ds_version = module.get('datastore_version', None) if not name: raise 
AttributeError(_("Module name not specified")) driver = self.module_driver_manager.get_driver(module_type) if not driver: raise exception.ModuleTypeNotFound( _("No driver implemented for module type '%s'") % module_type) module_manager.ModuleManager.remove_module( driver, module_type, id, name, datastore, ds_version) LOG.info(_("Deleted module: %s") % name) ############### # Not Supported ############### def change_passwords(self, context, users): LOG.debug("Changing passwords.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='change_passwords', datastore=self.manager) def enable_root(self, context): LOG.debug("Enabling root.") raise exception.DatastoreOperationNotSupported( operation='enable_root', datastore=self.manager) def enable_root_with_password(self, context, root_password=None): LOG.debug("Enabling root with password.") raise exception.DatastoreOperationNotSupported( operation='enable_root_with_password', datastore=self.manager) def disable_root(self, context): LOG.debug("Disabling root.") raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=self.manager) def is_root_enabled(self, context): LOG.debug("Checking if root was ever enabled.") raise exception.DatastoreOperationNotSupported( operation='is_root_enabled', datastore=self.manager) def create_backup(self, context, backup_info): LOG.debug("Creating backup.") raise exception.DatastoreOperationNotSupported( operation='create_backup', datastore=self.manager) def _perform_restore(self, backup_info, context, restore_location, app): LOG.debug("Performing restore.") raise exception.DatastoreOperationNotSupported( operation='_perform_restore', datastore=self.manager) def create_database(self, context, databases): LOG.debug("Creating databases.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='create_database', datastore=self.manager) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing databases.") raise exception.DatastoreOperationNotSupported( operation='list_databases', datastore=self.manager) def delete_database(self, context, database): LOG.debug("Deleting database.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='delete_database', datastore=self.manager) def create_user(self, context, users): LOG.debug("Creating users.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='create_user', datastore=self.manager) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing users.") raise exception.DatastoreOperationNotSupported( operation='list_users', datastore=self.manager) def delete_user(self, context, user): LOG.debug("Deleting user.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='delete_user', datastore=self.manager) def get_user(self, context, username, hostname): LOG.debug("Getting user.") raise exception.DatastoreOperationNotSupported( operation='get_user', datastore=self.manager) def update_attributes(self, context, username, hostname, user_attrs): LOG.debug("Updating user attributes.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='update_attributes', datastore=self.manager) def grant_access(self, context, username, hostname, databases): LOG.debug("Granting user access.") raise exception.DatastoreOperationNotSupported( operation='grant_access', 
datastore=self.manager) def revoke_access(self, context, username, hostname, database): LOG.debug("Revoking user access.") raise exception.DatastoreOperationNotSupported( operation='revoke_access', datastore=self.manager) def list_access(self, context, username, hostname): LOG.debug("Listing user access.") raise exception.DatastoreOperationNotSupported( operation='list_access', datastore=self.manager) def get_config_changes(self, cluster_config, mount_point=None): LOG.debug("Get configuration changes.") raise exception.DatastoreOperationNotSupported( operation='get_configuration_changes', datastore=self.manager) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") raise exception.DatastoreOperationNotSupported( operation='update_overrides', datastore=self.manager) def apply_overrides(self, context, overrides): LOG.debug("Applying overrides.") raise exception.DatastoreOperationNotSupported( operation='apply_overrides', datastore=self.manager) def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") raise exception.DatastoreOperationNotSupported( operation='get_replication_snapshot', datastore=self.manager) def attach_replication_slave(self, context, snapshot, slave_config): LOG.debug("Attaching replication slave.") raise exception.DatastoreOperationNotSupported( operation='attach_replication_slave', datastore=self.manager) def detach_replica(self, context, for_failover=False): LOG.debug("Detaching replica.") raise exception.DatastoreOperationNotSupported( operation='detach_replica', datastore=self.manager) def get_replica_context(self, context): LOG.debug("Getting replica context.") raise exception.DatastoreOperationNotSupported( operation='get_replica_context', datastore=self.manager) def make_read_only(self, context, read_only): LOG.debug("Making datastore read-only.") raise exception.DatastoreOperationNotSupported( operation='make_read_only', datastore=self.manager) def enable_as_master(self, context, replica_source_config): LOG.debug("Enabling as master.") raise exception.DatastoreOperationNotSupported( operation='enable_as_master', datastore=self.manager) def get_txn_count(self, context): LOG.debug("Getting transaction count.") raise exception.DatastoreOperationNotSupported( operation='get_txn_count', datastore=self.manager) def get_latest_txn_id(self, context): LOG.debug("Getting latest transaction id.") raise exception.DatastoreOperationNotSupported( operation='get_latest_txn_id', datastore=self.manager) def wait_for_txn(self, context, txn): LOG.debug("Waiting for transaction.") raise exception.DatastoreOperationNotSupported( operation='wait_for_txn', datastore=self.manager) def demote_replication_master(self, context): LOG.debug("Demoting replication master.") raise exception.DatastoreOperationNotSupported( operation='demote_replication_master', datastore=self.manager) trove-5.0.0/trove/guestagent/datastore/service.py0000664000567000056710000003533612701410320023321 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

from oslo_log import log as logging

from trove.common import cfg
from trove.common import context as trove_context
from trove.common.i18n import _
from trove.common import instance
from trove.conductor import api as conductor_api
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common import timeutils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class BaseDbStatus(object):
    """
    Answers the question "what is the status of the DB application on
    this box?" The answer can be that the application is not installed;
    otherwise the state of the application is determined by calling a
    series of commands.

    This class also handles saving and loading the status of the DB
    application in the database.
    The status is updated whenever the update() method is called, except
    if the state is changed to building or restart mode using the
    "begin_install" and "begin_restart" methods.
    The building mode persists in the database while restarting mode does
    not (so if the guest agent process crashes, update() will set the
    status to show a failure).
    These modes are exited, and normal update() behavior resumes, when
    end_install() or end_restart() is called, at which point the status
    again reflects the actual status of the DB app.

    This is a base class; subclasses must implement the real logic for
    determining the current status of the DB in _get_actual_db_status().
    """

    _instance = None

    GUESTAGENT_DIR = '~'
    PREPARE_START_FILENAME = '.guestagent.prepare.start'
    PREPARE_END_FILENAME = '.guestagent.prepare.end'

    def __init__(self):
        if self._instance is not None:
            raise RuntimeError("Cannot instantiate twice.")
        self.status = None
        self.restart_mode = False

        self.__prepare_completed = None

    @property
    def prepare_completed(self):
        if self.__prepare_completed is None:
            # Force the file check
            self.__refresh_prepare_completed()
        return self.__prepare_completed

    def __refresh_prepare_completed(self):
        # Set the value of __prepare_completed based on the existence of
        # the file. This is required as the state is cached, so this method
        # must be called any time the existence of the file changes.
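        # Illustrative example (an assumption, not from the original
        # source): if guestagent_utils.build_file_path simply joins its
        # arguments, the default class attributes above make this a check
        # for a '~/.guestagent.prepare.end' flag file.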
self.__prepare_completed = os.path.isfile( guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)) def begin_install(self): """First call of the DB prepare.""" prepare_start_file = guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME) operating_system.write_file(prepare_start_file, '') self.__refresh_prepare_completed() self.set_status(instance.ServiceStatuses.BUILDING, True) def begin_restart(self): """Called before restarting DB server.""" self.restart_mode = True def end_install(self, error_occurred=False, post_processing=False): """Called after prepare has ended.""" # Set the "we're done" flag if there's no error and # no post_processing is necessary if not (error_occurred or post_processing): prepare_end_file = guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME) operating_system.write_file(prepare_end_file, '') self.__refresh_prepare_completed() final_status = None if error_occurred: final_status = instance.ServiceStatuses.FAILED elif post_processing: final_status = instance.ServiceStatuses.INSTANCE_READY if final_status: LOG.info(_("Set final status to %s.") % final_status) self.set_status(final_status, force=True) else: self._end_install_or_restart(True) def end_restart(self): self.restart_mode = False LOG.info(_("Ending restart.")) self._end_install_or_restart(False) def _end_install_or_restart(self, force): """Called after DB is installed or restarted. Updates the database with the actual DB server status. """ real_status = self._get_actual_db_status() LOG.info(_("Current database status is '%s'.") % real_status) self.set_status(real_status, force=force) def _get_actual_db_status(self): raise NotImplementedError() @property def is_installed(self): """ True if DB app should be installed and attempts to ascertain its status won't result in nonsense. """ return self.prepare_completed @property def _is_restarting(self): return self.restart_mode @property def is_running(self): """True if DB server is running.""" return (self.status is not None and self.status == instance.ServiceStatuses.RUNNING) def set_status(self, status, force=False): """Use conductor to update the DB app status.""" if force or self.is_installed: LOG.debug("Casting set_status message to conductor " "(status is '%s')." % status.description) context = trove_context.TroveContext() heartbeat = {'service_status': status.description} conductor_api.API(context).heartbeat( CONF.guest_id, heartbeat, sent=timeutils.float_utcnow()) LOG.debug("Successfully cast set_status.") self.status = status else: LOG.debug("Prepare has not completed yet, skipping heartbeat.") def update(self): """Find and report status of DB on this machine. The database is updated and the status is also returned. """ if self.is_installed and not self._is_restarting: LOG.debug("Determining status of DB server.") status = self._get_actual_db_status() self.set_status(status) else: LOG.info(_("DB server is not installed or is in restart mode, so " "for now we'll skip determining the status of DB on " "this instance.")) def restart_db_service(self, service_candidates, timeout): """Restart the database. Do not change the service auto-start setting. Disable the Trove instance heartbeat updates during the restart. 1. Stop the database service. 2. Wait for the database to shutdown. 3. Start the database service. 4. Wait for the database to start running. :param service_candidates: List of possible system service names. 
:type service_candidates: list :param timeout: Wait timeout in seconds. :type timeout: integer :raises: :class:`RuntimeError` on failure. """ try: self.begin_restart() self.stop_db_service(service_candidates, timeout, disable_on_boot=False, update_db=False) self.start_db_service(service_candidates, timeout, enable_on_boot=False, update_db=False) except Exception as e: LOG.exception(e) raise RuntimeError(_("Database restart failed.")) finally: self.end_restart() def start_db_service(self, service_candidates, timeout, enable_on_boot=True, update_db=False): """Start the database service and wait for the database to become available. The service auto-start will be updated only if the service command succeeds. :param service_candidates: List of possible system service names. :type service_candidates: list :param timeout: Wait timeout in seconds. :type timeout: integer :param enable_on_boot: Enable service auto-start. The auto-start setting will be updated only if the service command succeeds. :type enable_on_boot: boolean :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :raises: :class:`RuntimeError` on failure. """ LOG.info(_("Starting database service.")) operating_system.start_service(service_candidates) self.wait_for_database_service_start(timeout, update_db=update_db) if enable_on_boot: LOG.info(_("Enable service auto-start on boot.")) operating_system.enable_service_on_boot(service_candidates) def wait_for_database_service_start(self, timeout, update_db=False): """Wait for the database to become available. :param timeout: Wait timeout in seconds. :type timeout: integer :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :raises: :class:`RuntimeError` on failure. """ LOG.debug("Waiting for database to start up.") if not self._wait_for_database_service_status( instance.ServiceStatuses.RUNNING, timeout, update_db): raise RuntimeError(_("Database failed to start.")) LOG.info(_("Database has started successfully.")) def stop_db_service(self, service_candidates, timeout, disable_on_boot=False, update_db=False): """Stop the database service and wait for the database to shutdown. :param service_candidates: List of possible system service names. :type service_candidates: list :param timeout: Wait timeout in seconds. :type timeout: integer :param disable_on_boot: Disable service auto-start. The auto-start setting will be updated only if the service command succeeds. :type disable_on_boot: boolean :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :raises: :class:`RuntimeError` on failure. """ LOG.info(_("Stopping database service.")) operating_system.stop_service(service_candidates) LOG.debug("Waiting for database to shutdown.") if not self._wait_for_database_service_status( instance.ServiceStatuses.SHUTDOWN, timeout, update_db): raise RuntimeError(_("Database failed to stop.")) LOG.info(_("Database has stopped successfully.")) if disable_on_boot: LOG.info(_("Disable service auto-start on boot.")) operating_system.disable_service_on_boot(service_candidates) def _wait_for_database_service_status(self, status, timeout, update_db): """Wait for the given database status. :param status: The status to wait for. :type status: BaseDbStatus :param timeout: Wait timeout in seconds. :type timeout: integer :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :returns: True on success, False otherwise. 
""" if not self.wait_for_real_status_to_change_to( status, timeout, update_db): LOG.info(_("Service status did not change to %(status)s " "within the given timeout: %(timeout)ds") % {'status': status, 'timeout': timeout}) LOG.debug("Attempting to cleanup stalled services.") try: self.cleanup_stalled_db_services() except Exception: LOG.debug("Cleanup failed.", exc_info=True) return False return True def wait_for_real_status_to_change_to(self, status, max_time, update_db=False): """Waits the given time for the real status to change to the one specified. The internal status is always updated. The public instance state stored in the Trove database is updated only if "update_db" is True. """ end_time = time.time() + max_time # since python does not support a real do-while loop, we have # to emulate one. Hence these shenanigans. We force at least # one pass into the loop and therefore it is safe that # actual_status is initialized in the loop while it is used # outside. loop = True while loop: self.status = self._get_actual_db_status() if self.status == status: if update_db: self.set_status(self.status) return True # should we remain in this loop? this is the thing # that emulates the do-while construct. loop = (time.time() < end_time) # no point waiting if our time is up and we're # just going to error out anyway. if loop: LOG.debug("Waiting for DB status to change from " "%(actual_status)s to %(status)s." % {"actual_status": self.status, "status": status}) time.sleep(CONF.state_change_poll_time) LOG.error(_("Timeout while waiting for database status to change." "Expected state %(status)s, " "current state is %(actual_status)s") % {"status": status, "actual_status": self.status}) return False def cleanup_stalled_db_services(self): """An optional datastore-specific code to cleanup stalled database services and other resources after a status change timeout. """ LOG.debug("No cleanup action specified for this datastore.") def report_root(self, context, user): """Use conductor to update the root-enable status.""" LOG.debug("Casting report_root message to conductor.") conductor_api.API(context).report_root(CONF.guest_id, user) LOG.debug("Successfully cast report_root.") trove-5.0.0/trove/guestagent/datastore/technical-preview/0000775000567000056710000000000012701410521024711 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/technical-preview/__init__.py0000664000567000056710000000000012701410316027012 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/mysql/0000775000567000056710000000000012701410521022445 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/mysql/__init__.py0000664000567000056710000000000012701410316024546 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/mysql/manager.py0000664000567000056710000000252612701410316024440 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
#

from oslo_utils import importutils

from trove.guestagent.datastore.mysql_common import manager


MYSQL_APP = "trove.guestagent.datastore.mysql.service.MySqlApp"
MYSQL_APP_STATUS = "trove.guestagent.datastore.mysql.service.MySqlAppStatus"
MYSQL_ADMIN = "trove.guestagent.datastore.mysql.service.MySqlAdmin"


class Manager(manager.MySqlManager):

    def __init__(self):
        mysql_app = importutils.import_class(MYSQL_APP)
        mysql_app_status = importutils.import_class(MYSQL_APP_STATUS)
        mysql_admin = importutils.import_class(MYSQL_ADMIN)

        super(Manager, self).__init__(mysql_app, mysql_app_status,
                                      mysql_admin)
trove-5.0.0/trove/guestagent/datastore/mysql/service.py0000664000567000056710000000714612701410316024471 0ustar jenkinsjenkins00000000000000
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging

from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service


LOG = logging.getLogger(__name__)
CONF = service.CONF


class KeepAliveConnection(service.BaseKeepAliveConnection):
    pass


class MySqlAppStatus(service.BaseMySqlAppStatus):
    pass


class LocalSqlClient(service.BaseLocalSqlClient):
    pass


class MySqlApp(service.BaseMySqlApp):

    def __init__(self, status):
        super(MySqlApp, self).__init__(status, LocalSqlClient,
                                       KeepAliveConnection)

    # DEPRECATED: Maintain for API Compatibility
    def get_txn_count(self):
        LOG.info(_("Retrieving latest txn count."))
        txn_count = 0
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SELECT @@global.gtid_executed').first()
            for uuid_set in result[0].split(','):
                for interval in uuid_set.split(':')[1:]:
                    if '-' in interval:
                        iparts = interval.split('-')
                        txn_count += int(iparts[1]) - int(iparts[0])
                    else:
                        txn_count += 1
        return txn_count

    def _get_slave_status(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_UUID'] or None

    def _get_gtid_executed(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]

    def get_last_txn(self):
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split(':')
            if uuid_set[0] == master_UUID:
                last_txn_id = uuid_set[-1].split('-')[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        LOG.info(_("Waiting on txn '%s'.") % txn)
        with self.local_sql_client(self.get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
                           % txn)


class
MySqlRootAccess(service.BaseMySqlRootAccess): def __init__(self): super(MySqlRootAccess, self).__init__(LocalSqlClient, MySqlApp(MySqlAppStatus.get())) class MySqlAdmin(service.BaseMySqlAdmin): def __init__(self): super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(), MySqlApp) get_engine = MySqlApp.get_engine trove-5.0.0/trove/guestagent/datastore/experimental/0000775000567000056710000000000012701410521023775 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/percona/0000775000567000056710000000000012701410521025424 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/percona/__init__.py0000664000567000056710000000000012701410316027525 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/percona/manager.py0000664000567000056710000000253712701410316027421 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_utils import importutils from trove.guestagent.datastore.mysql_common import manager MYSQL_APP = ("trove.guestagent.datastore.experimental.percona.service." "MySqlApp") MYSQL_APP_STATUS = ("trove.guestagent.datastore.experimental.percona.service." "MySqlAppStatus") MYSQL_ADMIN = ("trove.guestagent.datastore.experimental.percona.service." "MySqlAdmin") class Manager(manager.MySqlManager): def __init__(self): mysql_app = importutils.import_class(MYSQL_APP) mysql_app_status = importutils.import_class(MYSQL_APP_STATUS) mysql_admin = importutils.import_class(MYSQL_ADMIN) super(Manager, self).__init__(mysql_app, mysql_app_status, mysql_admin) trove-5.0.0/trove/guestagent/datastore/experimental/percona/service.py0000664000567000056710000000543612701410316027450 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
#

from oslo_log import log as logging

# Note: `_` is used in the log messages below but was not imported in the
# original file; the import is added here so the module can load.
from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service


LOG = logging.getLogger(__name__)


class KeepAliveConnection(service.BaseKeepAliveConnection):
    pass


class MySqlAppStatus(service.BaseMySqlAppStatus):
    pass


class LocalSqlClient(service.BaseLocalSqlClient):
    pass


class MySqlApp(service.BaseMySqlApp):

    def __init__(self, status):
        super(MySqlApp, self).__init__(status, LocalSqlClient,
                                       KeepAliveConnection)

    def _get_slave_status(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_UUID'] or None

    def _get_gtid_executed(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]

    def get_last_txn(self):
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split(':')
            if uuid_set[0] == master_UUID:
                last_txn_id = uuid_set[-1].split('-')[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        LOG.info(_("Waiting on txn '%s'.") % txn)
        with self.local_sql_client(self.get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
                           % txn)


class MySqlRootAccess(service.BaseMySqlRootAccess):
    def __init__(self):
        super(MySqlRootAccess, self).__init__(LocalSqlClient,
                                              MySqlApp(MySqlAppStatus.get()))


class MySqlAdmin(service.BaseMySqlAdmin):
    def __init__(self):
        super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
                                         MySqlApp)
trove-5.0.0/trove/guestagent/datastore/experimental/mariadb/0000775000567000056710000000000012701410521025374 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/guestagent/datastore/experimental/mariadb/__init__.py0000664000567000056710000000000012701410316027475 0ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/guestagent/datastore/experimental/mariadb/manager.py0000664000567000056710000000213712701410316027365 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from trove.guestagent.datastore.experimental.mariadb import (
    service as mariadb_service)
from trove.guestagent.datastore.galera_common import manager as galera_manager
from trove.guestagent.datastore.mysql_common import service as mysql_service


class Manager(galera_manager.GaleraManager):

    def __init__(self):
        super(Manager, self).__init__(
            mariadb_service.MariaDBApp,
            mysql_service.BaseMySqlAppStatus,
            mariadb_service.MariaDBAdmin)
trove-5.0.0/trove/guestagent/datastore/experimental/mariadb/service.py0000664000567000056710000000657712701410316027417 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging

# Note: `_` is used in the log messages below but was not imported in the
# original file; the import is added here so the module can load.
from trove.common.i18n import _
from trove.guestagent.datastore.galera_common import service as galera_service
from trove.guestagent.datastore.mysql_common import service as mysql_service


LOG = logging.getLogger(__name__)


class MariaDBApp(galera_service.GaleraApp):

    def __init__(self, status):
        super(MariaDBApp, self).__init__(
            status, mysql_service.BaseLocalSqlClient,
            mysql_service.BaseKeepAliveConnection)

    @property
    def mysql_service(self):
        result = super(MariaDBApp, self).mysql_service
        if result['type'] == 'sysvinit':
            result['cmd_bootstrap_galera_cluster'] = (
                "sudo service %s bootstrap" % result['service'])
        elif result['type'] == 'systemd':
            # TODO(mwj 2016/01/28): determine RHEL start for MariaDB Cluster
            result['cmd_bootstrap_galera_cluster'] = (
                "sudo systemctl start %s@bootstrap.service"
                % result['service'])
        return result

    @property
    def cluster_configuration(self):
        return self.configuration_manager.get_value('galera')

    def _get_slave_status(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_Server_Id'] or None

    def _get_gtid_executed(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute(
                'SELECT @@global.gtid_binlog_pos').first()[0]

    def get_last_txn(self):
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split('-')
            if uuid_set[1] == master_UUID:
                last_txn_id = uuid_set[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        LOG.info(_("Waiting on txn '%s'.") % txn)
        with self.local_sql_client(self.get_engine()) as client:
            client.execute("SELECT MASTER_GTID_WAIT('%s')" % txn)


class MariaDBRootAccess(mysql_service.BaseMySqlRootAccess):
    def __init__(self):
        super(MariaDBRootAccess, self).__init__(
            mysql_service.BaseLocalSqlClient,
            MariaDBApp(mysql_service.BaseMySqlAppStatus.get()))


class MariaDBAdmin(mysql_service.BaseMySqlAdmin):
    def __init__(self):
        super(MariaDBAdmin, self).__init__(
            mysql_service.BaseLocalSqlClient, MariaDBRootAccess(),
            MariaDBApp)
trove-5.0.0/trove/guestagent/datastore/experimental/mongodb/0000775000567000056710000000000012701410521025422 5ustar jenkinsjenkins00000000000000
trove-5.0.0/trove/guestagent/datastore/experimental/mongodb/system.py0000664000567000056710000000340212701410316027321 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2014 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os import path from trove.guestagent.common import operating_system from trove.guestagent import pkg OS_NAME = operating_system.get_os() MONGODB_MOUNT_POINT = "/var/lib/mongodb" MONGO_PID_FILE = '/var/run/mongodb/mongodb.pid' MONGO_LOG_FILE = '/var/log/mongodb/mongod.log' CONFIG_CANDIDATES = ["/etc/mongodb.conf", "/etc/mongod.conf"] MONGO_ADMIN_NAME = 'os_admin' MONGO_ADMIN_ROLES = [{'db': 'admin', 'role': 'userAdminAnyDatabase'}, {'db': 'admin', 'role': 'dbAdminAnyDatabase'}, {'db': 'admin', 'role': 'clusterAdmin'}, {'db': 'admin', 'role': 'readWriteAnyDatabase'}] MONGO_ADMIN_CREDS_FILE = path.join(path.expanduser('~'), '.os_mongo_admin_creds.json') MONGO_KEY_FILE = '/etc/mongo_key' MONGOS_SERVICE_CANDIDATES = ["mongos"] MONGOD_SERVICE_CANDIDATES = ["mongodb", "mongod"] MONGODB_KILL = "sudo kill %s" FIND_PID = "ps xau | grep 'mongo[ds]'" TIME_OUT = 1000 MONGO_USER = {operating_system.REDHAT: "mongod", operating_system.DEBIAN: "mongodb", operating_system.SUSE: "mongod"}[OS_NAME] PACKAGER = pkg.Package() trove-5.0.0/trove/guestagent/datastore/experimental/mongodb/__init__.py0000664000567000056710000000000012701410316027523 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/mongodb/manager.py0000664000567000056710000002454212701410316027417 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as ds_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import service from trove.guestagent.datastore.experimental.mongodb import system from trove.guestagent.datastore import manager from trove.guestagent import dbaas from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): def __init__(self): self.app = service.MongoDBApp() super(Manager, self).__init__('mongodb') @property def status(self): return self.app.status @property def configuration_manager(self): return self.app.configuration_manager def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) self.status.wait_for_database_service_start( self.app.state_change_wait_time) self.app.stop_db() self.app.clear_storage() mount_point = system.MONGODB_MOUNT_POINT if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(system.MONGODB_MOUNT_POINT): device.migrate_data(mount_point) device.mount(mount_point) operating_system.chown(mount_point, system.MONGO_USER, system.MONGO_USER, as_root=True) LOG.debug("Mounted the volume %(path)s as %(mount)s." % {'path': device_path, "mount": mount_point}) if config_contents: # Save resolved configuration template first. self.app.configuration_manager.save_configuration(config_contents) # Apply guestagent specific configuration changes. self.app.apply_initial_guestagent_configuration( cluster_config, mount_point) if not cluster_config: # Create the Trove admin user. self.app.secure() # Don't start mongos until add_config_servers is invoked, # don't start members as they should already be running. if not (self.app.is_query_router or self.app.is_cluster_member): self.app.start_db(update_db=True) if not cluster_config and backup_info: self._perform_restore(backup_info, context, mount_point, self.app) if service.MongoDBAdmin().is_root_enabled(): self.app.status.report_root(context, 'root') def restart(self, context): LOG.debug("Restarting MongoDB.") self.app.restart() def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting MongoDB with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): LOG.debug("Stopping MongoDB.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def get_filesystem_stats(self, context, fs_path): """Gets the filesystem stats for the path given.""" LOG.debug("Getting file system status.") # TODO(peterstac) - why is this hard-coded? 
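        # For comparison, the base Manager resolves the mount point from
        # configuration rather than hard-coding it (see the
        # get_filesystem_stats implementation in
        # trove/guestagent/datastore/manager.py):
        #     mount_point = CONF.get(self.manager).mount_point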
        return dbaas.get_filesystem_volume_stats(system.MONGODB_MOUNT_POINT)

    def change_passwords(self, context, users):
        LOG.debug("Changing password.")
        with EndNotification(context):
            return service.MongoDBAdmin().change_passwords(users)

    def update_attributes(self, context, username, hostname, user_attrs):
        LOG.debug("Updating user attributes.")
        with EndNotification(context):
            return service.MongoDBAdmin().update_attributes(username,
                                                            user_attrs)

    def create_database(self, context, databases):
        LOG.debug("Creating database(s).")
        with EndNotification(context):
            return service.MongoDBAdmin().create_database(databases)

    def create_user(self, context, users):
        LOG.debug("Creating user(s).")
        with EndNotification(context):
            return service.MongoDBAdmin().create_users(users)

    def delete_database(self, context, database):
        LOG.debug("Deleting database.")
        with EndNotification(context):
            return service.MongoDBAdmin().delete_database(database)

    def delete_user(self, context, user):
        LOG.debug("Deleting user.")
        with EndNotification(context):
            return service.MongoDBAdmin().delete_user(user)

    def get_user(self, context, username, hostname):
        LOG.debug("Getting user.")
        return service.MongoDBAdmin().get_user(username)

    def grant_access(self, context, username, hostname, databases):
        LOG.debug("Granting access.")
        return service.MongoDBAdmin().grant_access(username, databases)

    def revoke_access(self, context, username, hostname, database):
        LOG.debug("Revoking access.")
        return service.MongoDBAdmin().revoke_access(username, database)

    def list_access(self, context, username, hostname):
        LOG.debug("Listing access.")
        return service.MongoDBAdmin().list_access(username)

    def list_databases(self, context, limit=None, marker=None,
                       include_marker=False):
        LOG.debug("Listing databases.")
        return service.MongoDBAdmin().list_databases(limit, marker,
                                                     include_marker)

    def list_users(self, context, limit=None, marker=None,
                   include_marker=False):
        LOG.debug("Listing users.")
        return service.MongoDBAdmin().list_users(limit, marker,
                                                 include_marker)

    def enable_root(self, context):
        LOG.debug("Enabling root.")
        return service.MongoDBAdmin().enable_root()

    def enable_root_with_password(self, context, root_password=None):
        return service.MongoDBAdmin().enable_root(root_password)

    def is_root_enabled(self, context):
        LOG.debug("Checking if root is enabled.")
        return service.MongoDBAdmin().is_root_enabled()

    def _perform_restore(self, backup_info, context, restore_location, app):
        LOG.info(_("Restoring database from backup %s.") % backup_info['id'])
        try:
            backup.restore(context, backup_info, restore_location)
        except Exception:
            LOG.exception(_("Error performing restore from backup %s.") %
                          backup_info['id'])
            self.status.set_status(ds_instance.ServiceStatuses.FAILED)
            raise
        LOG.info(_("Restored database successfully."))

    def create_backup(self, context, backup_info):
        LOG.debug("Creating backup.")
        with EndNotification(context):
            backup.backup(context, backup_info)

    def update_overrides(self, context, overrides, remove=False):
        LOG.debug("Updating overrides.")
        if remove:
            self.app.remove_overrides()
        else:
            self.app.update_overrides(context, overrides, remove)

    def apply_overrides(self, context, overrides):
        LOG.debug("Overrides will be applied after restart.")
        pass

    def add_members(self, context, members):
        try:
            LOG.debug("add_members called.")
            LOG.debug("args: members=%s."
% members) self.app.add_members(members) LOG.debug("add_members call has finished.") except Exception: self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) raise def add_config_servers(self, context, config_servers): try: LOG.debug("add_config_servers called.") LOG.debug("args: config_servers=%s." % config_servers) self.app.add_config_servers(config_servers) LOG.debug("add_config_servers call has finished.") except Exception: self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) raise def add_shard(self, context, replica_set_name, replica_set_member): try: LOG.debug("add_shard called.") LOG.debug("args: replica_set_name=%s, replica_set_member=%s." % (replica_set_name, replica_set_member)) self.app.add_shard(replica_set_name, replica_set_member) LOG.debug("add_shard call has finished.") except Exception: self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) raise def get_key(self, context): # Return the cluster key LOG.debug("Getting the cluster key.") return self.app.get_key() def prep_primary(self, context): LOG.debug("Preparing to be primary member.") self.app.prep_primary() def create_admin_user(self, context, password): self.app.create_admin_user(password) def store_admin_password(self, context, password): self.app.store_admin_password(password) def get_replica_set_name(self, context): # Return this nodes replica set name LOG.debug("Getting the replica set name.") return self.app.replica_set_name def get_admin_password(self, context): # Return the admin password from this instance LOG.debug("Getting the admin password.") return self.app.admin_password def is_shard_active(self, context, replica_set_name): return self.app.is_shard_active(replica_set_name) trove-5.0.0/trove/guestagent/datastore/experimental/mongodb/service.py0000664000567000056710000010375312701410320027442 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from oslo_utils import netutils import pymongo from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance as ds_instance from trove.common import pagination from trove.common.stream_codecs import JsonCodec, SafeYamlCodec from trove.common import utils as utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import system from trove.guestagent.datastore import service from trove.guestagent.db import models LOG = logging.getLogger(__name__) CONF = cfg.CONF CONFIG_FILE = operating_system.file_discovery(system.CONFIG_CANDIDATES) MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mongodb' # Configuration group for clustering-related settings. 
CNF_CLUSTER = 'clustering' MONGODB_PORT = CONF.mongodb.mongodb_port CONFIGSVR_PORT = CONF.mongodb.configsvr_port class MongoDBApp(object): """Prepares DBaaS on a Guest container.""" def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time revision_dir = guestagent_utils.build_file_path( os.path.dirname(CONFIG_FILE), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.is_query_router = False self.is_cluster_member = False self.status = MongoDBAppStatus() def install_if_needed(self, packages): """Prepare the guest machine with a MongoDB installation.""" LOG.info(_("Preparing Guest as MongoDB.")) if not system.PACKAGER.pkg_is_installed(packages): LOG.debug("Installing packages: %s." % str(packages)) system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) LOG.info(_("Finished installing MongoDB server.")) def _get_service_candidates(self): if self.is_query_router: return system.MONGOS_SERVICE_CANDIDATES return system.MONGOD_SERVICE_CANDIDATES def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( self._get_service_candidates(), self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( self._get_service_candidates(), self.state_change_wait_time) def start_db(self, update_db=False): self.status.start_db_service( self._get_service_candidates(), self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def start_db_with_conf_changes(self, config_contents): LOG.info(_('Starting MongoDB with configuration changes.')) if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration( None, mount_point=system.MONGODB_MOUNT_POINT) self.start_db(True) def apply_initial_guestagent_configuration( self, cluster_config, mount_point=None): LOG.debug("Applying initial configuration.") # Mongodb init scripts assume the PID-file path is writable by the # database service. # See: https://jira.mongodb.org/browse/SERVER-20075 self._initialize_writable_run_dir() self.configuration_manager.apply_system_override( {'processManagement.fork': False, 'processManagement.pidFilePath': system.MONGO_PID_FILE, 'systemLog.destination': 'file', 'systemLog.path': system.MONGO_LOG_FILE, 'systemLog.logAppend': True }) if mount_point: self.configuration_manager.apply_system_override( {'storage.dbPath': mount_point}) if cluster_config is not None: self._configure_as_cluster_instance(cluster_config) else: self._configure_network(MONGODB_PORT) def _initialize_writable_run_dir(self): """Create a writable directory for Mongodb's runtime data (e.g. PID-file). 
""" mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE) LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir) operating_system.create_directory( mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True) def _configure_as_cluster_instance(self, cluster_config): """Configure this guest as a cluster instance and return its new status. """ if cluster_config['instance_type'] == "query_router": self._configure_as_query_router() elif cluster_config["instance_type"] == "config_server": self._configure_as_config_server() elif cluster_config["instance_type"] == "member": self._configure_as_cluster_member( cluster_config['replica_set_name']) else: LOG.error(_("Bad cluster configuration; instance type " "given as %s.") % cluster_config['instance_type']) return ds_instance.ServiceStatuses.FAILED if 'key' in cluster_config: self._configure_cluster_security(cluster_config['key']) def _configure_as_query_router(self): LOG.info(_("Configuring instance as a cluster query router.")) self.is_query_router = True # FIXME(pmalik): We should really have a separate configuration # template for the 'mongos' process. # Remove all storage configurations from the template. # They apply only to 'mongod' processes. # Already applied overrides will be integrated into the base file and # their current groups removed. config = guestagent_utils.expand_dict( self.configuration_manager.parse_configuration()) if 'storage' in config: LOG.debug("Removing 'storage' directives from the configuration " "template.") del config['storage'] self.configuration_manager.save_configuration( guestagent_utils.flatten_dict(config)) # Apply 'mongos' configuration. self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override( {'sharding.configDB': ''}, CNF_CLUSTER) def _configure_as_config_server(self): LOG.info(_("Configuring instance as a cluster config server.")) self._configure_network(CONFIGSVR_PORT) self.configuration_manager.apply_system_override( {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER) def _configure_as_cluster_member(self, replica_set_name): LOG.info(_("Configuring instance as a cluster member.")) self.is_cluster_member = True self._configure_network(MONGODB_PORT) # we don't want these thinking they are in a replica set yet # as that would prevent us from creating the admin user, # so start mongo before updating the config. # mongo will be started by the cluster taskmanager self.start_db() self.configuration_manager.apply_system_override( {'replication.replSetName': replica_set_name}, CNF_CLUSTER) def _configure_cluster_security(self, key_value): """Force cluster key-file-based authentication. This will enabled RBAC. """ # Store the cluster member authentication key. self.store_key(key_value) self.configuration_manager.apply_system_override( {'security.clusterAuthMode': 'keyFile', 'security.keyFile': self.get_key_file()}, CNF_CLUSTER) def _configure_network(self, port=None): """Make the service accessible at a given (or default if not) port. """ instance_ip = netutils.get_my_ipv4() bind_interfaces_string = ','.join([instance_ip, '127.0.0.1']) options = {'net.bindIp': bind_interfaces_string} if port is not None: guestagent_utils.update_dict({'net.port': port}, options) self.configuration_manager.apply_system_override(options) self.status.set_host(instance_ip, port=port) def clear_storage(self): mount_point = "/var/lib/mongodb/*" LOG.debug("Clearing storage at %s." 
% mount_point) try: operating_system.remove(mount_point, force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception(_("Error clearing storage.")) def _has_config_db(self): value_string = self.configuration_manager.get_value( 'sharding', {}).get('configDB') return value_string is not None # FIXME(pmalik): This method should really be called 'set_config_servers'. # The current name suggests it adds more config servers, but it # rather replaces the existing ones. def add_config_servers(self, config_server_hosts): """Set config servers on a query router (mongos) instance. """ config_servers_string = ','.join(['%s:27019' % host for host in config_server_hosts]) LOG.info(_("Setting config servers: %s") % config_servers_string) self.configuration_manager.apply_system_override( {'sharding.configDB': config_servers_string}, CNF_CLUSTER) self.start_db(True) def add_shard(self, replica_set_name, replica_set_member): """ This method is used by query router (mongos) instances. """ url = "%(rs)s/%(host)s:%(port)s"\ % {'rs': replica_set_name, 'host': replica_set_member, 'port': MONGODB_PORT} MongoDBAdmin().add_shard(url) def add_members(self, members): """ This method is used by a replica-set member instance. """ def check_initiate_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() if((status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1)): return True else: return False def check_rs_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() primary_count = 0 if status["ok"] != 1: return False if len(status["members"]) != (len(members) + 1): return False for rs_member in status["members"]: if rs_member["state"] not in [1, 2, 7]: return False if rs_member["health"] != 1: return False if rs_member["state"] == 1: primary_count += 1 return primary_count == 1 MongoDBAdmin().rs_initiate() # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_initiate_status, sleep_time=30, time_out=100) # add replica-set members MongoDBAdmin().rs_add_members(members) # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_rs_status, sleep_time=10, time_out=100) def _set_localhost_auth_bypass(self, enabled): """When active, the localhost exception allows connections from the localhost interface to create the first user on the admin database. The exception applies only when there are no users created in the MongoDB instance. 
""" self.configuration_manager.apply_system_override( {'setParameter': {'enableLocalhostAuthBypass': enabled}}) def list_all_dbs(self): return MongoDBAdmin().list_database_names() def db_data_size(self, db_name): schema = models.MongoDBSchema(db_name) return MongoDBAdmin().db_stats(schema.serialize())['dataSize'] def admin_cmd_auth_params(self): return MongoDBAdmin().cmd_admin_auth_params def get_key_file(self): return system.MONGO_KEY_FILE def get_key(self): return operating_system.read_file( system.MONGO_KEY_FILE, as_root=True).rstrip() def store_key(self, key): """Store the cluster key.""" LOG.debug('Storing key for MongoDB cluster.') operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True) operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True) operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True) def store_admin_password(self, password): LOG.debug('Storing admin password.') creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password) creds.write(system.MONGO_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): """Create the admin user while the localhost exception is active.""" LOG.debug('Creating the admin user.') creds = self.store_admin_password(password) user = models.MongoDBUser(name='admin.%s' % creds.username, password=creds.password) user.roles = system.MONGO_ADMIN_ROLES # the driver engine is already cached, but we need to change it it with MongoDBClient(None, host='localhost', port=MONGODB_PORT) as client: MongoDBAdmin().create_validated_user(user, client=client) # now revert to the normal engine self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT) LOG.debug('Created admin user.') def secure(self): """Create the Trove admin user. The service should not be running at this point. This will enable role-based access control (RBAC) by default. """ if self.status.is_running: raise RuntimeError(_("Cannot secure the instance. " "The service is still running.")) try: self.configuration_manager.apply_system_override( {'security.authorization': 'enabled'}) self._set_localhost_auth_bypass(True) self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("MongoDB secure complete.") finally: self._set_localhost_auth_bypass(False) self.stop_db() def get_configuration_property(self, name, default=None): """Return the value of a MongoDB configuration property. """ return self.configuration_manager.get_value(name, default) def prep_primary(self): # Prepare the primary member of a replica set. password = utils.generate_random_password() self.create_admin_user(password) self.restart() @property def replica_set_name(self): return MongoDBAdmin().get_repl_status()['set'] @property def admin_password(self): creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) return creds.password def is_shard_active(self, replica_set_name): shards = MongoDBAdmin().list_active_shards() if replica_set_name in [shard['_id'] for shard in shards]: LOG.debug('Replica set %s is active.' % replica_set_name) return True else: LOG.debug('Replica set %s is not active.' % replica_set_name) return False class MongoDBAppStatus(service.BaseDbStatus): def __init__(self, host='localhost', port=None): super(MongoDBAppStatus, self).__init__() self.set_host(host, port=port) def set_host(self, host, port=None): # This forces refresh of the 'pymongo' engine cached in the # MongoDBClient class. 
# Authentication is not required to check the server status. MongoDBClient(None, host=host, port=port) def _get_actual_db_status(self): try: with MongoDBClient(None) as client: client.server_info() return ds_instance.ServiceStatuses.RUNNING except (pymongo.errors.ServerSelectionTimeoutError, pymongo.errors.AutoReconnect): return ds_instance.ServiceStatuses.SHUTDOWN except Exception: LOG.exception(_("Error getting MongoDB status.")) return ds_instance.ServiceStatuses.SHUTDOWN def cleanup_stalled_db_services(self): out, err = utils.execute_with_timeout(system.FIND_PID, shell=True) pid = "".join(out.split(" ")[1:2]) utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True) class MongoDBAdmin(object): """Handles administrative tasks on MongoDB.""" # user is cached by making it a class attribute admin_user = None def _admin_user(self): if not type(self).admin_user: creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) user = models.MongoDBUser( 'admin.%s' % creds.username, creds.password ) type(self).admin_user = user return type(self).admin_user def _is_modifiable_user(self, name): if ((name in cfg.get_ignored_users(manager=MANAGER)) or name == system.MONGO_ADMIN_NAME): return False return True @property def cmd_admin_auth_params(self): """Returns a list of strings that constitute MongoDB command line authentication parameters. """ user = self._admin_user() return ['--username', user.username, '--password', user.password, '--authenticationDatabase', user.database.name] def _create_user_with_client(self, user, client): """Run the add user command.""" client[user.database.name].add_user( user.username, password=user.password, roles=user.roles ) def create_validated_user(self, user, client=None): """Creates a user on their database. The caller should ensure that this action is valid. :param user: a MongoDBUser object """ LOG.debug('Creating user %s on database %s with roles %s.' % (user.username, user.database.name, str(user.roles))) if not user.password: raise exception.BadRequest(_("User's password is empty.")) if client: self._create_user_with_client(user, client) else: with MongoDBClient(self._admin_user()) as admin_client: self._create_user_with_client(user, admin_client) def create_users(self, users): """Create the given user(s). :param users: list of serialized user objects """ with MongoDBClient(self._admin_user()) as client: for item in users: user = models.MongoDBUser.deserialize_user(item) if not self._is_modifiable_user(user.name): LOG.warning('Skipping creation of user with reserved ' 'name %(user)s' % {'user': user.name}) elif self._get_user_record(user.name, client=client): LOG.warning('Skipping creation of user with pre-existing ' 'name %(user)s' % {'user': user.name}) else: self.create_validated_user(user, client=client) def delete_validated_user(self, user): """Deletes a user from their database. The caller should ensure that this action is valid. :param user: a MongoDBUser object """ LOG.debug('Deleting user %s from database %s.' % (user.username, user.database.name)) with MongoDBClient(self._admin_user()) as admin_client: admin_client[user.database.name].remove_user(user.username) def delete_user(self, user): """Delete the given user. 
:param user: a serialized user object """ user = models.MongoDBUser.deserialize_user(user) if not self._is_modifiable_user(user.name): raise exception.BadRequest(_( 'Cannot delete user with reserved name %(user)s') % {'user': user.name}) else: self.delete_validated_user(user) def _get_user_record(self, name, client=None): """Get the user's record.""" user = models.MongoDBUser(name) if not self._is_modifiable_user(user.name): LOG.warning('Skipping retrieval of user with reserved ' 'name %(user)s' % {'user': user.name}) return None if client: user_info = client.admin.system.users.find_one( {'user': user.username, 'db': user.database.name}) else: with MongoDBClient(self._admin_user()) as admin_client: user_info = admin_client.admin.system.users.find_one( {'user': user.username, 'db': user.database.name}) if not user_info: return None user.roles = user_info['roles'] return user def get_user(self, name): """Get information for the given user.""" LOG.debug('Getting user %s.' % name) user = self._get_user_record(name) if not user: return None return user.serialize() def list_users(self, limit=None, marker=None, include_marker=False): """Get a list of all users.""" users = [] with MongoDBClient(self._admin_user()) as admin_client: for user_info in admin_client.admin.system.users.find(): user = models.MongoDBUser(name=user_info['_id']) user.roles = user_info['roles'] if self._is_modifiable_user(user.name): users.append(user.serialize()) LOG.debug('users = ' + str(users)) return pagination.paginate_list(users, limit, marker, include_marker) def change_passwords(self, users): with MongoDBClient(self._admin_user()) as admin_client: for item in users: user = models.MongoDBUser.deserialize_user(item) if not self._is_modifiable_user(user.name): LOG.warning('Skipping password change for user with ' 'reserved name %(user)s.' 
% {'user': user.name}) continue LOG.debug('Changing password for user %(user)s' % {'user': user.name}) self._create_user_with_client(user, admin_client) def update_attributes(self, name, user_attrs): """Update user attributes.""" user = self._get_user_record(name) if not user: raise exception.BadRequest(_( 'Cannot update attributes for user %(user)s as it either does ' 'not exist or is a reserved user.') % {'user': name}) password = user_attrs.get('password') if password: user.password = password self.change_passwords([user.serialize()]) if user_attrs.get('name'): LOG.warning('Changing user name is not supported.') if user_attrs.get('host'): LOG.warning('Changing user host is not supported.') def enable_root(self, password=None): """Create a user 'root' with role 'root'.""" if not password: LOG.debug('Generating root user password.') password = utils.generate_random_password() root_user = models.MongoDBUser(name='admin.root', password=password) root_user.roles = {'db': 'admin', 'role': 'root'} self.create_validated_user(root_user) return root_user.serialize() def is_root_enabled(self): """Check if user 'admin.root' exists.""" with MongoDBClient(self._admin_user()) as admin_client: return bool(admin_client.admin.system.users.find_one( {'roles.role': 'root'} )) def _update_user_roles(self, user): with MongoDBClient(self._admin_user()) as admin_client: admin_client[user.database.name].add_user( user.username, roles=user.roles ) def grant_access(self, username, databases): """Adds the RW role to the user for each specified database.""" user = self._get_user_record(username) if not user: raise exception.BadRequest(_( 'Cannot grant access for reserved or non-existent user ' '%(user)s') % {'user': username}) for db_name in databases: # verify the database name models.MongoDBSchema(db_name) role = {'db': db_name, 'role': 'readWrite'} if role not in user.roles: LOG.debug('Adding role %s to user %s.' % (str(role), username)) user.roles = role else: LOG.debug('User %s already has role %s.' % (username, str(role))) LOG.debug('Updating user %s.' % username) self._update_user_roles(user) def revoke_access(self, username, database): """Removes the RW role from the user for the specified database.""" user = self._get_user_record(username) if not user: raise exception.BadRequest(_( 'Cannot revoke access for reserved or non-existent user ' '%(user)s') % {'user': username}) # verify the database name models.MongoDBSchema(database) role = {'db': database, 'role': 'readWrite'} LOG.debug('Removing role %s from user %s.' % (str(role), username)) user.revoke_role(role) LOG.debug('Updating user %s.' % username) self._update_user_roles(user) def list_access(self, username): """Returns a list of all databases for which the user has the RW role. """ user = self._get_user_record(username) if not user: raise exception.BadRequest(_( 'Cannot list access for reserved or non-existent user ' '%(user)s') % {'user': username}) return user.databases def create_database(self, databases): """Forces creation of databases. For each new database creates a dummy document in a dummy collection, then drops the collection.
""" tmp = 'dummy' with MongoDBClient(self._admin_user()) as admin_client: for item in databases: db_name = models.MongoDBSchema.deserialize_schema(item).name LOG.debug('Creating MongoDB database %s' % db_name) db = admin_client[db_name] db[tmp].insert({'dummy': True}) db.drop_collection(tmp) def delete_database(self, database): """Deletes the database.""" with MongoDBClient(self._admin_user()) as admin_client: db_name = models.MongoDBSchema.deserialize_schema(database).name admin_client.drop_database(db_name) def list_database_names(self): """Get the list of database names.""" with MongoDBClient(self._admin_user()) as admin_client: return admin_client.database_names() def list_databases(self, limit=None, marker=None, include_marker=False): """Lists the databases.""" db_names = self.list_database_names() for hidden in cfg.get_ignored_dbs(manager=MANAGER): if hidden in db_names: db_names.remove(hidden) databases = [models.MongoDBSchema(db_name).serialize() for db_name in db_names] LOG.debug('databases = ' + str(databases)) return pagination.paginate_list(databases, limit, marker, include_marker) def add_shard(self, url): """Runs the addShard command.""" with MongoDBClient(self._admin_user()) as admin_client: admin_client.admin.command({'addShard': url}) def get_repl_status(self): """Runs the replSetGetStatus command.""" with MongoDBClient(self._admin_user()) as admin_client: status = admin_client.admin.command('replSetGetStatus') LOG.debug('Replica set status: %s' % status) return status def rs_initiate(self): """Runs the replSetInitiate command.""" with MongoDBClient(self._admin_user()) as admin_client: return admin_client.admin.command('replSetInitiate') def rs_add_members(self, members): """Adds the given members to the replication set.""" with MongoDBClient(self._admin_user()) as admin_client: # get the current config, add the new members, then save it config = admin_client.admin.command('replSetGetConfig')['config'] config['version'] += 1 next_id = max([m['_id'] for m in config['members']]) + 1 for member in members: config['members'].append({'_id': next_id, 'host': member}) next_id += 1 admin_client.admin.command('replSetReconfig', config) def db_stats(self, database, scale=1): """Gets the stats for the given database.""" with MongoDBClient(self._admin_user()) as admin_client: db_name = models.MongoDBSchema.deserialize_schema(database).name return admin_client[db_name].command('dbStats', scale=scale) def list_active_shards(self): """Get a list of shards active in this cluster.""" with MongoDBClient(self._admin_user()) as admin_client: return [shard for shard in admin_client.config.shards.find()] class MongoDBClient(object): """A wrapper to manage a MongoDB connection.""" # engine information is cached by making it a class attribute engine = {} def __init__(self, user, host=None, port=None): """Get the client. Specifying host and/or port updates cached values. 
:param user: MongoDBUser instance used to authenticate :param host: server address, defaults to localhost :param port: server port, defaults to 27017 :return: """ new_client = False self._logged_in = False if not type(self).engine: # no engine cached type(self).engine['host'] = (host if host else 'localhost') type(self).engine['port'] = (port if port else MONGODB_PORT) new_client = True elif host or port: LOG.debug("Updating MongoDB client.") if host: type(self).engine['host'] = host if port: type(self).engine['port'] = port new_client = True if new_client: host = type(self).engine['host'] port = type(self).engine['port'] LOG.debug("Creating MongoDB client to %(host)s:%(port)s." % {'host': host, 'port': port}) type(self).engine['client'] = pymongo.MongoClient(host=host, port=port, connect=False) self.session = type(self).engine['client'] if user: db_name = user.database.name LOG.debug("Authenticating MongoDB client on %s." % db_name) self._db = self.session[db_name] self._db.authenticate(user.username, password=user.password) self._logged_in = True def __enter__(self): return self.session def __exit__(self, exc_type, exc_value, traceback): LOG.debug("Disconnecting from MongoDB.") if self._logged_in: self._db.logout() self.session.close() class MongoDBCredentials(object): """Handles storing/retrieving credentials. Stored as json in files.""" def __init__(self, username=None, password=None): self.username = username self.password = password def read(self, filename): credentials = operating_system.read_file(filename, codec=JsonCodec()) self.username = credentials['username'] self.password = credentials['password'] def write(self, filename): credentials = {'username': self.username, 'password': self.password} operating_system.write_file(filename, credentials, codec=JsonCodec()) operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW) trove-5.0.0/trove/guestagent/datastore/experimental/cassandra/0000775000567000056710000000000012701410521025734 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/cassandra/__init__.py0000664000567000056710000000000012701410316030035 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/cassandra/manager.py0000664000567000056710000002475612701410316027740 0ustar jenkinsjenkins00000000000000# Copyright 2013 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
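# --- Editor's illustrative sketch (not part of the Trove source) ---
# The MongoDBCredentials class above round-trips the admin identity
# through a JSON file readable only by its owner. Reduced to the
# standard library, the same behaviour looks roughly like this (the
# helper names are hypothetical):

import json
import os
import stat

def write_credentials(path, username, password):
    with open(path, 'w') as f:
        json.dump({'username': username, 'password': password}, f)
    # Owner read/write only, mirroring FileMode.SET_USR_RW above.
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)

def read_credentials(path):
    with open(path) as f:
        creds = json.load(f)
    return creds['username'], creds['password']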
# import os from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common import instance as trove_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.datastore.experimental.cassandra import service from trove.guestagent.datastore.experimental.cassandra.service import ( CassandraAdmin ) from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(manager.Manager): def __init__(self): self._app = service.CassandraApp() self.__admin = CassandraAdmin(self.app.get_current_superuser()) super(Manager, self).__init__('cassandra') @property def status(self): return self.app.status @property def app(self): return self._app @property def admin(self): return self.__admin @property def configuration_manager(self): return self.app.configuration_manager def restart(self, context): self.app.restart() def start_db_with_conf_changes(self, context, config_contents): self.app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def reset_configuration(self, context, configuration): self.app.reset_configuration(configuration) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) self.app.init_storage_structure(mount_point) if config_contents or device_path or backup_info: # FIXME(pmalik) Once the cassandra bug # https://issues.apache.org/jira/browse/CASSANDRA-2356 # is fixed, this code may have to be revisited. # # Cassandra generates system keyspaces on the first start. # The stored properties include the 'cluster_name', which once # saved cannot be easily changed without removing the system # tables. It is crucial that the service does not boot up in # the middle of the configuration procedure. # We wait here for the service to come up, stop it properly and # remove the generated keyspaces before proceeding with # configuration. If it does not start up within the time limit # we assume it is not going to and proceed with configuration # right away. 
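# --- Editor's illustrative sketch (not part of the Trove source) ---
# The first-boot handling below hinges on polling the service status
# with a timeout while tolerating failure: if Cassandra never reaches
# RUNNING, prepare proceeds anyway. A generic version of such a wait
# loop (all names here are hypothetical, not Trove APIs):

import time

def wait_for_status(get_status, desired, timeout, poll_interval=3):
    """Poll get_status() until it returns 'desired' or 'timeout'
    seconds elapse. Returns True on success and False on timeout,
    raising nothing either way.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_status() == desired:
            return True
        time.sleep(poll_interval)
    return False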
LOG.debug("Waiting for database first boot.") if (self.app.status.wait_for_real_status_to_change_to( trove_instance.ServiceStatuses.RUNNING, CONF.state_change_wait_time, False)): LOG.debug("Stopping database prior to initial configuration.") self.app.stop_db() self.app._remove_system_tables() LOG.debug("Starting initial configuration.") if config_contents: LOG.debug("Applying configuration.") self.app.configuration_manager.save_configuration( config_contents) cluster_name = None if cluster_config: cluster_name = cluster_config.get('id', None) self.app.apply_initial_guestagent_configuration( cluster_name=cluster_name) if cluster_config: self.app.write_cluster_topology( cluster_config['dc'], cluster_config['rack'], prefer_local=True) if device_path: LOG.debug("Preparing data volume.") device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): # rsync exiting data LOG.debug("Migrating existing data.") device.migrate_data(mount_point) # mount the volume LOG.debug("Mounting new volume.") device.mount(mount_point) if not cluster_config: if backup_info: self._perform_restore(backup_info, context, mount_point) LOG.debug("Starting database with configuration changes.") self.app.start_db(update_db=False) if not self.app.has_user_config(): LOG.debug("Securing superuser access.") self.app.secure() self.app.restart() self.__admin = CassandraAdmin(self.app.get_current_superuser()) if not cluster_config and self.is_root_enabled(context): self.status.report_root(context, self.app.default_superuser_name) def change_passwords(self, context, users): with EndNotification(context): self.admin.change_passwords(context, users) def update_attributes(self, context, username, hostname, user_attrs): with EndNotification(context): self.admin.update_attributes(context, username, hostname, user_attrs) def create_database(self, context, databases): with EndNotification(context): self.admin.create_database(context, databases) def create_user(self, context, users): with EndNotification(context): self.admin.create_user(context, users) def delete_database(self, context, database): with EndNotification(context): self.admin.delete_database(context, database) def delete_user(self, context, user): with EndNotification(context): self.admin.delete_user(context, user) def get_user(self, context, username, hostname): return self.admin.get_user(context, username, hostname) def grant_access(self, context, username, hostname, databases): self.admin.grant_access(context, username, hostname, databases) def revoke_access(self, context, username, hostname, database): self.admin.revoke_access(context, username, hostname, database) def list_access(self, context, username, hostname): return self.admin.list_access(context, username, hostname) def list_databases(self, context, limit=None, marker=None, include_marker=False): return self.admin.list_databases(context, limit, marker, include_marker) def list_users(self, context, limit=None, marker=None, include_marker=False): return self.admin.list_users(context, limit, marker, include_marker) def enable_root(self, context): return self.app.enable_root() def enable_root_with_password(self, context, root_password=None): return self.app.enable_root(root_password=root_password) def disable_root(self, context): self.app.enable_root(root_password=None) def is_root_enabled(self, context): return self.app.is_root_enabled() def _perform_restore(self, backup_info, context, restore_location): 
LOG.info(_("Restoring database from backup %s.") % backup_info['id']) try: backup.restore(context, backup_info, restore_location) self.app._apply_post_restore_updates(backup_info) except Exception as e: LOG.error(e) LOG.error(_("Error performing restore from backup %s.") % backup_info['id']) self.app.status.set_status(trove_instance.ServiceStatuses.FAILED) raise LOG.info(_("Restored database successfully.")) def create_backup(self, context, backup_info): """ Entry point for initiating a backup for this instance. The call currently blocks guestagent until the backup is finished. :param backup_info: a dictionary containing the db instance id of the backup task, location, type, and other data. """ with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self.app.remove_overrides() else: self.app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): """Configuration changes are made in the config YAML file and require restart, so this is a no-op. """ pass def get_data_center(self, context): return self.app.get_data_center() def get_rack(self, context): return self.app.get_rack() def set_seeds(self, context, seeds): self.app.set_seeds(seeds) def get_seeds(self, context): return self.app.get_seeds() def set_auto_bootstrap(self, context, enabled): self.app.set_auto_bootstrap(enabled) def node_cleanup_begin(self, context): self.app.node_cleanup_begin() def node_cleanup(self, context): self.app.node_cleanup() def node_decommission(self, context): self.app.node_decommission() def cluster_secure(self, context, password): os_admin = self.app.cluster_secure(password) self.__admin = CassandraAdmin(self.app.get_current_superuser()) return os_admin def get_admin_credentials(self, context): return self.app.get_admin_credentials() def store_admin_credentials(self, context, admin_credentials): self.app.store_admin_credentials(admin_credentials) self.__admin = CassandraAdmin(self.app.get_current_superuser()) trove-5.0.0/trove/guestagent/datastore/experimental/cassandra/service.py0000664000567000056710000014136512701410320027755 0ustar jenkinsjenkins00000000000000# Copyright 2013 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import re import stat from cassandra.auth import PlainTextAuthProvider from cassandra.cluster import Cluster from cassandra.cluster import NoHostAvailable from cassandra import OperationTimedOut from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common import pagination from trove.common.stream_codecs import IniCodec from trove.common.stream_codecs import PropertiesCodec from trove.common.stream_codecs import SafeYamlCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore import service from trove.guestagent.db import models from trove.guestagent import pkg LOG = logging.getLogger(__name__) CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'cassandra' packager = pkg.Package() class CassandraApp(object): """Prepares DBaaS on a Guest container.""" _ADMIN_USER = 'os_admin' _CONF_AUTH_SEC = 'authentication' _CONF_USR_KEY = 'username' _CONF_PWD_KEY = 'password' _CONF_DIR_MODS = stat.S_IRWXU _CONF_FILE_MODS = stat.S_IRUSR CASSANDRA_CONF_FILE = "cassandra.yaml" CASSANDRA_TOPOLOGY_FILE = 'cassandra-rackdc.properties' _TOPOLOGY_CODEC = PropertiesCodec( delimiter='=', unpack_singletons=True, string_mappings={ 'true': True, 'false': False}) CASSANDRA_KILL_CMD = "sudo killall java || true" def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time self.status = CassandraAppStatus(self.get_current_superuser()) revision_dir = guestagent_utils.build_file_path( os.path.dirname(self.cassandra_conf), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( self.cassandra_conf, self.cassandra_owner, self.cassandra_owner, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) @property def service_candidates(self): return ['cassandra'] @property def cassandra_conf_dir(self): return { operating_system.REDHAT: "/etc/cassandra/default.conf/", operating_system.DEBIAN: "/etc/cassandra/", operating_system.SUSE: "/etc/cassandra/default.conf/" }[operating_system.get_os()] @property def cassandra_conf(self): return guestagent_utils.build_file_path(self.cassandra_conf_dir, self.CASSANDRA_CONF_FILE) @property def cassandra_topology(self): return guestagent_utils.build_file_path(self.cassandra_conf_dir, self.CASSANDRA_TOPOLOGY_FILE) @property def cassandra_owner(self): return 'cassandra' @property def cassandra_data_dir(self): return guestagent_utils.build_file_path( self.cassandra_working_dir, 'data') @property def cassandra_working_dir(self): return "/var/lib/cassandra" @property def default_superuser_name(self): return "cassandra" @property def default_superuser_password(self): return "cassandra" @property def default_superuser_pwd_hash(self): # Default 'salted_hash' value for 'cassandra' user on Cassandra 2.1. 
return "$2a$10$wPEVuXBU7WE2Uwzqq3t19ObRJyoKztzC/Doyfr0VtDmVXC4GDAV3e" @property def cqlsh_conf_path(self): return "~/.cassandra/cqlshrc" def install_if_needed(self, packages): """Prepare the guest machine with a Cassandra server installation.""" LOG.info(_("Preparing Guest as a Cassandra Server")) if not packager.pkg_is_installed(packages): self._install_db(packages) LOG.debug("Cassandra install_if_needed complete") def init_storage_structure(self, mount_point): try: operating_system.create_directory(mount_point, as_root=True) except exception.ProcessExecutionError: LOG.exception(_("Error while initiating storage structure.")) def start_db(self, update_db=False, enable_on_boot=True): self.status.start_db_service( self.service_candidates, self.state_change_wait_time, enable_on_boot=enable_on_boot, update_db=update_db) def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( self.service_candidates, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( self.service_candidates, self.state_change_wait_time) def _install_db(self, packages): """Install Cassandra server""" LOG.debug("Installing Cassandra server.") packager.pkg_install(packages, None, 10000) LOG.debug("Finished installing Cassandra server") def _remove_system_tables(self): """ Clean up the system keyspace. System tables are initialized on the first boot. They store certain properties, such as 'cluster_name', that cannot be easily changed once afterwards. The system keyspace needs to be cleaned up first. The tables will be regenerated on the next startup. Make sure to also cleanup the commitlog and caches to avoid startup errors due to inconsistencies. The service should not be running at this point. """ if self.status.is_running: raise RuntimeError(_("Cannot remove system tables. " "The service is still running.")) LOG.info(_('Removing existing system tables.')) system_keyspace_dir = guestagent_utils.build_file_path( self.cassandra_data_dir, 'system') commitlog_file = guestagent_utils.build_file_path( self.cassandra_working_dir, 'commitlog') chaches_dir = guestagent_utils.build_file_path( self.cassandra_working_dir, 'saved_caches') operating_system.remove(system_keyspace_dir, force=True, recursive=True, as_root=True) operating_system.remove(commitlog_file, force=True, recursive=True, as_root=True) operating_system.remove(chaches_dir, force=True, recursive=True, as_root=True) operating_system.create_directory( system_keyspace_dir, user=self.cassandra_owner, group=self.cassandra_owner, force=True, as_root=True) operating_system.create_directory( commitlog_file, user=self.cassandra_owner, group=self.cassandra_owner, force=True, as_root=True) operating_system.create_directory( chaches_dir, user=self.cassandra_owner, group=self.cassandra_owner, force=True, as_root=True) def _apply_post_restore_updates(self, backup_info): """The service should not be running at this point. The restored database files carry some properties over from the original instance that need to be updated with appropriate values for the new instance. These include: - Reset the 'cluster_name' property to match the new unique ID of this instance. This is to ensure that the restored instance is a part of a new single-node cluster rather than forming a one with the original node. - Reset the administrator's password. The original password from the parent instance may be compromised or long lost. 
A general procedure is: - update the configuration property with the current value so that the service can start up - reset the superuser password - restart the service - change the cluster name - restart the service :seealso: _reset_admin_password :seealso: change_cluster_name """ if self.status.is_running: raise RuntimeError(_("Cannot reset the cluster name. " "The service is still running.")) LOG.debug("Applying post-restore updates to the database.") try: # Change the 'cluster_name' property to the current in-database # value so that the database can start up. self._update_cluster_name_property(backup_info['instance_id']) # Reset the superuser password so that we can log-in. self._reset_admin_password() # Start the database and update the 'cluster_name' to the # new value. self.start_db(update_db=False) self.change_cluster_name(CONF.guest_id) finally: self.stop_db() # Always restore the initial state of the service. def cluster_secure(self, password): return self.secure(password=password).serialize() def secure(self, update_user=None, password=None): """Configure the Trove administrative user. Update an existing user if given. Create a new one using the default database credentials otherwise and drop the built-in user when finished. """ LOG.info(_('Configuring Trove superuser.')) if password is None: password = utils.generate_random_password() admin_username = update_user.name if update_user else self._ADMIN_USER os_admin = models.CassandraUser(admin_username, password) if update_user: CassandraAdmin(update_user).alter_user_password(os_admin) else: cassandra = models.CassandraUser( self.default_superuser_name, self.default_superuser_password) CassandraAdmin(cassandra)._create_superuser(os_admin) CassandraAdmin(os_admin).drop_user(cassandra) self._update_admin_credentials(os_admin) return os_admin def _update_admin_credentials(self, user): self.__create_cqlsh_config({self._CONF_AUTH_SEC: {self._CONF_USR_KEY: user.name, self._CONF_PWD_KEY: user.password}}) # Update the internal status with the new user. self.status = CassandraAppStatus(user) def store_admin_credentials(self, admin_credentials): user = models.CassandraUser.deserialize_user(admin_credentials) self._update_admin_credentials(user) def get_admin_credentials(self): return self.get_current_superuser().serialize() def _reset_admin_password(self): """ Reset the password of the Trove's administrative superuser. The service should not be running at this point. A general password reset procedure is: - disable user authentication and remote access - restart the service - update the password in the 'system_auth.credentials' table - re-enable authentication and make the host reachable - restart the service """ if self.status.is_running: raise RuntimeError(_("Cannot reset the administrative password. " "The service is still running.")) try: # Disable automatic startup in case the node goes down before # we have the superuser secured. operating_system.disable_service_on_boot(self.service_candidates) self.__disable_remote_access() self.__disable_authentication() # We now start up the service and immediately re-enable # authentication in the configuration file (takes effect after # restart). # Then we reset the superuser password to its default value # and restart the service to get user functions back. 
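# --- Editor's illustrative sketch (not part of the Trove source) ---
# With authentication and remote access disabled, the reset performed
# by __reset_user_password_to_default() below amounts to rewriting the
# stored bcrypt hash in the Cassandra 2.1-era 'system_auth.credentials'
# table. Done directly with the DataStax driver it would look roughly
# like this (assumes a localhost node with authentication switched off):

from cassandra.cluster import Cluster

def reset_password_hash(username, password_hash):
    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect()
    try:
        session.execute(
            "UPDATE system_auth.credentials SET salted_hash = %s "
            "WHERE username = %s;", (password_hash, username))
    finally:
        cluster.shutdown()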
self.start_db(update_db=False, enable_on_boot=False) self.__enable_authentication() os_admin = self.__reset_user_password_to_default(self._ADMIN_USER) self.status = CassandraAppStatus(os_admin) self.restart() # Now change the administrative password to a new secret value. self.secure(update_user=os_admin) finally: self.stop_db() # Always restore the initial state of the service. # At this point, we should have a secured database with new Trove-only # superuser password. # Proceed to re-enable remote access and automatic startup. self.__enable_remote_access() operating_system.enable_service_on_boot(self.service_candidates) def __reset_user_password_to_default(self, username): LOG.debug("Resetting the password of user '%s' to '%s'." % (username, self.default_superuser_password)) user = models.CassandraUser(username, self.default_superuser_password) with CassandraLocalhostConnection(user) as client: client.execute( "UPDATE system_auth.credentials SET salted_hash=%s " "WHERE username='{}';", (user.name,), (self.default_superuser_pwd_hash,)) return user def change_cluster_name(self, cluster_name): """Change the 'cluster_name' property of an exesting running instance. Cluster name is stored in the database and is required to match the configuration value. Cassandra fails to start otherwise. """ if not self.status.is_running: raise RuntimeError(_("Cannot change the cluster name. " "The service is not running.")) LOG.debug("Changing the cluster name to '%s'." % cluster_name) # Update the in-database value. self.__reset_cluster_name(cluster_name) # Update the configuration property. self._update_cluster_name_property(cluster_name) self.restart() def __reset_cluster_name(self, cluster_name): # Reset the in-database value stored locally on this node. current_superuser = self.get_current_superuser() with CassandraLocalhostConnection(current_superuser) as client: client.execute( "UPDATE system.local SET cluster_name = '{}' " "WHERE key='local';", (cluster_name,)) # Newer version of Cassandra require a flush to ensure the changes # to the local system keyspace persist. self.flush_tables('system', 'local') def __create_cqlsh_config(self, sections): config_path = self._get_cqlsh_conf_path() config_dir = os.path.dirname(config_path) if not os.path.exists(config_dir): os.mkdir(config_dir, self._CONF_DIR_MODS) else: os.chmod(config_dir, self._CONF_DIR_MODS) operating_system.write_file(config_path, sections, codec=IniCodec()) os.chmod(config_path, self._CONF_FILE_MODS) def get_current_superuser(self): """ Build the Trove superuser. Use the stored credentials. If not available fall back to the defaults. """ if self.has_user_config(): return self._load_current_superuser() LOG.warn(_("Trove administrative user has not been configured yet. " "Using the built-in default: %s") % self.default_superuser_name) return models.CassandraUser(self.default_superuser_name, self.default_superuser_password) def has_user_config(self): """ Return TRUE if there is a client configuration file available on the guest. """ return os.path.exists(self._get_cqlsh_conf_path()) def _load_current_superuser(self): config = operating_system.read_file(self._get_cqlsh_conf_path(), codec=IniCodec()) return models.CassandraUser( config[self._CONF_AUTH_SEC][self._CONF_USR_KEY], config[self._CONF_AUTH_SEC][self._CONF_PWD_KEY] ) def apply_initial_guestagent_configuration(self, cluster_name=None): """Update guestagent-controlled configuration properties. 
These changes to the default template are necessary in order to make the database service bootable and accessible in the guestagent context. :param cluster_name: The 'cluster_name' configuration property. Use the unique guest id by default. :type cluster_name: string """ self.configuration_manager.apply_system_override( {'data_file_directories': [self.cassandra_data_dir]}) self._make_host_reachable() self._update_cluster_name_property(cluster_name or CONF.guest_id) # A single-node instance may use the SimpleSnitch # (keyspaces use SimpleStrategy). # A network-aware snitch has to be used otherwise. if cluster_name is None: updates = {'endpoint_snitch': 'SimpleSnitch'} else: updates = {'endpoint_snitch': 'GossipingPropertyFileSnitch'} self.configuration_manager.apply_system_override(updates) def _make_host_reachable(self): """ Some of these settings may be overriden by user defined configuration groups. authenticator and authorizer - Necessary to enable users and permissions. rpc_address - Enable remote connections on all interfaces. broadcast_rpc_address - RPC address to broadcast to drivers and other clients. Must be set if rpc_address = 0.0.0.0 and can never be 0.0.0.0 itself. listen_address - The address on which the node communicates with other nodes. Can never be 0.0.0.0. seed_provider - A list of discovery contact points. """ self.__enable_authentication() self.__enable_remote_access() def __enable_remote_access(self): updates = { 'rpc_address': "0.0.0.0", 'broadcast_rpc_address': netutils.get_my_ipv4(), 'listen_address': netutils.get_my_ipv4(), 'seed_provider': {'parameters': [{'seeds': netutils.get_my_ipv4()}] } } self.configuration_manager.apply_system_override(updates) def __disable_remote_access(self): updates = { 'rpc_address': "127.0.0.1", 'listen_address': '127.0.0.1', 'seed_provider': {'parameters': [{'seeds': '127.0.0.1'}] } } self.configuration_manager.apply_system_override(updates) def __enable_authentication(self): updates = { 'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator', 'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer' } self.configuration_manager.apply_system_override(updates) def __disable_authentication(self): updates = { 'authenticator': 'org.apache.cassandra.auth.AllowAllAuthenticator', 'authorizer': 'org.apache.cassandra.auth.AllowAllAuthorizer' } self.configuration_manager.apply_system_override(updates) def _update_cluster_name_property(self, name): """This 'cluster_name' property prevents nodes from one logical cluster from talking to another. All nodes in a cluster must have the same value. 
""" self.configuration_manager.apply_system_override({'cluster_name': name}) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def write_cluster_topology(self, data_center, rack, prefer_local=True): LOG.info(_('Saving Cassandra cluster topology configuration.')) config = {'dc': data_center, 'rack': rack, 'prefer_local': prefer_local} operating_system.write_file(self.cassandra_topology, config, codec=self._TOPOLOGY_CODEC, as_root=True) operating_system.chown( self.cassandra_topology, self.cassandra_owner, self.cassandra_owner, as_root=True) operating_system.chmod( self.cassandra_topology, FileMode.ADD_READ_ALL, as_root=True) def start_db_with_conf_changes(self, config_contents): LOG.debug("Starting database with configuration changes.") if self.status.is_running: raise RuntimeError(_("The service is still running.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration() self.start_db(True) def reset_configuration(self, configuration): LOG.debug("Resetting configuration.") config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) def _get_cqlsh_conf_path(self): return os.path.expanduser(self.cqlsh_conf_path) def get_data_center(self): config = operating_system.read_file(self.cassandra_topology, codec=self._TOPOLOGY_CODEC) return config['dc'] def get_rack(self): config = operating_system.read_file(self.cassandra_topology, codec=self._TOPOLOGY_CODEC) return config['rack'] def set_seeds(self, seeds): LOG.debug("Setting seed nodes: %s" % seeds) updates = { 'seed_provider': {'parameters': [{'seeds': ','.join(seeds)}] } } self.configuration_manager.apply_system_override(updates) def get_seeds(self): """Return a list of seed node IPs if any. The seed IPs are stored as a comma-separated string in the seed-provider parameters: [{'class_name': '', 'parameters': [{'seeds': ','}, ...]}] """ def find_first(key, dict_list): for item in dict_list: if key in item: return item[key] return [] sp_property = self.configuration_manager.get_value('seed_provider', []) seeds_str = find_first('seeds', find_first('parameters', sp_property)) return seeds_str.split(',') if seeds_str else [] def set_auto_bootstrap(self, enabled): """Auto-bootstrap makes new (non-seed) nodes automatically migrate the right data to themselves. The feature has to be turned OFF when initializing a fresh cluster without data. It must be turned back ON once the cluster is initialized. """ LOG.debug("Setting auto-bootstrapping: %s" % enabled) updates = {'auto_bootstrap': enabled} self.configuration_manager.apply_system_override(updates) def node_cleanup_begin(self): """Suspend periodic status updates and mark the instance busy throughout the operation. """ self.status.begin_restart() self.status.set_status(rd_instance.ServiceStatuses.BLOCKED) def node_cleanup(self): """Cassandra does not automatically remove data from nodes that lose part of their partition range to a newly added node. Cleans up keyspaces and partition keys no longer belonging to the node. Do not treat cleanup failures as fatal. Resume the heartbeat after finishing and let it signal the true state of the instance to the caller. 
""" LOG.debug("Running node cleanup.") # nodetool -h -p -u -pw cleanup try: self._run_nodetool_command('cleanup') self.status.set_status(rd_instance.ServiceStatuses.RUNNING) except Exception: LOG.exception(_("The node failed to complete its cleanup.")) finally: self.status.end_restart() def node_decommission(self): """Causes a live node to decommission itself, streaming its data to the next node on the ring. Shutdown the database after successfully finishing the operation, or leave the node in a failed state otherwise. Suspend periodic status updates, so that the caller can poll for the database shutdown. """ LOG.debug("Decommissioning the node.") # nodetool -h -p -u -pw decommission self.status.begin_restart() try: self._run_nodetool_command('decommission') except Exception: LOG.exception(_("The node failed to decommission itself.")) self.status.set_status(rd_instance.ServiceStatuses.FAILED) return try: self.stop_db(update_db=True, do_not_start_on_reboot=True) finally: self.status.end_restart() def flush_tables(self, keyspace, *tables): """Flushes one or more tables from the memtable. """ LOG.debug("Flushing tables.") # nodetool -h -p -u -pw flush -- # (
... ) self._run_nodetool_command('flush', keyspace, *tables) def _run_nodetool_command(self, cmd, *args, **kwargs): """Execute a nodetool command on this node. """ return utils.execute('nodetool', '-h', 'localhost', cmd, *args, **kwargs) def enable_root(self, root_password=None): """Cassandra's 'root' user is called 'cassandra'. Create a new superuser if it does not exist and grant it full superuser-level access to all keyspaces. """ cassandra = models.CassandraRootUser(password=root_password) admin = CassandraAdmin(self.get_current_superuser()) if self.is_root_enabled(): admin.alter_user_password(cassandra) else: admin._create_superuser(cassandra) return cassandra.serialize() def is_root_enabled(self): """The Trove administrative user ('os_admin') should normally be the only superuser in the system. """ found = CassandraAdmin(self.get_current_superuser()).list_superusers() return len([user for user in found if user.name != self._ADMIN_USER]) > 0 class CassandraAppStatus(service.BaseDbStatus): def __init__(self, superuser): """ :param superuser: User account the Status uses for connecting to the database. :type superuser: CassandraUser """ super(CassandraAppStatus, self).__init__() self.__user = superuser def _get_actual_db_status(self): try: with CassandraLocalhostConnection(self.__user): return rd_instance.ServiceStatuses.RUNNING except NoHostAvailable: return rd_instance.ServiceStatuses.SHUTDOWN except Exception: LOG.exception(_("Error getting Cassandra status.")) return rd_instance.ServiceStatuses.SHUTDOWN def cleanup_stalled_db_services(self): utils.execute_with_timeout(CassandraApp.CASSANDRA_KILL_CMD, shell=True) class CassandraAdmin(object): """Handles administrative tasks on the Cassandra database. In Cassandra only SUPERUSERS can create other users and grant permissions to database resources. Trove uses the 'cassandra' superuser to perform its administrative tasks. The users it creates are all 'normal' (NOSUPERUSER) accounts. The permissions it can grant are also limited to non-superuser operations. This is to prevent anybody from creating a new superuser via the Trove API. """ # Non-superuser grant modifiers. __NO_SUPERUSER_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT') _KS_NAME_REGEX = re.compile('^$') def __init__(self, user): self.__admin_user = user def create_user(self, context, users): """ Create new non-superuser accounts. New users are by default granted full access to all database resources. """ with CassandraLocalhostConnection(self.__admin_user) as client: for item in users: self._create_user_and_grant(client, self._deserialize_user(item)) def _create_user_and_grant(self, client, user): """ Create new non-superuser account and grant it full access to its databases. """ self._create_user(client, user) for db in user.databases: self._grant_full_access_on_keyspace( client, self._deserialize_keyspace(db), user) def _create_user(self, client, user): # Create only NOSUPERUSER accounts here. LOG.debug("Creating a new user '%s'." % user.name) client.execute("CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;", (user.name,), (user.password,)) def _create_superuser(self, user): """Create a new superuser account and grant it full superuser-level access to all keyspaces. """ LOG.debug("Creating a new superuser '%s'." 
% user.name) with CassandraLocalhostConnection(self.__admin_user) as client: client.execute("CREATE USER '{}' WITH PASSWORD %s SUPERUSER;", (user.name,), (user.password,)) client.execute("GRANT ALL PERMISSIONS ON ALL KEYSPACES TO '{}';", (user.name,)) def delete_user(self, context, user): self.drop_user(self._deserialize_user(user)) def drop_user(self, user): with CassandraLocalhostConnection(self.__admin_user) as client: self._drop_user(client, user) def _drop_user(self, client, user): LOG.debug("Deleting user '%s'." % user.name) client.execute("DROP USER '{}';", (user.name,)) def get_user(self, context, username, hostname): with CassandraLocalhostConnection(self.__admin_user) as client: user = self._find_user(client, username) return user.serialize() if user is not None else None def _find_user(self, client, username): """ Look up a user with a given username. Omit user names on the ignore list. Return a new Cassandra user instance or None if no match is found. """ return next((user for user in self._get_listed_users(client) if user.name == username), None) def list_users(self, context, limit=None, marker=None, include_marker=False): """ List all non-superuser accounts. Omit names on the ignore list. Return an empty set if there are none. """ with CassandraLocalhostConnection(self.__admin_user) as client: users = [user.serialize() for user in self._get_listed_users(client)] return pagination.paginate_list(users, limit, marker, include_marker) def _get_listed_users(self, client): """ Return a set of unique user instances. Omit user names on the ignore list. """ return self._get_users( client, lambda user: user.name not in self.ignore_users) def _get_users(self, client, matcher=None): """ :param matcher: Filter expression. :type matcher: callable """ acl = self._get_acl(client) return {self._build_user(user.name, acl) for user in client.execute("LIST USERS;") if not matcher or matcher(user)} def _load_user(self, client, username, check_reserved=True): if check_reserved: self._check_reserved_user_name(username) acl = self._get_acl(client, username=username) return self._build_user(username, acl) def _build_user(self, username, acl): user = models.CassandraUser(username) for ks, permissions in acl.get(username, {}).items(): if permissions: user.databases.append(models.CassandraSchema(ks).serialize()) return user def _get_acl(self, client, username=None): """Return the ACL for a database user. Return ACLs for all users if no particular username is specified. The ACL has the following format: {username #1: {keyspace #1: {access mod(s)...}, keyspace #2: {...}}, username #2: {keyspace #1: {...}, keyspace #3: {...}} } """ def build_list_query(username): query_tokens = ["LIST ALL PERMISSIONS"] if username: query_tokens.extend(["OF", "'%s'" % username]) query_tokens.append("NORECURSIVE;") return ' '.join(query_tokens) def parse_keyspace_name(resource): """Parse a keyspace name from a resource string. The resource string has the following form: <object name> where 'object' is one of the database objects (keyspace, table...). Return the name as a singleton set. Return an empty set if no match is found. """ match = self._KS_NAME_REGEX.match(resource) if match: return {match.group(1)} return {} def update_acl(username, keyspace, permission, acl): permissions = acl.get(username, {}).get(keyspace) if permissions is None: guestagent_utils.update_dict({username: {keyspace: {permission}}}, acl) else: permissions.add(permission) all_keyspace_names = None acl = dict() for item in client.execute(build_list_query(username)): user = item.username resource = item.resource permission = item.permission if user and resource and permission: if resource == '<all keyspaces>': # Cache the full keyspace list to improve performance and # ensure consistent results for all users. if all_keyspace_names is None: all_keyspace_names = { item.name for item in self._get_available_keyspaces(client) } keyspaces = all_keyspace_names else: keyspaces = parse_keyspace_name(resource) for keyspace in keyspaces: update_acl(user, keyspace, permission, acl) return acl def list_superusers(self): """List all system users existing in the database.""" with CassandraLocalhostConnection(self.__admin_user) as client: return self._get_users(client, lambda user: user.super) def grant_access(self, context, username, hostname, databases): """ Grant full access on keyspaces to a given username. """ user = models.CassandraUser(username) with CassandraLocalhostConnection(self.__admin_user) as client: for db in databases: self._grant_full_access_on_keyspace( client, models.CassandraSchema(db), user) def revoke_access(self, context, username, hostname, database): """ Revoke all permissions on any database resources from a given username. """ user = models.CassandraUser(username) with CassandraLocalhostConnection(self.__admin_user) as client: self._revoke_all_access_on_keyspace( client, models.CassandraSchema(database), user) def _grant_full_access_on_keyspace(self, client, keyspace, user, check_reserved=True): """ Grant all non-superuser permissions on a keyspace to a given user. """ if check_reserved: self._check_reserved_user_name(user.name) self._check_reserved_keyspace_name(keyspace.name) for access in self.__NO_SUPERUSER_MODIFIERS: self._grant_permission_on_keyspace(client, access, keyspace, user) def _grant_permission_on_keyspace(self, client, modifier, keyspace, user): """ Grant a non-superuser permission on a keyspace to a given user. Raise an exception if the caller attempts to grant a superuser access. """ LOG.debug("Granting '%s' access on '%s' to user '%s'." % (modifier, keyspace.name, user.name)) if modifier in self.__NO_SUPERUSER_MODIFIERS: client.execute("GRANT {} ON KEYSPACE \"{}\" TO '{}';", (modifier, keyspace.name, user.name)) else: raise exception.UnprocessableEntity( "Invalid permission modifier (%s). Allowed values are: '%s'" % (modifier, ', '.join(self.__NO_SUPERUSER_MODIFIERS))) def _revoke_all_access_on_keyspace(self, client, keyspace, user, check_reserved=True): if check_reserved: self._check_reserved_user_name(user.name) self._check_reserved_keyspace_name(keyspace.name) LOG.debug("Revoking all permissions on '%s' from user '%s'."
% (keyspace.name, user.name)) client.execute("REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';", (keyspace.name, user.name)) def update_attributes(self, context, username, hostname, user_attrs): with CassandraLocalhostConnection(self.__admin_user) as client: user = self._load_user(client, username) new_name = user_attrs.get('name') new_password = user_attrs.get('password') self._update_user(client, user, new_name, new_password) def _update_user(self, client, user, new_username, new_password): """ Update a user of a given username. Updatable attributes include username and password. If a new username and password are given a new user with those attributes is created and all permissions from the original user get transfered to it. The original user is then dropped therefore revoking its permissions. If only new password is specified the existing user gets altered with that password. """ if new_username is not None and user.name != new_username: if new_password is not None: self._rename_user(client, user, new_username, new_password) else: raise exception.UnprocessableEntity( _("Updating username requires specifying a password " "as well.")) elif new_password is not None and user.password != new_password: user.password = new_password self._alter_user_password(client, user) def _rename_user(self, client, user, new_username, new_password): """ Rename a given user also updating its password. Transfer the current permissions to the new username. Drop the old username therefore revoking its permissions. """ LOG.debug("Renaming user '%s' to '%s'" % (user.name, new_username)) new_user = models.CassandraUser(new_username, new_password) new_user.databases.extend(user.databases) self._create_user_and_grant(client, new_user) self._drop_user(client, user) def alter_user_password(self, user): with CassandraLocalhostConnection(self.__admin_user) as client: self._alter_user_password(client, user) def change_passwords(self, context, users): with CassandraLocalhostConnection(self.__admin_user) as client: for user in users: self._alter_user_password(client, self._deserialize_user(user)) def _alter_user_password(self, client, user): LOG.debug("Changing password of user '%s'." % user.name) client.execute("ALTER USER '{}' " "WITH PASSWORD %s;", (user.name,), (user.password,)) def create_database(self, context, databases): with CassandraLocalhostConnection(self.__admin_user) as client: for item in databases: self._create_single_node_keyspace( client, self._deserialize_keyspace(item)) def _create_single_node_keyspace(self, client, keyspace): """ Create a single-replica keyspace. Cassandra stores replicas on multiple nodes to ensure reliability and fault tolerance. All replicas are equally important; there is no primary or master. A replication strategy determines the nodes where replicas are placed. SimpleStrategy is for a single data center only. The total number of replicas across the cluster is referred to as the replication factor. Replication Strategy: 'SimpleStrategy' is not optimized for multiple data centers. 'replication_factor' The number of replicas of data on multiple nodes. Required for SimpleStrategy; otherwise, not used. Keyspace names are case-insensitive by default. To make a name case-sensitive, enclose it in double quotation marks. 
""" client.execute("CREATE KEYSPACE \"{}\" WITH REPLICATION = " "{{ 'class' : 'SimpleStrategy', " "'replication_factor' : 1 }};", (keyspace.name,)) def delete_database(self, context, database): with CassandraLocalhostConnection(self.__admin_user) as client: self._drop_keyspace(client, self._deserialize_keyspace(database)) def _drop_keyspace(self, client, keyspace): LOG.debug("Dropping keyspace '%s'." % keyspace.name) client.execute("DROP KEYSPACE \"{}\";", (keyspace.name,)) def list_databases(self, context, limit=None, marker=None, include_marker=False): with CassandraLocalhostConnection(self.__admin_user) as client: databases = [keyspace.serialize() for keyspace in self._get_available_keyspaces(client)] return pagination.paginate_list(databases, limit, marker, include_marker) def _get_available_keyspaces(self, client): """ Return a set of unique keyspace instances. Omit keyspace names on the ignore list. """ return {models.CassandraSchema(db.keyspace_name) for db in client.execute("SELECT * FROM " "system.schema_keyspaces;") if db.keyspace_name not in self.ignore_dbs} def list_access(self, context, username, hostname): with CassandraLocalhostConnection(self.__admin_user) as client: user = self._find_user(client, username) if user: return user.databases raise exception.UserNotFound(username) def _deserialize_keyspace(self, keyspace_dict, check_reserved=True): if keyspace_dict: db = models.CassandraSchema.deserialize_schema(keyspace_dict) if check_reserved: self._check_reserved_keyspace_name(db.name) return db return None def _check_reserved_keyspace_name(self, name): if name in self.ignore_dbs: raise ValueError(_("This keyspace-name is reserved: %s") % name) def _deserialize_user(self, user_dict, check_reserved=True): if user_dict: user = models.CassandraUser.deserialize_user(user_dict) if check_reserved: self._check_reserved_user_name(user.name) return user return None def _check_reserved_user_name(self, name): if name in self.ignore_users: raise ValueError(_("This user-name is reserved: %s") % name) @property def ignore_users(self): return cfg.get_ignored_users(manager=MANAGER) @property def ignore_dbs(self): return cfg.get_ignored_dbs(manager=MANAGER) class CassandraConnection(object): """A wrapper to manage a Cassandra connection.""" # Cassandra 2.1 only supports protocol versions 3 and lower. NATIVE_PROTOCOL_VERSION = 3 def __init__(self, contact_points, user): self.__user = user # A Cluster is initialized with a set of initial contact points. # After the driver connects to one of the nodes it will automatically # discover the rest. # Will connect to '127.0.0.1' if None contact points are given. self._cluster = Cluster( contact_points=contact_points, auth_provider=PlainTextAuthProvider(user.name, user.password), protocol_version=self.NATIVE_PROTOCOL_VERSION) self.__session = None def __enter__(self): self.__connect() return self def __exit__(self, exc_type, exc_value, traceback): self.__disconnect() def execute(self, query, identifiers=None, data_values=None, timeout=None): """ Execute a query with a given sequence or dict of data values to bind. If a sequence is used, '%s' should be used the placeholder for each argument. If a dict is used, '%(name)s' style placeholders must be used. Only data values should be supplied this way. Other items, such as keyspaces, table names, and column names should be set ahead of time. Use the '{}' style placeholders and 'identifiers' parameter for those. Raise an exception if the operation exceeds the given timeout (sec). 
There is no timeout if set to None. Return a set of rows or an empty list if None. """ if self.__is_active(): try: rows = self.__session.execute(self.__bind(query, identifiers), data_values, timeout) return rows or [] except OperationTimedOut: LOG.error(_("Query execution timed out.")) raise LOG.debug("Cannot perform this operation on a closed connection.") raise exception.UnprocessableEntity() def __bind(self, query, identifiers): if identifiers: return query.format(*identifiers) return query def __connect(self): if not self._cluster.is_shutdown: LOG.debug("Connecting to a Cassandra cluster as '%s'." % self.__user.name) if not self.__is_active(): self.__session = self._cluster.connect() else: LOG.debug("Connection already open.") LOG.debug("Connected to cluster: '%s'" % self._cluster.metadata.cluster_name) for host in self._cluster.metadata.all_hosts(): LOG.debug("Connected to node: '%s' in rack '%s' at datacenter " "'%s'" % (host.address, host.rack, host.datacenter)) else: LOG.debug("Cannot perform this operation on a terminated cluster.") raise exception.UnprocessableEntity() def __disconnect(self): if self.__is_active(): try: LOG.debug("Disconnecting from cluster: '%s'" % self._cluster.metadata.cluster_name) self._cluster.shutdown() self.__session.shutdown() except Exception: LOG.debug("Failed to disconnect from a Cassandra cluster.") def __is_active(self): return self.__session and not self.__session.is_shutdown class CassandraLocalhostConnection(CassandraConnection): """ A connection to the localhost Cassandra server. """ def __init__(self, user): super(CassandraLocalhostConnection, self).__init__(None, user) trove-5.0.0/trove/guestagent/datastore/experimental/pxc/0000775000567000056710000000000012701410521024567 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/pxc/__init__.py0000664000567000056710000000000012701410316026670 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/pxc/manager.py0000664000567000056710000000211612701410316026555 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.guestagent.datastore.experimental.pxc import service as pxc_service from trove.guestagent.datastore.galera_common import manager from trove.guestagent.datastore.mysql_common import service as mysql_service class Manager(manager.GaleraManager): def __init__(self): super(Manager, self).__init__(pxc_service.PXCApp, mysql_service.BaseMySqlAppStatus, pxc_service.PXCAdmin) trove-5.0.0/trove/guestagent/datastore/experimental/pxc/service.py0000664000567000056710000000737712701410316026621 0ustar jenkinsjenkins00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging import sqlalchemy from sqlalchemy.sql.expression import text from trove.common import cfg from trove.common.i18n import _ from trove.common import utils as utils from trove.guestagent.datastore.galera_common import service as galera_service from trove.guestagent.datastore.mysql_common import service as mysql_service LOG = logging.getLogger(__name__) CONF = cfg.CONF class PXCApp(galera_service.GaleraApp): def __init__(self, status): super(PXCApp, self).__init__( status, mysql_service.BaseLocalSqlClient, mysql_service.BaseKeepAliveConnection) @property def mysql_service(self): result = super(PXCApp, self).mysql_service if result['type'] == 'sysvinit': result['cmd_bootstrap_galera_cluster'] = ( "sudo service %s bootstrap-pxc" % result['service']) elif result['type'] == 'systemd': result['cmd_bootstrap_galera_cluster'] = ( "sudo systemctl start %s@bootstrap.service" % result['service']) return result @property def cluster_configuration(self): return self.configuration_manager.get_value('mysqld') def secure(self, config_contents): LOG.info(_("Generating admin password.")) admin_password = utils.generate_random_password() mysql_service.clear_expired_password() engine = sqlalchemy.create_engine("mysql://root:@localhost:3306", echo=True) with self.local_sql_client(engine) as client: self._remove_anonymous_user(client) self._create_admin_user(client, admin_password) self.stop_db() self._reset_configuration(config_contents, admin_password) self.start_mysql() # TODO(cp16net) figure out reason for PXC not updating the password try: with self.local_sql_client(engine) as client: query = text("select Host, User from mysql.user;") client.execute(query) except Exception: LOG.debug('Failed to query mysql.') # Create the admin user after the config files are written, because # Percona PXC was not committing the grant for the admin user after # removing the anonymous users. self._wait_for_mysql_to_be_really_alive( CONF.timeout_wait_for_service) with self.local_sql_client(engine) as client: self._create_admin_user(client, admin_password) self.stop_db() self._reset_configuration(config_contents, admin_password) self.start_mysql() self._wait_for_mysql_to_be_really_alive( CONF.timeout_wait_for_service) LOG.debug("MySQL secure complete.") class PXCRootAccess(mysql_service.BaseMySqlRootAccess): def __init__(self): super(PXCRootAccess, self).__init__( mysql_service.BaseLocalSqlClient, PXCApp(mysql_service.BaseMySqlAppStatus.get())) class PXCAdmin(mysql_service.BaseMySqlAdmin): def __init__(self): super(PXCAdmin, self).__init__( mysql_service.BaseLocalSqlClient, PXCRootAccess(), PXCApp)
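# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): how
# PXCApp.mysql_service above derives the Galera bootstrap command from the
# init system in use. The service name is made up.
def _demo_bootstrap_command(service_type, service_name):
    if service_type == 'sysvinit':
        return "sudo service %s bootstrap-pxc" % service_name
    if service_type == 'systemd':
        return "sudo systemctl start %s@bootstrap.service" % service_name
    raise ValueError("unsupported init system: %s" % service_type)

# _demo_bootstrap_command('systemd', 'mysql')
# -> 'sudo systemctl start mysql@bootstrap.service'
# ---------------------------------------------------------------------------
trove-5.0.0/trove/guestagent/datastore/experimental/__init__.py0000664000567000056710000000000012701410316026076 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/redis/0000775000567000056710000000000012701410521025103 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/redis/system.py0000664000567000056710000000240412701410316027003 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace # All Rights Reserved.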
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Determines operating system version and OS dependent commands. """ from trove.guestagent.common.operating_system import get_os REDIS_OWNER = 'redis' REDIS_CONFIG = '/etc/redis/redis.conf' REDIS_PID_FILE = '/var/run/redis/redis-server.pid' REDIS_LOG_FILE = '/var/log/redis/server.log' REDIS_CONF_DIR = '/etc/redis' REDIS_DATA_DIR = '/var/lib/redis' REDIS_PORT = '6379' REDIS_INIT = '/etc/init/redis-server.conf' REDIS_CLI = '/usr/bin/redis-cli' REDIS_BIN = '/usr/bin/redis-server' REDIS_PACKAGE = 'redis-server' SERVICE_CANDIDATES = ['redis-server'] OS = get_os() if OS == 'redhat': REDIS_CONFIG = '/etc/redis.conf' REDIS_PACKAGE = 'redis' trove-5.0.0/trove/guestagent/datastore/experimental/redis/__init__.py0000664000567000056710000000000012701410316027204 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/redis/manager.py0000664000567000056710000002463412701410316027102 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.notification import EndNotification from trove.common import utils from trove.guestagent import backup from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.redis import service from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is the Redis manager class.
It is dynamically loaded based off of the service_type of the trove instance """ def __init__(self): super(Manager, self).__init__('redis') self._app = service.RedisApp() @property def status(self): return self._app.status @property def configuration_manager(self): return self._app.configuration_manager def _perform_restore(self, backup_info, context, restore_location, app): """Perform a restore on this instance.""" LOG.info(_("Restoring database from backup %s.") % backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception(_("Error performing restore from backup %s.") % backup_info['id']) app.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info(_("Restored database successfully.")) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() device.mount(mount_point) operating_system.chown(mount_point, 'redis', 'redis', as_root=True) LOG.debug('Mounted the volume.') self._app.install_if_needed(packages) LOG.info(_('Writing redis configuration.')) if cluster_config: config_contents = (config_contents + "\n" + "cluster-enabled yes\n" + "cluster-config-file cluster.conf\n") self._app.configuration_manager.save_configuration(config_contents) self._app.apply_initial_guestagent_configuration() if backup_info: persistence_dir = self._app.get_working_dir() self._perform_restore(backup_info, context, persistence_dir, self._app) else: # If we're not restoring, we have to force a restart of the # server manually so that the configuration stuff takes effect self._app.restart() if snapshot: self.attach_replica(context, snapshot, snapshot['config']) def restart(self, context): """ Restart this redis instance. This method is called when the guest agent gets a restart message from the taskmanager. """ LOG.debug("Restart called.") self._app.restart() def start_db_with_conf_changes(self, context, config_contents): """ Start this redis instance with new conf changes. """ LOG.debug("Start DB with conf changes called.") self._app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this redis instance. This method is called when the guest agent gets a stop message from the taskmanager. 
""" LOG.debug("Stop DB called.") self._app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def create_backup(self, context, backup_info): """Create a backup of the database.""" LOG.debug("Creating backup.") with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self._app.remove_overrides() else: self._app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): LOG.debug("Applying overrides.") self._app.apply_overrides(self._app.admin, overrides) def backup_required_for_replication(self, context): return self.replication.backup_required_for_replication() def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") self.replication.enable_as_master(self._app, replica_source_config) snapshot_id, log_position = self.replication.snapshot_for_replication( context, self._app, None, snapshot_info) volume_stats = self.get_filesystem_stats(context, None) replication_snapshot = { 'dataset': { 'datastore_manager': self.manager, 'dataset_size': volume_stats.get('used', 0.0), 'volume_size': volume_stats.get('total', 0.0), 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': self.replication.get_master_ref(self._app, snapshot_info), 'log_position': log_position } return replication_snapshot def enable_as_master(self, context, replica_source_config): LOG.debug("Calling enable_as_master.") self.replication.enable_as_master(self._app, replica_source_config) def detach_replica(self, context, for_failover=False): LOG.debug("Detaching replica.") replica_info = self.replication.detach_slave(self._app, for_failover) return replica_info def get_replica_context(self, context): LOG.debug("Getting replica context.") replica_info = self.replication.get_replica_context(self._app) return replica_info def _validate_slave_for_replication(self, context, replica_info): if replica_info['replication_strategy'] != self.replication_strategy: raise exception.IncompatibleReplicationStrategy( replica_info.update({ 'guest_strategy': self.replication_strategy })) def attach_replica(self, context, replica_info, slave_config): LOG.debug("Attaching replica.") try: if 'replication_strategy' in replica_info: self._validate_slave_for_replication(context, replica_info) self.replication.enable_as_slave(self._app, replica_info, slave_config) except Exception: LOG.exception("Error enabling replication.") raise def make_read_only(self, context, read_only): LOG.debug("Executing make_read_only(%s)" % read_only) self._app.make_read_only(read_only) def _get_repl_info(self): return self._app.admin.get_info('replication') def _get_master_host(self): slave_info = self._get_repl_info() return slave_info and slave_info['master_host'] or None def _get_repl_offset(self): repl_info = self._get_repl_info() LOG.debug("Got repl info: %s" % repl_info) offset_key = '%s_repl_offset' % repl_info['role'] offset = repl_info[offset_key] LOG.debug("Found offset %s for key %s." 
% (offset, offset_key)) return int(offset) def get_last_txn(self, context): master_host = self._get_master_host() repl_offset = self._get_repl_offset() return master_host, repl_offset def get_latest_txn_id(self, context): LOG.info(_("Retrieving latest repl offset.")) return self._get_repl_offset() def wait_for_txn(self, context, txn): LOG.info(_("Waiting on repl offset '%s'.") % txn) def _wait_for_txn(): current_offset = self._get_repl_offset() LOG.debug("Current offset: %s." % current_offset) return current_offset >= txn try: utils.poll_until(_wait_for_txn, time_out=120) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for Redis repl " "offset to change to '%s'.") % txn) def cleanup_source_on_replica_detach(self, context, replica_info): LOG.debug("Cleaning up the source on the detach of a replica.") self.replication.cleanup_source_on_replica_detach(self._app, replica_info) def demote_replication_master(self, context): LOG.debug("Demoting replica source.") self.replication.demote_master(self._app) def cluster_meet(self, context, ip, port): LOG.debug("Executing cluster_meet to join node to cluster.") self._app.cluster_meet(ip, port) def get_node_ip(self, context): LOG.debug("Retrieving cluster node ip address.") return self._app.get_node_ip() def get_node_id_for_removal(self, context): LOG.debug("Validating removal of node from cluster.") return self._app.get_node_id_for_removal() def remove_nodes(self, context, node_ids): LOG.debug("Removing nodes from cluster.") self._app.remove_nodes(node_ids) def cluster_addslots(self, context, first_slot, last_slot): LOG.debug("Executing cluster_addslots to assign hash slots %s-%s.", first_slot, last_slot) self._app.cluster_addslots(first_slot, last_slot) trove-5.0.0/trove/guestagent/datastore/experimental/redis/service.py0000664000567000056710000004602212701410316027123 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import redis from redis.exceptions import BusyLoadingError, ConnectionError from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import PropertiesCodec, StringConverter from trove.common import utils as utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.redis import system from trove.guestagent.datastore import service from trove.guestagent import pkg LOG = logging.getLogger(__name__) TIME_OUT = 1200 CONF = cfg.CONF CLUSTER_CFG = 'clustering' packager = pkg.Package() class RedisAppStatus(service.BaseDbStatus): """ Handles all of the status updating for the redis guest agent. 
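The mapping implemented by _get_actual_db_status() below is, roughly:
a successful PING means RUNNING, a ConnectionError means SHUTDOWN, a
BusyLoadingError means BLOCKED, and any other failure is reported as
CRASHED.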
""" def __init__(self, client): super(RedisAppStatus, self).__init__() self.__client = client def set_client(self, client): self.__client = client def _get_actual_db_status(self): try: if self.__client.ping(): return rd_instance.ServiceStatuses.RUNNING except ConnectionError: return rd_instance.ServiceStatuses.SHUTDOWN except BusyLoadingError: return rd_instance.ServiceStatuses.BLOCKED except Exception: LOG.exception(_("Error getting Redis status.")) return rd_instance.ServiceStatuses.CRASHED def cleanup_stalled_db_services(self): utils.execute_with_timeout('pkill', '-9', 'redis-server', run_as_root=True, root_helper='sudo') class RedisApp(object): """ Handles installation and configuration of redis on a trove instance. """ def __init__(self, state_change_wait_time=None): """ Sets default status and state_change_wait_time """ if state_change_wait_time: self.state_change_wait_time = state_change_wait_time else: self.state_change_wait_time = CONF.state_change_wait_time revision_dir = guestagent_utils.build_file_path( os.path.dirname(system.REDIS_CONFIG), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) config_value_mappings = {'yes': True, 'no': False, "''": None} self._value_converter = StringConverter(config_value_mappings) self.configuration_manager = ConfigurationManager( system.REDIS_CONFIG, system.REDIS_OWNER, system.REDIS_OWNER, PropertiesCodec( unpack_singletons=False, string_mappings=config_value_mappings ), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.admin = self._build_admin_client() self.status = RedisAppStatus(self.admin) def _build_admin_client(self): password = self.get_configuration_property('requirepass') socket = self.get_configuration_property('unixsocket') return RedisAdmin(password=password, unix_socket_path=socket) def install_if_needed(self, packages): """ Install redis if needed do nothing if it is already installed. """ LOG.info(_('Preparing Guest as Redis Server.')) if not packager.pkg_is_installed(packages): LOG.info(_('Installing Redis.')) self._install_redis(packages) LOG.info(_('Redis installed completely.')) def _install_redis(self, packages): """ Install the redis server. """ LOG.debug('Installing redis server.') msg = "Creating %s." % system.REDIS_CONF_DIR LOG.debug(msg) operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True) pkg_opts = {} packager.pkg_install(packages, pkg_opts, TIME_OUT) self.start_db() LOG.debug('Finished installing redis server.') def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def apply_overrides(self, client, overrides): """Use the 'CONFIG SET' command to apply configuration at runtime. Commands that appear multiple times have values separated by a white space. For instance, the following two 'save' directives from the configuration file... save 900 1 save 300 10 ... would be applied in a single command as: CONFIG SET save "900 1 300 10" Note that the 'CONFIG' command has been renamed to prevent users from using it to bypass configuration groups. 
""" for prop_name, prop_args in overrides.items(): args_string = self._join_lists( self._value_converter.to_strings(prop_args), ' ') client.config_set(prop_name, args_string) def _join_lists(self, items, sep): """Join list items (including items from sub-lists) into a string. Non-list inputs are returned unchanged. _join_lists('1234', ' ') = "1234" _join_lists(['1','2','3','4'], ' ') = "1 2 3 4" _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4" """ if isinstance(items, list): return sep.join([sep.join(e) if isinstance(e, list) else e for e in items]) return items def remove_overrides(self): self.configuration_manager.remove_user_override() def make_read_only(self, read_only): # Redis has no mechanism to make an instance read-only at present pass def start_db_with_conf_changes(self, config_contents): LOG.info(_('Starting redis with conf changes.')) if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration() self.start_db(True) def start_db(self, update_db=False): self.status.start_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def apply_initial_guestagent_configuration(self): """Update guestagent-controlled configuration properties. """ # Hide the 'CONFIG' command from end users by mangling its name. self.admin.set_config_command_name(self._mangle_config_command_name()) self.configuration_manager.apply_system_override( {'daemonize': 'yes', 'pidfile': system.REDIS_PID_FILE, 'logfile': system.REDIS_LOG_FILE, 'dir': system.REDIS_DATA_DIR}) def get_config_command_name(self): """Get current name of the 'CONFIG' command. """ renamed_cmds = self.configuration_manager.get_value('rename-command') for name_pair in renamed_cmds: if name_pair[0] == 'CONFIG': return name_pair[1] return None def _mangle_config_command_name(self): """Hide the 'CONFIG' command from the clients by renaming it to a random string known only to the guestagent. Return the mangled name. """ mangled = utils.generate_random_password() self._rename_command('CONFIG', mangled) return mangled def _rename_command(self, old_name, new_name): """It is possible to completely disable a command by renaming it to an empty string. """ self.configuration_manager.apply_system_override( {'rename-command': [old_name, new_name]}) def get_logfile(self): """Specify the log file name. Also the empty string can be used to force Redis to log on the standard output. Note that if you use standard output for logging but daemonize, logs will be sent to /dev/null """ return self.get_configuration_property('logfile') def get_db_filename(self): """The filename where to dump the DB. """ return self.get_configuration_property('dbfilename') def get_working_dir(self): """The DB will be written inside this directory, with the filename specified the 'dbfilename' configuration directive. The Append Only File will also be created inside this directory. 
""" return self.get_configuration_property('dir') def get_persistence_filepath(self): """Returns the full path to the persistence file.""" return guestagent_utils.build_file_path( self.get_working_dir(), self.get_db_filename()) def get_port(self): """Port for this instance or default if not set.""" return self.get_configuration_property('port', system.REDIS_PORT) def get_auth_password(self): """Client authentication password for this instance or None if not set. """ return self.get_configuration_property('requirepass') def is_appendonly_enabled(self): """True if the Append Only File (AOF) persistence mode is enabled. """ return self.get_configuration_property('appendonly', False) def get_append_file_name(self): """The name of the append only file (AOF). """ return self.get_configuration_property('appendfilename') def is_cluster_enabled(self): """Only nodes that are started as cluster nodes can be part of a Redis Cluster. """ return self.get_configuration_property('cluster-enabled', False) def enable_cluster(self): """In order to start a Redis instance as a cluster node enable the cluster support """ self.configuration_manager.apply_system_override( {'cluster-enabled': 'yes'}, CLUSTER_CFG) def get_cluster_config_filename(self): """Cluster node configuration file. """ return self.get_configuration_property('cluster-config-file') def set_cluster_config_filename(self, name): """Make sure that instances running in the same system do not have overlapping cluster configuration file names. """ self.configuration_manager.apply_system_override( {'cluster-config-file': name}, CLUSTER_CFG) def get_cluster_node_timeout(self): """Cluster node timeout is the amount of milliseconds a node must be unreachable for it to be considered in failure state. """ return self.get_configuration_property('cluster-node-timeout') def get_configuration_property(self, name, default=None): """Return the value of a Redis configuration property. Returns a single value for single-argument properties or a list otherwise. 
""" return utils.unpack_singleton( self.configuration_manager.get_value(name, default)) def cluster_meet(self, ip, port): try: utils.execute_with_timeout('redis-cli', 'cluster', 'meet', ip, port) except exception.ProcessExecutionError: LOG.exception(_('Error joining node to cluster at %s.'), ip) raise def cluster_addslots(self, first_slot, last_slot): try: slots = map(str, range(first_slot, last_slot + 1)) group_size = 200 while slots: cmd = ([system.REDIS_CLI, 'cluster', 'addslots'] + slots[0:group_size]) out, err = utils.execute_with_timeout(*cmd, run_as_root=True, root_helper='sudo') if 'OK' not in out: raise RuntimeError(_('Error executing addslots: %s') % out) del slots[0:group_size] except exception.ProcessExecutionError: LOG.exception(_('Error adding slots %(first_slot)s-%(last_slot)s' ' to cluster.'), {'first_slot': first_slot, 'last_slot': last_slot}) raise def _get_node_info(self): try: out, _ = utils.execute_with_timeout('redis-cli', '--csv', 'cluster', 'nodes') return [line.split(' ') for line in out.splitlines()] except exception.ProcessExecutionError: LOG.exception(_('Error getting node info.')) raise def _get_node_details(self): for node_details in self._get_node_info(): if 'myself' in node_details[2]: return node_details raise exception.TroveError(_("Unable to determine node details")) def get_node_ip(self): """Returns [ip, port] where both values are strings""" return self._get_node_details()[1].split(':') def get_node_id_for_removal(self): node_details = self._get_node_details() node_id = node_details[0] my_ip = node_details[1].split(':')[0] try: slots, _ = utils.execute_with_timeout('redis-cli', '--csv', 'cluster', 'slots') return node_id if my_ip not in slots else None except exception.ProcessExecutionError: LOG.exception(_('Error validating node to for removal.')) raise def remove_nodes(self, node_ids): try: for node_id in node_ids: utils.execute_with_timeout('redis-cli', 'cluster', 'forget', node_id) except exception.ProcessExecutionError: LOG.exception(_('Error removing node from cluster.')) raise class RedisAdmin(object): """Handles administrative tasks on the Redis database. """ DEFAULT_CONFIG_CMD = 'CONFIG' def __init__(self, password=None, unix_socket_path=None): self.__client = redis.StrictRedis( password=password, unix_socket_path=unix_socket_path) self.__config_cmd_name = self.DEFAULT_CONFIG_CMD def set_config_command_name(self, name): """Set name of the 'CONFIG' command or None for default. """ self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD def ping(self): """Ping the Redis server and return True if a response is received. """ return self.__client.ping() def get_info(self, section=None): return self.__client.info(section=section) def persist_data(self): save_cmd = 'SAVE' last_save = self.__client.lastsave() LOG.debug("Starting Redis data persist") if self.__client.bgsave(): save_cmd = 'BGSAVE' def _timestamp_changed(): return last_save != self.__client.lastsave() try: utils.poll_until(_timestamp_changed, sleep_time=2, time_out=TIME_OUT) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for Redis " "persist (%s) to complete.") % save_cmd) # If the background save fails for any reason, try doing a foreground # one. This blocks client connections, so we don't want it to be # the default. 
elif not self.__client.save(): raise exception.BackupCreationError(_("Could not persist " "Redis data (%s)") % save_cmd) LOG.debug("Redis data persist (%s) completed" % save_cmd) def set_master(self, host=None, port=None): self.__client.slaveof(host, port) def config_set(self, name, value): response = self.execute( '%s %s' % (self.__config_cmd_name, 'SET'), name, value) if not self._is_ok_response(response): raise exception.UnprocessableEntity( _("Could not set configuration property '%(name)s' to " "'%(value)s'.") % {'name': name, 'value': value}) def _is_ok_response(self, response): """Return True if a given Redis response is 'OK'. """ return response and redis.client.bool_ok(response) def execute(self, cmd_name, *cmd_args, **options): """Execute a command and return a parsed response. """ try: return self.__client.execute_command(cmd_name, *cmd_args, **options) except Exception as e: LOG.exception(e) raise exception.TroveError( _("Redis command '%(cmd_name)s %(cmd_args)s' failed.") % {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)}) def wait_until(self, key, wait_value, section=None, timeout=CONF.usage_timeout): """Polls redis until the specified 'key' changes to 'wait_value'.""" LOG.debug("Waiting for Redis '%s' to be: %s." % (key, wait_value)) def _check_info(): redis_info = self.get_info(section) if key in redis_info: current_value = redis_info[key] LOG.debug("Found '%s' for field %s." % (current_value, key)) else: LOG.error(_('Output from Redis command: %s') % redis_info) raise RuntimeError(_("Field %(field)s not found " "(Section: '%(sec)s').") % ({'field': key, 'sec': section})) return current_value == wait_value try: utils.poll_until(_check_info, time_out=timeout) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for Redis field " "'%(field)s' to change to '%(val)s'.") % {'field': key, 'val': wait_value}) trove-5.0.0/trove/guestagent/datastore/experimental/vertica/0000775000567000056710000000000012701410521025432 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/vertica/system.py0000664000567000056710000001211412701410316027331 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
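# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the adminTools and
# SQL command templates defined below are plain %-format strings; callers
# fill them in and hand the result to shell_execute() or exec_vsql_command().
# The database name and password here are made up.
_DEMO_STOP_DB = "/opt/vertica/bin/adminTools -t stop_db -F -d %s -p '%s'"
_demo_cmd = _DEMO_STOP_DB % ('db_srvr', 's3cret')
# _demo_cmd is now:
#     /opt/vertica/bin/adminTools -t stop_db -F -d db_srvr -p 's3cret'
# ---------------------------------------------------------------------------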
import re from trove.common import utils ALTER_DB_CFG = "ALTER DATABASE %s SET %s = %s" ALTER_DB_RESET_CFG = "ALTER DATABASE %s CLEAR %s" ALTER_USER_PASSWORD = "ALTER USER %s IDENTIFIED BY '%s'" ADD_DB_TO_NODE = ("/opt/vertica/bin/adminTools -t db_add_node -a" " %s -d %s -p '%s'") REMOVE_DB_FROM_NODE = ("/opt/vertica/bin/adminTools -t db_remove_node -s" " %s -d %s -i -p '%s'") CREATE_DB = ("/opt/vertica/bin/adminTools -t create_db -s" " %s -d %s -c %s -D %s -p '%s'") CREATE_USER = "CREATE USER %s IDENTIFIED BY '%s'" ENABLE_FOR_USER = "ALTER USER %s DEFAULT ROLE %s" GRANT_TO_USER = "GRANT %s to %s" INSTALL_VERTICA = ("/opt/vertica/sbin/install_vertica -s %s" " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type") MARK_DESIGN_KSAFE = "SELECT MARK_DESIGN_KSAFE(%s)" NODE_STATUS = "SELECT node_state FROM nodes where node_state <> '%s'" STOP_DB = "/opt/vertica/bin/adminTools -t stop_db -F -d %s -p '%s'" START_DB = "/opt/vertica/bin/adminTools -t start_db -d %s -p '%s'" STATUS_ACTIVE_DB = "/opt/vertica/bin/adminTools -t show_active_db" STATUS_DB_DOWN = "/opt/vertica/bin/adminTools -t db_status -s DOWN" SET_RESTART_POLICY = ("/opt/vertica/bin/adminTools -t set_restart_policy " "-d %s -p '%s'") SEND_CONF_TO_SERVER = ("rsync -v -e 'ssh -o " "UserKnownHostsFile=/dev/null -o " "StrictHostKeyChecking=no' --perms --owner --group " "%s %s:%s") SSH_KEY_GEN = "ssh-keygen -f %s/.ssh/id_rsa -t rsa -N ''" UPDATE_VERTICA = ("/opt/vertica/sbin/update_vertica %s %s " " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type") UPDATE_REMOVE = ("/opt/vertica/sbin/update_vertica --remove-hosts %s " " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type") UPDATE_ADD = ("/opt/vertica/sbin/update_vertica --add-hosts %s " " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type") USER_EXISTS = ("/opt/vertica/bin/vsql -w '%s' -c " "\"select 1 from users where user_name = '%s'\" " "| grep row | awk '{print $1}' | cut -c2-") VERTICA_ADMIN = "dbadmin" VERTICA_ADMIN_GRP = "verticadba" VERTICA_AGENT_SERVICE_COMMAND = "service vertica_agent %s" VERTICA_CONF = "/etc/vertica.cnf" INSTALL_TIMEOUT = 1000 CREATE_LIBRARY = "CREATE LIBRARY %s AS '%s'" CREATE_SOURCE = "CREATE SOURCE %s AS LANGUAGE '%s' NAME '%s' LIBRARY %s" UDL_LIBS = [ { 'func_name': "curl", 'lib_name': "curllib", 'language': "C++", 'factory': "CurlSourceFactory", 'path': "/opt/vertica/sdk/examples/build/cURLLib.so" }, ] def shell_execute(command, command_executor="root"): # This method encapsulates utils.execute for two purposes: # 1. It makes testing safer. # 2. It allows executing commands as another user, using that user's # environment. # Note: This method uses su because sudo -i -u # does not work with the vertica installer # and has problems executing remote commands. return utils.execute("sudo", "su", "-", command_executor, "-c", "%s" % command) class VSqlError(object): def __init__(self, stderr): """Parse the stderr part of the VSql output. stderr looks like: "ERROR 3117: Division by zero" :param stderr: string from executing statement via vsql """ parse = re.match(r"^(ERROR|WARNING) (\d+): (.+)$", stderr) if not parse: raise ValueError("VSql stderr %(msg)s not recognized."
% {'msg': stderr}) self.type = parse.group(1) self.code = int(parse.group(2)) self.msg = parse.group(3) def is_warning(self): return bool(self.type == "WARNING") def __str__(self): return "Vertica %s (%s): %s" % (self.type, self.code, self.msg) def exec_vsql_command(dbadmin_password, command): """Executes a VSQL command with the given dbadmin password.""" out, err = shell_execute("/opt/vertica/bin/vsql -w \'%s\' -c \"%s\"" % (dbadmin_password, command), VERTICA_ADMIN) if err: err = VSqlError(err) return out, err trove-5.0.0/trove/guestagent/datastore/experimental/vertica/__init__.py0000664000567000056710000000000012701410316027533 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/vertica/manager.py0000664000567000056710000001370612701410316027427 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as rd_ins from trove.guestagent.datastore.experimental.vertica.service import ( VerticaAppStatus) from trove.guestagent.datastore.experimental.vertica.service import VerticaApp from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): def __init__(self): self.appStatus = VerticaAppStatus() self.app = VerticaApp(self.appStatus) super(Manager, self).__init__('vertica') @property def status(self): return self.appStatus @property def configuration_manager(self): return self.app.configuration_manager def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): # rsync any existing data device.migrate_data(mount_point) # mount the volume device.mount(mount_point) LOG.debug("Mounted the volume.") self.app.install_if_needed(packages) self.app.prepare_for_install_vertica() if cluster_config is None: self.app.install_vertica() self.app.create_db() self.app.add_udls() if config_contents: self.app.configuration_manager.save_configuration( config_contents) elif cluster_config['instance_type'] not in ["member", "master"]: raise RuntimeError(_("Bad cluster configuration: instance type " "given as %s.") % cluster_config['instance_type']) def restart(self, context): LOG.debug("Restarting the database.") self.app.restart() LOG.debug("Restarted the database.") def stop_db(self, context, do_not_start_on_reboot=False): LOG.debug("Stopping the database.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) LOG.debug("Stopped the database.") def enable_root(self, context): LOG.debug("Enabling root.") return self.app.enable_root() def 
enable_root_with_password(self, context, root_password=None): LOG.debug("Enabling root.") return self.app.enable_root(root_password) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") return self.app.is_root_enabled() def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def get_public_keys(self, context, user): LOG.debug("Retrieving public keys for %s." % user) return self.app.get_public_keys(user) def authorize_public_keys(self, context, user, public_keys): LOG.debug("Authorizing public keys for %s." % user) return self.app.authorize_public_keys(user, public_keys) def install_cluster(self, context, members): try: LOG.debug("Installing cluster on members: %s." % members) self.app.install_cluster(members) self.app.add_udls() LOG.debug("install_cluster call has finished.") except Exception: LOG.exception(_('Cluster installation failed.')) self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self.app.remove_overrides() else: self.app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): if overrides: LOG.debug("Applying overrides: " + str(overrides)) self.app.apply_overrides(overrides) def grow_cluster(self, context, members): try: LOG.debug("Growing cluster to members: %s." % members) self.app.grow_cluster(members) LOG.debug("grow_cluster call has finished.") except Exception: LOG.exception(_('Cluster grow failed.')) self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise def shrink_cluster(self, context, members): try: LOG.debug("Shrinking cluster members: %s." % members) self.app.shrink_cluster(members) LOG.debug("shrink_cluster call has finished.") except Exception: LOG.exception(_('Cluster shrink failed.')) self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise def mark_design_ksafe(self, context, k): try: LOG.debug("Setting vertica k-safety to %s." % k) self.app.mark_design_ksafe(k) except Exception: LOG.exception(_('K-safety setting failed.')) self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise trove-5.0.0/trove/guestagent/datastore/experimental/vertica/service.py0000664000567000056710000006664712701410320027464 0ustar jenkinsjenkins00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
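# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the stderr parsing
# performed by system.VSqlError above. The sample message is made up.
import re

_sample = "ERROR 3117: Division by zero"
_parse = re.match(r"^(ERROR|WARNING) (\d+): (.+)$", _sample)
assert _parse.group(1) == "ERROR"
assert int(_parse.group(2)) == 3117
assert _parse.group(3) == "Division by zero"
# Callers below typically follow the pattern:
#     out, err = system.exec_vsql_command(password, sql)
#     if err and not err.is_warning():
#         raise RuntimeError(str(err))
# ---------------------------------------------------------------------------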
import ConfigParser import os import subprocess import tempfile from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.i18n import _LI from trove.common import instance as rd_instance from trove.common.stream_codecs import PropertiesCodec from trove.common import utils as utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.vertica import system from trove.guestagent.datastore import service from trove.guestagent.db import models from trove.guestagent import pkg from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF packager = pkg.Package() DB_NAME = 'db_srvr' MOUNT_POINT = CONF.vertica.mount_point # We will use a fake configuration file for the options managed through # configuration groups that we apply directly with ALTER DB ... SET ... FAKE_CFG = os.path.join(MOUNT_POINT, "vertica.cfg.fake") class VerticaAppStatus(service.BaseDbStatus): def _get_actual_db_status(self): """Get the status of dbaas and report it back.""" try: out, err = system.shell_execute(system.STATUS_ACTIVE_DB, system.VERTICA_ADMIN) if out.strip() == DB_NAME: # UP status is confirmed LOG.info(_("Service Status is RUNNING.")) return rd_instance.ServiceStatuses.RUNNING else: LOG.info(_("Service Status is SHUTDOWN.")) return rd_instance.ServiceStatuses.SHUTDOWN except exception.ProcessExecutionError: LOG.exception(_("Failed to get database status.")) return rd_instance.ServiceStatuses.CRASHED class VerticaApp(object): """Prepares DBaaS on a Guest container.""" def __init__(self, status): self.state_change_wait_time = CONF.state_change_wait_time self.status = status revision_dir = \ guestagent_utils.build_file_path( os.path.join(MOUNT_POINT, os.path.dirname(system.VERTICA_ADMIN)), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) if not operating_system.exists(FAKE_CFG): operating_system.write_file(FAKE_CFG, '', as_root=True) operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP, as_root=True) operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(), as_root=True) self.configuration_manager = \ ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP, PropertiesCodec(delimiter='='), requires_root=True, override_strategy=ImportOverrideStrategy( revision_dir, "cnf")) def update_overrides(self, context, overrides, remove=False): if overrides: self.apply_overrides(overrides) def remove_overrides(self): config = self.configuration_manager.get_user_override() self._reset_config(config) self.configuration_manager.remove_user_override() def apply_overrides(self, overrides): self.configuration_manager.apply_user_override(overrides) self._apply_config(overrides) def _reset_config(self, config): try: db_password = self._get_database_password() for k, v in config.iteritems(): alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k)) out, err = system.exec_vsql_command(db_password, alter_db_cmd) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to remove config %s") % k) except Exception: LOG.exception(_("Vertica configuration remove failed.")) raise 
RuntimeError(_("Vertica configuration remove failed.")) LOG.info(_("Vertica configuration reset completed.")) def _apply_config(self, config): try: db_password = self._get_database_password() for k, v in config.iteritems(): alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v)) out, err = system.exec_vsql_command(db_password, alter_db_cmd) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to apply config %s") % k) except Exception: LOG.exception(_("Vertica configuration apply failed")) raise RuntimeError(_("Vertica configuration apply failed")) LOG.info(_("Vertica config apply completed.")) def _enable_db_on_boot(self): try: command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c", (system.SET_RESTART_POLICY % (DB_NAME, "always"))] subprocess.Popen(command) command = ["sudo", "su", "-", "root", "-c", (system.VERTICA_AGENT_SERVICE_COMMAND % "enable")] subprocess.Popen(command) except Exception: LOG.exception(_("Failed to enable db on boot.")) raise RuntimeError("Could not enable db on boot.") def _disable_db_on_boot(self): try: command = (system.SET_RESTART_POLICY % (DB_NAME, "never")) system.shell_execute(command, system.VERTICA_ADMIN) command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable") system.shell_execute(command) except exception.ProcessExecutionError: LOG.exception(_("Failed to disable db on boot.")) raise RuntimeError("Could not disable db on boot.") def stop_db(self, update_db=False, do_not_start_on_reboot=False): """Stop the database.""" LOG.info(_("Stopping Vertica.")) if do_not_start_on_reboot: self._disable_db_on_boot() try: # Stop vertica-agent service command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop") system.shell_execute(command) # Using Vertica adminTools to stop db. db_password = self._get_database_password() stop_db_command = (system.STOP_DB % (DB_NAME, db_password)) out, err = system.shell_execute(system.STATUS_ACTIVE_DB, system.VERTICA_ADMIN) if out.strip() == DB_NAME: system.shell_execute(stop_db_command, system.VERTICA_ADMIN) if not self.status._is_restarting: if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db): LOG.error(_("Could not stop Vertica.")) self.status.end_restart() raise RuntimeError("Could not stop Vertica!") LOG.debug("Database stopped.") else: LOG.debug("Database is not running.") except exception.ProcessExecutionError: LOG.exception(_("Failed to stop database.")) raise RuntimeError("Could not stop database.") def start_db(self, update_db=False): """Start the database.""" LOG.info(_("Starting Vertica.")) try: self._enable_db_on_boot() # Start vertica-agent service command = ["sudo", "su", "-", "root", "-c", (system.VERTICA_AGENT_SERVICE_COMMAND % "start")] subprocess.Popen(command) # Using Vertica adminTools to start db. db_password = self._get_database_password() start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c", (system.START_DB % (DB_NAME, db_password))] subprocess.Popen(start_db_command) if not self.status._is_restarting: self.status.end_restart() LOG.debug("Database started.") except Exception as e: raise RuntimeError(_("Could not start Vertica due to %s") % e) def start_db_with_conf_changes(self, config_contents): """ Currently all that this method does is to start Vertica. This method needs to be implemented to enable volume resize on guestagent side. 
""" LOG.info(_("Starting Vertica with configuration changes.")) if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info(_("Initiating config.")) self.configuration_manager.save_configuration(config_contents) self.start_db(True) def restart(self): """Restart the database.""" try: self.status.begin_restart() self.stop_db() self.start_db() finally: self.status.end_restart() def add_db_to_node(self, members=netutils.get_my_ipv4()): """Add db to host with admintools""" LOG.info(_("Calling admintools to add DB to host")) try: # Create db after install db_password = self._get_database_password() create_db_command = (system.ADD_DB_TO_NODE % (members, DB_NAME, db_password)) system.shell_execute(create_db_command, "dbadmin") except exception.ProcessExecutionError: # Give vertica some time to get the the node up, won't be available # by the time adminTools -t db_add_node completes LOG.info(_("adminTools failed as expected - wait for node")) self.wait_for_node_status() LOG.info(_("Vertica add db to host completed.")) def remove_db_from_node(self, members=netutils.get_my_ipv4()): """Remove db from node with admintools""" LOG.info(_("Removing db from node")) try: # Create db after install db_password = self._get_database_password() create_db_command = (system.REMOVE_DB_FROM_NODE % (members, DB_NAME, db_password)) system.shell_execute(create_db_command, "dbadmin") except exception.ProcessExecutionError: # Give vertica some time to get the the node up, won't be available # by the time adminTools -t db_add_node completes LOG.info(_("adminTools failed as expected - wait for node")) # Give vertica some time to take the node down - it won't be available # by the time adminTools -t db_add_node completes self.wait_for_node_status() LOG.info(_("Vertica remove host from db completed.")) def create_db(self, members=netutils.get_my_ipv4()): """Prepare the guest machine with a Vertica db creation.""" LOG.info(_("Creating database on Vertica host.")) try: # Create db after install db_password = self._get_database_password() create_db_command = (system.CREATE_DB % (members, DB_NAME, MOUNT_POINT, MOUNT_POINT, db_password)) system.shell_execute(create_db_command, system.VERTICA_ADMIN) except Exception: LOG.exception(_("Vertica database create failed.")) raise RuntimeError(_("Vertica database create failed.")) LOG.info(_("Vertica database create completed.")) def install_vertica(self, members=netutils.get_my_ipv4()): """Prepare the guest machine with a Vertica db creation.""" LOG.info(_("Installing Vertica Server.")) try: # Create db after install install_vertica_cmd = (system.INSTALL_VERTICA % (members, MOUNT_POINT)) system.shell_execute(install_vertica_cmd) except exception.ProcessExecutionError: LOG.exception(_("install_vertica failed.")) raise RuntimeError(_("install_vertica failed.")) self._generate_database_password() LOG.info(_("install_vertica completed.")) def update_vertica(self, command, members=netutils.get_my_ipv4()): LOG.info(_("Calling update_vertica with command %s") % command) try: update_vertica_cmd = (system.UPDATE_VERTICA % (command, members, MOUNT_POINT)) system.shell_execute(update_vertica_cmd) except exception.ProcessExecutionError: LOG.exception(_("update_vertica failed.")) raise RuntimeError(_("update_vertica failed.")) # self._generate_database_password() LOG.info(_("update_vertica completed.")) def add_udls(self): """Load the user defined load libraries into the database.""" 
LOG.info(_("Adding configured user defined load libraries.")) password = self._get_database_password() loaded_udls = [] for lib in system.UDL_LIBS: func_name = lib['func_name'] lib_name = lib['lib_name'] language = lib['language'] factory = lib['factory'] path = lib['path'] if os.path.isfile(path): LOG.debug("Adding the %s library as %s." % (func_name, lib_name)) out, err = system.exec_vsql_command( password, system.CREATE_LIBRARY % (lib_name, path) ) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to create library %s.") % lib_name) out, err = system.exec_vsql_command( password, system.CREATE_SOURCE % (func_name, language, factory, lib_name) ) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to create source %s.") % func_name) loaded_udls.append(func_name) else: LOG.warning("Skipping %s as path %s not found." % (func_name, path)) LOG.info(_("The following UDL functions are available for use: %s") % loaded_udls) def _generate_database_password(self): """Generate and write the password to vertica.cnf file.""" config = ConfigParser.ConfigParser() config.add_section('credentials') config.set('credentials', 'dbadmin_password', utils.generate_random_password()) self.write_config(config) def write_config(self, config, unlink_function=os.unlink, temp_function=tempfile.NamedTemporaryFile): """Write the configuration contents to vertica.cnf file.""" LOG.debug('Defining config holder at %s.' % system.VERTICA_CONF) tempfile = temp_function(delete=False) try: config.write(tempfile) tempfile.close() command = (("install -o root -g root -m 644 %(source)s %(target)s" ) % {'source': tempfile.name, 'target': system.VERTICA_CONF}) system.shell_execute(command) unlink_function(tempfile.name) except Exception: unlink_function(tempfile.name) raise def read_config(self): """Reads and returns the Vertica config.""" try: config = ConfigParser.ConfigParser() config.read(system.VERTICA_CONF) return config except Exception: LOG.exception(_("Failed to read config %s.") % system.VERTICA_CONF) raise RuntimeError def _get_database_password(self): """Read the password from vertica.cnf file and return it.""" return self.read_config().get('credentials', 'dbadmin_password') def install_if_needed(self, packages): """Install Vertica package if needed.""" LOG.info(_("Preparing Guest as Vertica Server.")) if not packager.pkg_is_installed(packages): LOG.debug("Installing Vertica Package.") packager.pkg_install(packages, None, system.INSTALL_TIMEOUT) def _set_readahead_for_disks(self): """This method sets readhead size for disks as needed by Vertica.""" device = volume.VolumeDevice(CONF.device_path) device.set_readahead_size(CONF.vertica.readahead_size) LOG.debug("Set readhead size as required by Vertica.") def prepare_for_install_vertica(self): """This method executes preparatory methods before executing install_vertica. 
""" command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin " "VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python" " -m vertica.local_coerce" % (system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP)) try: self._set_readahead_for_disks() system.shell_execute(command) except exception.ProcessExecutionError: LOG.exception(_("Failed to prepare for install_vertica.")) raise def mark_design_ksafe(self, k): """Wrapper for mark_design_ksafe function for setting k-safety """ LOG.info(_("Setting Vertica k-safety to %s") % str(k)) out, err = system.exec_vsql_command(self._get_database_password(), system.MARK_DESIGN_KSAFE % k) # Only fail if we get an ERROR as opposed to a warning complaining # about setting k = 0 if "ERROR" in err: LOG.error(err) raise RuntimeError(_("Failed to set k-safety level %s.") % k) def _create_user(self, username, password, role=None): """Creates a user, granting and enabling the given role for it.""" LOG.info(_("Creating user in Vertica database.")) out, err = system.exec_vsql_command(self._get_database_password(), system.CREATE_USER % (username, password)) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to create user %s.") % username) if role: self._grant_role(username, role) def _grant_role(self, username, role): """Grants a role to the user on the schema.""" out, err = system.exec_vsql_command(self._get_database_password(), system.GRANT_TO_USER % (role, username)) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to grant role %(r)s to user " "%(u)s.") % {'r': role, 'u': username}) out, err = system.exec_vsql_command(self._get_database_password(), system.ENABLE_FOR_USER % (username, role)) if err: LOG.warning(err) def enable_root(self, root_password=None): """Resets the root password.""" LOG.info(_LI("Enabling root.")) user = models.RootUser() user.name = "root" user.host = "%" user.password = root_password or utils.generate_random_password() if not self.is_root_enabled(): self._create_user(user.name, user.password, 'pseudosuperuser') else: LOG.debug("Updating %s password." % user.name) try: out, err = system.exec_vsql_command( self._get_database_password(), system.ALTER_USER_PASSWORD % (user.name, user.password)) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to update %s " "password.") % user.name) except exception.ProcessExecutionError: LOG.error(_("Failed to update %s password.") % user.name) raise RuntimeError(_("Failed to update %s password.") % user.name) return user.serialize() def is_root_enabled(self): """Return True if root access is enabled else False.""" LOG.debug("Checking is root enabled.") try: out, err = system.shell_execute(system.USER_EXISTS % (self._get_database_password(), 'root'), system.VERTICA_ADMIN) if err: LOG.error(err) raise RuntimeError(_("Failed to query for root user.")) except exception.ProcessExecutionError: raise RuntimeError(_("Failed to query for root user.")) return out.rstrip() == "1" def get_public_keys(self, user): """Generates key (if not found), and sends public key for user.""" LOG.debug("Public keys requested for user: %s." 
% user) user_home_directory = os.path.expanduser('~' + user) public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub' try: key_generate_command = (system.SSH_KEY_GEN % user_home_directory) system.shell_execute(key_generate_command, user) except exception.ProcessExecutionError: LOG.debug("Cannot generate key.") try: read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name}) out, err = system.shell_execute(read_key_cmd) except exception.ProcessExecutionError: LOG.exception(_("Cannot read public key.")) raise return out.strip() def authorize_public_keys(self, user, public_keys): """Adds public key to authorized_keys for user.""" LOG.debug("public keys to be added for user: %s." % (user)) user_home_directory = os.path.expanduser('~' + user) authorized_file_name = user_home_directory + '/.ssh/authorized_keys' try: read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name}) out, err = system.shell_execute(read_key_cmd) public_keys.append(out.strip()) except exception.ProcessExecutionError: LOG.debug("Cannot read authorized_keys.") all_keys = '\n'.join(public_keys) + "\n" try: with tempfile.NamedTemporaryFile(delete=False) as tempkeyfile: tempkeyfile.write(all_keys) copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s" ) % {'user': user, 'source': tempkeyfile.name, 'target': authorized_file_name}) system.shell_execute(copy_key_cmd) os.remove(tempkeyfile.name) except exception.ProcessExecutionError: LOG.exception(_("Cannot install public keys.")) os.remove(tempkeyfile.name) raise def _export_conf_to_members(self, members): """This method exports conf files to other members.""" try: for member in members: COPY_CMD = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF, member, system.VERTICA_CONF)) system.shell_execute(COPY_CMD) except exception.ProcessExecutionError: LOG.exception(_("Cannot export configuration.")) raise def install_cluster(self, members): """Installs & configures cluster.""" cluster_members = ','.join(members) LOG.debug("Installing cluster with members: %s." % cluster_members) self.install_vertica(cluster_members) self._export_conf_to_members(members) LOG.debug("Creating database with members: %s." % cluster_members) self.create_db(cluster_members) LOG.debug("Cluster configured on members: %s." % cluster_members) def grow_cluster(self, members): """Adds nodes to cluster.""" cluster_members = ','.join(members) LOG.debug("Growing cluster with members: %s." % cluster_members) self.update_vertica("--add-hosts", cluster_members) self._export_conf_to_members(members) LOG.debug("Creating database with members: %s." % cluster_members) self.add_db_to_node(cluster_members) LOG.debug("Cluster configured on members: %s." % cluster_members) def shrink_cluster(self, members): """Removes nodes from cluster.""" cluster_members = ','.join(members) LOG.debug("Shrinking cluster with members: %s." 
                  % cluster_members)
        self.remove_db_from_node(cluster_members)
        self.update_vertica("--remove-hosts", cluster_members)

    def wait_for_node_status(self, status='UP'):
        """Wait until all nodes report the given status."""
        # select node_state from nodes where node_state <> 'UP'
        def _wait_for_node_status():
            out, err = system.exec_vsql_command(self._get_database_password(),
                                                system.NODE_STATUS % status)
            LOG.debug("Polled vertica node states: %s" % out)
            if err:
                LOG.error(err)
                raise RuntimeError(_("Failed to query node status."))
            return "0 rows" in out

        try:
            utils.poll_until(_wait_for_node_status, time_out=600,
                             sleep_time=15)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timed out waiting for cluster to "
                                 "change to status %s") % status)
trove-5.0.0/trove/guestagent/datastore/experimental/couchbase/0000775000567000056710000000000012701410521025731 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/couchbase/system.py0000664000567000056710000000460012701410316027631 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from trove.common import cfg

CONF = cfg.CONF

TIME_OUT = 1200
COUCHBASE_DUMP_DIR = '/tmp/backups'
COUCHBASE_CONF_DIR = '/etc/couchbase'
COUCHBASE_WEBADMIN_PORT = '8091'
COUCHBASE_REST_API = 'http://localhost:' + COUCHBASE_WEBADMIN_PORT
BUCKETS_JSON = '/buckets.json'
SECRET_KEY = '/secret_key'
SERVICE_CANDIDATES = ["couchbase-server"]
INSTANCE_DATA_DIR = '/opt/couchbase/var/lib/couchbase/data'
cmd_couchbase_status = ('sudo /opt/couchbase/bin/couchbase-cli server-info '
                        '-c %(IP)s:8091 -u root -p %(PWD)s')
cmd_node_init = ('sudo /opt/couchbase/bin/couchbase-cli node-init '
                 '-c %(IP)s:8091 --node-init-data-path=%(data_path)s '
                 '-u root -p %(PWD)s')
cmd_cluster_init = ('sudo /opt/couchbase/bin/couchbase-cli cluster-init '
                    '-c %(IP)s:8091 --cluster-init-username=root '
                    '--cluster-init-password=%(PWD)s '
                    '--cluster-init-port=8091 '
                    '-u root -p %(PWD)s')
cmd_kill = 'sudo pkill -u couchbase'
"""
For optimal couchbase operations, swappiness of vm should be set to 0.
Reference link: http://docs.couchbase.com/couchbase-manual-2
.5/cb-admin/#using-couchbase-in-the-cloud
"""
cmd_set_swappiness = 'sudo sysctl vm.swappiness=0'
cmd_update_sysctl_conf = ('echo "vm.swappiness = 0" | sudo tee -a '
                          '/etc/sysctl.conf')
cmd_reset_pwd = 'sudo /opt/couchbase/bin/cbreset_password %(IP)s:8091'
pwd_file = COUCHBASE_CONF_DIR + SECRET_KEY
cmd_get_password_from_config = """sudo /opt/couchbase/bin/erl -noinput -eval \
'case file:read_file("/opt/couchbase/var/lib/couchbase/config/config.dat") \
of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.'
\ -run init stop | grep '\[{"root",\[{password,' | awk -F\\" '{print $4}' """ trove-5.0.0/trove/guestagent/datastore/experimental/couchbase/__init__.py0000664000567000056710000000000012701410316030032 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/couchbase/manager.py0000664000567000056710000001073212701410316027722 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchbase import service from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is Couchbase Manager class. It is dynamically loaded based off of the datastore of the trove instance """ def __init__(self): self.appStatus = service.CouchbaseAppStatus() self.app = service.CouchbaseApp(self.appStatus) super(Manager, self).__init__('couchbase') @property def status(self): return self.appStatus def reset_configuration(self, context, configuration): self.app.reset_configuration(configuration) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() device.mount(mount_point) LOG.debug('Mounted the volume (%s).' % device_path) self.app.start_db_with_conf_changes(config_contents) LOG.debug('Securing couchbase now.') self.app.initial_setup() if backup_info: LOG.debug('Now going to perform restore.') self._perform_restore(backup_info, context, mount_point) def restart(self, context): """ Restart this couchbase instance. This method is called when the guest agent gets a restart message from the taskmanager. """ self.app.restart() def start_db_with_conf_changes(self, context, config_contents): self.app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this couchbase instance. This method is called when the guest agent gets a stop message from the taskmanager. 
""" self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def enable_root(self, context): LOG.debug("Enabling root.") return self.app.enable_root() def enable_root_with_password(self, context, root_password=None): return self.app.enable_root(root_password) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") return os.path.exists(system.pwd_file) def _perform_restore(self, backup_info, context, restore_location): """ Restores all couchbase buckets and their documents from the backup. """ LOG.info(_("Restoring database from backup %s") % backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception as e: LOG.error(_("Error performing restore from backup %s") % backup_info['id']) LOG.error(e) self.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info(_("Restored database successfully")) def create_backup(self, context, backup_info): """ Backup all couchbase buckets and their documents. """ with EndNotification(context): backup.backup(context, backup_info) trove-5.0.0/trove/guestagent/datastore/experimental/couchbase/service.py0000664000567000056710000002426612701410316027757 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import stat import subprocess import tempfile from oslo_log import log as logging from oslo_utils import netutils import pexpect from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common import utils as utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.datastore import service from trove.guestagent.db import models from trove.guestagent import pkg LOG = logging.getLogger(__name__) CONF = cfg.CONF packager = pkg.Package() class CouchbaseApp(object): """ Handles installation and configuration of couchbase on a trove instance. """ def __init__(self, status, state_change_wait_time=None): """ Sets default status and state_change_wait_time """ if state_change_wait_time: self.state_change_wait_time = state_change_wait_time else: self.state_change_wait_time = CONF.state_change_wait_time self.status = status def install_if_needed(self, packages): """ Install couchbase if needed, do nothing if it is already installed. 
""" LOG.info(_('Preparing Guest as Couchbase Server.')) if not packager.pkg_is_installed(packages): LOG.debug('Installing Couchbase.') self._install_couchbase(packages) def initial_setup(self): self.ip_address = netutils.get_my_ipv4() mount_point = CONF.couchbase.mount_point try: LOG.info(_('Couchbase Server change data dir path.')) operating_system.chown(mount_point, 'couchbase', 'couchbase', as_root=True) pwd = CouchbaseRootAccess.get_password() utils.execute_with_timeout( (system.cmd_node_init % {'data_path': mount_point, 'IP': self.ip_address, 'PWD': pwd}), shell=True) operating_system.remove(system.INSTANCE_DATA_DIR, force=True, as_root=True) LOG.debug('Couchbase Server initialize cluster.') utils.execute_with_timeout( (system.cmd_cluster_init % {'IP': self.ip_address, 'PWD': pwd}), shell=True) utils.execute_with_timeout(system.cmd_set_swappiness, shell=True) utils.execute_with_timeout(system.cmd_update_sysctl_conf, shell=True) LOG.info(_('Couchbase Server initial setup finished.')) except exception.ProcessExecutionError: LOG.exception(_('Error performing initial Couchbase setup.')) raise RuntimeError("Couchbase Server initial setup failed") def _install_couchbase(self, packages): """ Install the Couchbase Server. """ LOG.debug('Installing Couchbase Server. Creating %s' % system.COUCHBASE_CONF_DIR) operating_system.create_directory(system.COUCHBASE_CONF_DIR, as_root=True) pkg_opts = {} packager.pkg_install(packages, pkg_opts, system.TIME_OUT) self.start_db() LOG.debug('Finished installing Couchbase Server.') def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time) def start_db(self, update_db=False): self.status.start_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def enable_root(self, root_password=None): return CouchbaseRootAccess.enable_root(root_password) def start_db_with_conf_changes(self, config_contents): LOG.info(_("Starting Couchbase with configuration changes.\n" "Configuration contents:\n %s.") % config_contents) if self.status.is_running: LOG.error(_("Cannot start Couchbase with configuration changes. " "Couchbase state == %s.") % self.status) raise RuntimeError("Couchbase is not stopped.") self._write_config(config_contents) self.start_db(True) def reset_configuration(self, configuration): config_contents = configuration['config_contents'] LOG.debug("Resetting configuration.") self._write_config(config_contents) def _write_config(self, config_contents): """ Update contents of Couchbase configuration file """ return class CouchbaseAppStatus(service.BaseDbStatus): """ Handles all of the status updating for the couchbase guest agent. 
""" def _get_actual_db_status(self): self.ip_address = netutils.get_my_ipv4() pwd = None try: pwd = CouchbaseRootAccess.get_password() return self._get_status_from_couchbase(pwd) except exception.ProcessExecutionError: # log the exception, but continue with native config approach LOG.exception(_("Error getting the Couchbase status.")) try: out, err = utils.execute_with_timeout( system.cmd_get_password_from_config, shell=True) except exception.ProcessExecutionError: LOG.exception(_("Error getting the root password from the " "native Couchbase config file.")) return rd_instance.ServiceStatuses.SHUTDOWN config_pwd = out.strip() if out is not None else None if not config_pwd or config_pwd == pwd: LOG.debug("The root password from the native Couchbase config " "file is either empty or already matches the " "stored value.") return rd_instance.ServiceStatuses.SHUTDOWN try: status = self._get_status_from_couchbase(config_pwd) except exception.ProcessExecutionError: LOG.exception(_("Error getting Couchbase status using the " "password parsed from the native Couchbase " "config file.")) return rd_instance.ServiceStatuses.SHUTDOWN # if the parsed root password worked, update the stored value to # avoid having to consult/parse the couchbase config file again. LOG.debug("Updating the stored value for the Couchbase " "root password.") CouchbaseRootAccess().write_password_to_file(config_pwd) return status def _get_status_from_couchbase(self, pwd): out, err = utils.execute_with_timeout( (system.cmd_couchbase_status % {'IP': self.ip_address, 'PWD': pwd}), shell=True) server_stats = json.loads(out) if not err and server_stats["clusterMembership"] == "active": return rd_instance.ServiceStatuses.RUNNING else: return rd_instance.ServiceStatuses.SHUTDOWN def cleanup_stalled_db_services(self): utils.execute_with_timeout(system.cmd_kill) class CouchbaseRootAccess(object): @classmethod def enable_root(cls, root_password=None): user = models.RootUser() user.name = "root" user.host = "%" user.password = root_password or utils.generate_random_password() if root_password: CouchbaseRootAccess().write_password_to_file(root_password) else: CouchbaseRootAccess().set_password(user.password) return user.serialize() def set_password(self, root_password): self.ip_address = netutils.get_my_ipv4() child = pexpect.spawn(system.cmd_reset_pwd % {'IP': self.ip_address}) try: child.expect('.*password.*') child.sendline(root_password) child.expect('.*(yes/no).*') child.sendline('yes') child.expect('.*successfully.*') except pexpect.TIMEOUT: child.delayafterclose = 1 child.delayafterterminate = 1 try: child.close(force=True) except pexpect.ExceptionPexpect: # Close fails to terminate a sudo process on some OSes. subprocess.call(['sudo', 'kill', str(child.pid)]) self.write_password_to_file(root_password) def write_password_to_file(self, root_password): operating_system.create_directory(system.COUCHBASE_CONF_DIR, as_root=True) try: tempfd, tempname = tempfile.mkstemp() os.fchmod(tempfd, stat.S_IRUSR | stat.S_IWUSR) os.write(tempfd, root_password) os.fchmod(tempfd, stat.S_IRUSR) os.close(tempfd) except OSError as err: message = _("An error occurred in saving password " "(%(errno)s). 
%(strerror)s.") % { "errno": err.errno, "strerror": err.strerror} LOG.exception(message) raise RuntimeError(message) operating_system.move(tempname, system.pwd_file, as_root=True) @staticmethod def get_password(): pwd = "password" if os.path.exists(system.pwd_file): with open(system.pwd_file) as file: pwd = file.readline().strip() return pwd trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/0000775000567000056710000000000012701410521026200 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/pgutil.py0000664000567000056710000001755112701410316030071 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import psycopg2 from trove.common import exception LOG = logging.getLogger(__name__) PG_ADMIN = 'os_admin' class PostgresConnection(object): def __init__(self, autocommit=False, **connection_args): self._autocommit = autocommit self._connection_args = connection_args def execute(self, statement, identifiers=None, data_values=None): """Execute a non-returning statement. """ self._execute_stmt(statement, identifiers, data_values, False) def query(self, query, identifiers=None, data_values=None): """Execute a query and return the result set. """ return self._execute_stmt(query, identifiers, data_values, True) def _execute_stmt(self, statement, identifiers, data_values, fetch): if statement: with psycopg2.connect(**self._connection_args) as connection: connection.autocommit = self._autocommit with connection.cursor() as cursor: cursor.execute( self._bind(statement, identifiers), data_values) if fetch: return cursor.fetchall() else: raise exception.UnprocessableEntity(_("Invalid SQL statement: %s") % statement) def _bind(self, statement, identifiers): if identifiers: return statement.format(*identifiers) return statement class PostgresLocalhostConnection(PostgresConnection): HOST = 'localhost' def __init__(self, user, password=None, port=5432, autocommit=False): super(PostgresLocalhostConnection, self).__init__( autocommit=autocommit, user=user, password=password, host=self.HOST, port=port) # TODO(pmalik): No need to recreate the connection every time. def psql(statement, timeout=30): """Execute a non-returning statement (usually DDL); Turn autocommit ON (this is necessary for statements that cannot run within an implicit transaction, like CREATE DATABASE). """ return PostgresLocalhostConnection( PG_ADMIN, autocommit=True).execute(statement) # TODO(pmalik): No need to recreate the connection every time. def query(query, timeout=30): """Execute a query and return the result set. 
""" return PostgresLocalhostConnection( PG_ADMIN, autocommit=False).query(query) class DatabaseQuery(object): @classmethod def list(cls, ignore=()): """Query to list all databases.""" statement = ( "SELECT datname, pg_encoding_to_char(encoding), " "datcollate FROM pg_database " "WHERE datistemplate = false" ) for name in ignore: statement += " AND datname != '{name}'".format(name=name) return statement @classmethod def create(cls, name, encoding=None, collation=None): """Query to create a database.""" statement = "CREATE DATABASE \"{name}\"".format(name=name) if encoding is not None: statement += " ENCODING = '{encoding}'".format( encoding=encoding, ) if collation is not None: statement += " LC_COLLATE = '{collation}'".format( collation=collation, ) return statement @classmethod def drop(cls, name): """Query to drop a database.""" return "DROP DATABASE IF EXISTS \"{name}\"".format(name=name) class UserQuery(object): @classmethod def list(cls, ignore=()): """Query to list all users.""" statement = "SELECT usename FROM pg_catalog.pg_user" if ignore: # User a simple tautology so all clauses can be AND'ed without # crazy special logic. statement += " WHERE 1=1" for name in ignore: statement += " AND usename != '{name}'".format(name=name) return statement @classmethod def list_root(cls, ignore=()): """Query to list all superuser accounts.""" statement = ( "SELECT usename FROM pg_catalog.pg_user WHERE usesuper = true" ) for name in ignore: statement += " AND usename != '{name}'".format(name=name) return statement @classmethod def get(cls, name): """Query to get a single user.""" return ( "SELECT usename FROM pg_catalog.pg_user " "WHERE usename = '{name}'".format(name=name) ) @classmethod def create(cls, name, password, encrypt_password=None, *options): """Query to create a user with a password.""" create_clause = "CREATE USER \"{name}\"".format(name=name) with_clause = cls._build_with_clause( password, encrypt_password, *options) return ''.join([create_clause, with_clause]) @classmethod def _build_with_clause(cls, password, encrypt_password=None, *options): tokens = ['WITH'] if password: # Do not specify the encryption option if 'encrypt_password' # is None. PostgreSQL will use the configuration default. 
if encrypt_password is True: tokens.append('ENCRYPTED') elif encrypt_password is False: tokens.append('UNENCRYPTED') tokens.append('PASSWORD') tokens.append("'{password}'".format(password=password)) if options: tokens.extend(options) if len(tokens) > 1: return ' '.join(tokens) return '' @classmethod def update_password(cls, name, password, encrypt_password=None): """Query to update the password for a user.""" return cls.alter_user(name, password, encrypt_password) @classmethod def alter_user(cls, name, password, encrypt_password=None, *options): """Query to alter a user.""" alter_clause = "ALTER USER \"{name}\"".format(name=name) with_clause = cls._build_with_clause( password, encrypt_password, *options) return ''.join([alter_clause, with_clause]) @classmethod def update_name(cls, old, new): """Query to update the name of a user.""" return "ALTER USER \"{old}\" RENAME TO \"{new}\"".format( old=old, new=new, ) @classmethod def drop(cls, name): """Query to drop a user.""" return "DROP USER \"{name}\"".format(name=name) class AccessQuery(object): @classmethod def list(cls, user): """Query to list grants for a user.""" return ( "SELECT datname, pg_encoding_to_char(encoding), datcollate " "FROM pg_database " "WHERE datistemplate = false " "AND 'user {user}=CTc' = ANY (datacl)".format(user=user) ) @classmethod def grant(cls, user, database): """Query to grant user access to a database.""" return "GRANT ALL ON DATABASE \"{database}\" TO \"{user}\"".format( database=database, user=user, ) @classmethod def revoke(cls, user, database): """Query to revoke user access to a database.""" return "REVOKE ALL ON DATABASE \"{database}\" FROM \"{user}\"".format( database=database, user=user, ) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/__init__.py0000664000567000056710000000000012701410316030301 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/manager.py0000664000567000056710000001100312701410316030161 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
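
# Note on structure: the Manager class below is assembled from the service
# mixins (PgSqlDatabase, PgSqlRoot, PgSqlConfig, PgSqlInstall) layered over
# the common guestagent manager.Manager base; each mixin contributes one
# slice of the guest API (databases, root access, configuration and
# installation respectively).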
import os from oslo_log import log as logging from .service.config import PgSqlConfig from .service.database import PgSqlDatabase from .service.install import PgSqlInstall from .service.root import PgSqlRoot from .service.status import PgSqlAppStatus import pgutil from trove.common import cfg from trove.common.notification import EndNotification from trove.common import utils from trove.guestagent import backup from trove.guestagent.datastore import manager from trove.guestagent import guest_log from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager( PgSqlDatabase, PgSqlRoot, PgSqlConfig, PgSqlInstall, manager.Manager ): PG_BUILTIN_ADMIN = 'postgres' def __init__(self): super(Manager, self).__init__('postgresql') @property def status(self): return PgSqlAppStatus.get() @property def configuration_manager(self): return self._configuration_manager @property def datastore_log_defs(self): owner = 'postgres' datastore_dir = '/var/log/postgresql/' long_query_time = CONF.get(self.manager).get( 'guest_log_long_query_time') general_log_file = self.build_log_file_name( self.GUEST_LOG_DEFS_GENERAL_LABEL, owner, datastore_dir=datastore_dir) general_log_dir, general_log_filename = os.path.split(general_log_file) return { self.GUEST_LOG_DEFS_GENERAL_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: general_log_file, self.GUEST_LOG_ENABLE_LABEL: { 'logging_collector': 'on', 'log_destination': self._quote_str('stderr'), 'log_directory': self._quote_str(general_log_dir), 'log_filename': self._quote_str(general_log_filename), 'log_statement': self._quote_str('all'), 'debug_print_plan': 'on', 'log_min_duration_statement': long_query_time, }, self.GUEST_LOG_DISABLE_LABEL: { 'logging_collector': 'off', }, self.GUEST_LOG_RESTART_LABEL: True, }, } def _quote_str(self, value): return "'%s'" % value def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): pgutil.PG_ADMIN = self.PG_BUILTIN_ADMIN self.install(context, packages) self.stop_db(context) if device_path: device = volume.VolumeDevice(device_path) device.format() if os.path.exists(mount_point): device.migrate_data(mount_point) device.mount(mount_point) self.configuration_manager.save_configuration(config_contents) self.apply_initial_guestagent_configuration() self.start_db(context) if backup_info: backup.restore(context, backup_info, '/tmp') pgutil.PG_ADMIN = self.ADMIN_USER else: self._secure(context) def _secure(self, context): # Create a new administrative user for Trove and also # disable the built-in superuser. self.create_database(context, [{'_name': self.ADMIN_USER}]) self._create_admin_user(context) pgutil.PG_ADMIN = self.ADMIN_USER postgres = {'_name': self.PG_BUILTIN_ADMIN, '_password': utils.generate_random_password()} self.alter_user(context, postgres, 'NOSUPERUSER', 'NOLOGIN') def create_backup(self, context, backup_info): with EndNotification(context): backup.backup(context, backup_info) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/0000775000567000056710000000000012701410521027640 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/process.py0000664000567000056710000000514512701410316031677 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import cfg from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.postgresql.service.status import ( PgSqlAppStatus) from trove.guestagent import guest_log LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlProcess(object): """Mixin that manages the PgSql process.""" SERVICE_CANDIDATES = ["postgresql"] PGSQL_OWNER = 'postgres' DATA_BASE = '/var/lib/postgresql/' PID_FILE = '/var/run/postgresql/postgresql.pid' UNIX_SOCKET_DIR = '/var/run/postgresql/' @property def pgsql_data_dir(self): return os.path.dirname(self.pg_version[0]) @property def pg_version(self): """Find the database version file stored in the data directory. :returns: A tuple with the path to the version file (in the root of the data directory) and the version string. """ version_files = operating_system.list_files_in_directory( self.DATA_BASE, recursive=True, pattern='PG_VERSION', as_root=True) version_file = sorted(version_files, key=len)[0] version = operating_system.read_file(version_file, as_root=True) return version_file, version.strip() def restart(self, context): PgSqlAppStatus.get().restart_db_service( self.SERVICE_CANDIDATES, CONF.state_change_wait_time) self.set_guest_log_status(guest_log.LogStatus.Restart_Completed) def start_db(self, context, enable_on_boot=True, update_db=False): PgSqlAppStatus.get().start_db_service( self.SERVICE_CANDIDATES, CONF.state_change_wait_time, enable_on_boot=enable_on_boot, update_db=update_db) def stop_db(self, context, do_not_start_on_reboot=False, update_db=False): PgSqlAppStatus.get().stop_db_service( self.SERVICE_CANDIDATES, CONF.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/root.py0000664000567000056710000000720112701410316031177 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from trove.common import cfg from trove.common import utils from trove.guestagent.datastore.experimental.postgresql import pgutil from trove.guestagent.datastore.experimental.postgresql.service.users import ( PgSqlUsers) LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlRoot(PgSqlUsers): """Mixin that provides the root-enable API.""" def __init__(self, *args, **kwargs): super(PgSqlRoot, self).__init__(*args, **kwargs) def is_root_enabled(self, context): """Return True if there is a superuser account enabled. """ results = pgutil.query( pgutil.UserQuery.list_root(), timeout=30, ) # There should be only one superuser (Trove's administrative account). return len(results) > 1 or (results[0] != self.ADMIN_USER) # TODO(pmalik): For future use by 'root-disable'. # def disable_root(self, context): # """Generate a new random password for the public superuser account. # Do not disable its access rights. Once enabled the account should # stay that way. # """ # self.enable_root(context) def enable_root(self, context, root_password=None): """Create a superuser user or reset the superuser password. The default PostgreSQL administration account is 'postgres'. This account always exists and cannot be removed. Its attributes and access can however be altered. Clients can connect from the localhost or remotely via TCP/IP: Local clients (e.g. psql) can connect from a preset *system* account called 'postgres'. This system account has no password and is *locked* by default, so that it can be used by *local* users only. It should *never* be enabled (or it's password set)!!! That would just open up a new attack vector on the system account. Remote clients should use a build-in *database* account of the same name. It's password can be changed using the "ALTER USER" statement. Access to this account is disabled by Trove exposed only once the superuser access is requested. Trove itself creates its own administrative account. {"_name": "postgres", "_password": ""} """ user = { "_name": "postgres", "_password": root_password or utils.generate_random_password(), } query = pgutil.UserQuery.alter_user( user['_name'], user['_password'], None, *self.ADMIN_OPTIONS ) pgutil.psql(query, timeout=30) return user def disable_root(self, context): """Generate a new random password for the public superuser account. Do not disable its access rights. Once enabled the account should stay that way. """ self.enable_root(context) def enable_root_with_password(self, context, root_password=None): self.enable_root(context, root_password) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/__init__.py0000664000567000056710000000000012701410316031741 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/install.py0000664000567000056710000000621112701410316031662 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.guestagent.datastore.experimental.postgresql.service.process import( PgSqlProcess) from trove.guestagent import pkg LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlInstall(PgSqlProcess): """Mixin class that provides a PgSql installer. This mixin has a dependency on the PgSqlProcess mixin. """ def __init__(self, *args, **kwargs): super(PgSqlInstall, self).__init__(*args, **kwargs) def install(self, context, packages): """Install one or more packages that postgresql needs to run. The packages parameter is a string representing the package names that should be given to the system's package manager. """ LOG.debug( "{guest_id}: Beginning PgSql package installation.".format( guest_id=CONF.guest_id ) ) packager = pkg.Package() if not packager.pkg_is_installed(packages): try: LOG.info( _("{guest_id}: Installing ({packages}).").format( guest_id=CONF.guest_id, packages=packages, ) ) packager.pkg_install(packages, {}, 1000) except (pkg.PkgAdminLockError, pkg.PkgPermissionError, pkg.PkgPackageStateError, pkg.PkgNotFoundError, pkg.PkgTimeout, pkg.PkgScriptletError, pkg.PkgDownloadError, pkg.PkgSignError, pkg.PkgBrokenError): LOG.exception( "{guest_id}: There was a package manager error while " "trying to install ({packages}).".format( guest_id=CONF.guest_id, packages=packages, ) ) raise except Exception: LOG.exception( "{guest_id}: The package manager encountered an unknown " "error while trying to install ({packages}).".format( guest_id=CONF.guest_id, packages=packages, ) ) raise else: self.start_db(context) LOG.debug( "{guest_id}: Completed package installation.".format( guest_id=CONF.guest_id, ) ) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/config.py0000664000567000056710000001654212701410316031471 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import OrderedDict import os from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.stream_codecs import PropertiesCodec from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.postgresql.service.process import( PgSqlProcess) from trove.guestagent.datastore.experimental.postgresql.service.status import( PgSqlAppStatus) from trove.guestagent.datastore.experimental.postgresql import pgutil LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlConfig(PgSqlProcess): """Mixin that implements the config API. This mixin has a dependency on the PgSqlProcess mixin. 
""" OS = operating_system.get_os() CONFIG_BASE = { operating_system.DEBIAN: '/etc/postgresql/', operating_system.REDHAT: '/var/lib/postgresql/', operating_system.SUSE: '/var/lib/pgsql/'}[OS] LISTEN_ADDRESSES = ['*'] # Listen on all available IP (v4/v6) interfaces. def __init__(self, *args, **kwargs): super(PgSqlConfig, self).__init__(*args, **kwargs) revision_dir = guestagent_utils.build_file_path( os.path.dirname(self.pgsql_config), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self._configuration_manager = ConfigurationManager( self.pgsql_config, self.PGSQL_OWNER, self.PGSQL_OWNER, PropertiesCodec( delimiter='=', string_mappings={'on': True, 'off': False, "''": None}), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) @property def pgsql_config(self): return self._find_config_file('postgresql.conf') @property def pgsql_hba_config(self): return self._find_config_file('pg_hba.conf') @property def pgsql_ident_config(self): return self._find_config_file('pg_ident.conf') def _find_config_file(self, name_pattern): version_base = guestagent_utils.build_file_path(self.CONFIG_BASE, self.pg_version[1]) return sorted(operating_system.list_files_in_directory( version_base, recursive=True, pattern=name_pattern, as_root=True), key=len)[0] def update_overrides(self, context, overrides, remove=False): if remove: self.configuration_manager.remove_user_override() elif overrides: self.configuration_manager.apply_user_override(overrides) def apply_overrides(self, context, overrides): # Send a signal to the server, causing configuration files to be # reloaded by all server processes. # Active queries or connections to the database will not be # interrupted. # # NOTE: Do not use the 'SET' command as it only affects the current # session. pgutil.psql("SELECT pg_reload_conf()") def reset_configuration(self, context, configuration): """Reset the PgSql configuration to the one given. """ config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) def start_db_with_conf_changes(self, context, config_contents): """Starts the PgSql instance with a new configuration.""" if PgSqlAppStatus.get().is_running: raise RuntimeError(_("The service is still running.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration() self.start_db(context) def apply_initial_guestagent_configuration(self): """Update guestagent-controlled configuration properties. """ LOG.debug("Applying initial guestagent configuration.") file_locations = { 'data_directory': self._quote(self.pgsql_data_dir), 'hba_file': self._quote(self.pgsql_hba_config), 'ident_file': self._quote(self.pgsql_ident_config), 'external_pid_file': self._quote(self.PID_FILE), 'unix_socket_directories': self._quote(self.UNIX_SOCKET_DIR), 'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)), 'port': CONF.postgresql.postgresql_port} self.configuration_manager.apply_system_override(file_locations) self._apply_access_rules() @staticmethod def _quote(value): return "'%s'" % value def _apply_access_rules(self): LOG.debug("Applying database access rules.") # Connections to all resources are granted. # # Local access from administrative users is implicitly trusted. # # Remote access from the Trove's account is always rejected as # it is not needed and could be used by malicious users to hijack the # instance. 
# # Connections from other accounts always require a double-MD5-hashed # password. # # Make the rules readable only by the Postgres service. # # NOTE: The order of entries is important. # The first failure to authenticate stops the lookup. # That is why the 'local' connections validate first. # The OrderedDict is necessary to guarantee the iteration order. access_rules = OrderedDict( [('local', [['all', 'postgres,os_admin', None, 'trust'], ['all', 'all', None, 'md5']]), ('host', [['all', 'postgres,os_admin', '127.0.0.1/32', 'trust'], ['all', 'postgres,os_admin', '::1/128', 'trust'], ['all', 'postgres,os_admin', 'localhost', 'trust'], ['all', 'os_admin', '0.0.0.0/0', 'reject'], ['all', 'os_admin', '::/0', 'reject'], ['all', 'all', '0.0.0.0/0', 'md5'], ['all', 'all', '::/0', 'md5']]) ]) operating_system.write_file(self.pgsql_hba_config, access_rules, PropertiesCodec( string_mappings={'\t': None}), as_root=True) operating_system.chown(self.pgsql_hba_config, self.PGSQL_OWNER, self.PGSQL_OWNER, as_root=True) operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO, as_root=True) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/database.py0000664000567000056710000001002112701410316031752 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.notification import EndNotification from trove.guestagent.datastore.experimental.postgresql import pgutil LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlDatabase(object): def __init__(self, *args, **kwargs): super(PgSqlDatabase, self).__init__(*args, **kwargs) def create_database(self, context, databases): """Create the list of specified databases. The databases parameter is a list of dictionaries in the following form: {"_name": "", "_character_set": "", "_collate": ""} Encoding and collation values are validated in trove.guestagent.db.models. """ with EndNotification(context): for database in databases: encoding = database.get('_character_set') collate = database.get('_collate') LOG.info( _("{guest_id}: Creating database {name}.").format( guest_id=CONF.guest_id, name=database['_name'], ) ) pgutil.psql( pgutil.DatabaseQuery.create( name=database['_name'], encoding=encoding, collation=collate, ), timeout=30, ) def delete_database(self, context, database): """Delete the specified database. The database parameter is a dictionary in the following form: {"_name": ""} """ with EndNotification(context): LOG.info( _("{guest_id}: Dropping database {name}.").format( guest_id=CONF.guest_id, name=database['_name'], ) ) pgutil.psql( pgutil.DatabaseQuery.drop(name=database['_name']), timeout=30, ) def list_databases( self, context, limit=None, marker=None, include_marker=False, ): """List databases created on this instance. 
Return value is a list of dictionaries in the following form: [{"_name": "", "_character_set": "", "_collate": ""}, ...] """ results = pgutil.query( pgutil.DatabaseQuery.list(ignore=cfg.get_ignored_dbs( manager='postgresql')), timeout=30, ) # Convert results to dictionaries. results = ( {'_name': r[0].strip(), '_character_set': r[1], '_collate': r[2]} for r in results ) # Force __iter__ of generator until marker found. if marker is not None: try: item = results.next() while item['_name'] != marker: item = results.next() except StopIteration: pass remainder = None if limit is not None: remainder = results results = itertools.islice(results, limit) results = tuple(results) next_marker = None if remainder is not None: try: next_marker = remainder.next() except StopIteration: pass return results, next_marker trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/access.py0000664000567000056710000000614712701410316031465 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.guestagent.datastore.experimental.postgresql import pgutil LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlAccess(object): """Mixin implementing the user-access API calls.""" def grant_access(self, context, username, hostname, databases): """Give a user permission to use a given database. The username and hostname parameters are strings. The databases parameter is a list of strings representing the names of the databases to grant permission on. """ for database in databases: LOG.info( _("{guest_id}: Granting user ({user}) access to database " "({database}).").format( guest_id=CONF.guest_id, user=username, database=database,) ) pgutil.psql( pgutil.AccessQuery.grant( user=username, database=database, ), timeout=30, ) def revoke_access(self, context, username, hostname, database): """Revoke a user's permission to use a given database. The username and hostname parameters are strings. The database parameter is a string representing the name of the database. """ LOG.info( _("{guest_id}: Revoking user ({user}) access to database" "({database}).").format( guest_id=CONF.guest_id, user=username, database=database,) ) pgutil.psql( pgutil.AccessQuery.revoke( user=username, database=database, ), timeout=30, ) def list_access(self, context, username, hostname): """List database for which the given user as access. The username and hostname parameters are strings. Return value is a list of dictionaries in the following form: [{"_name": "", "_collate": None, "_character_set": None}, ...] """ results = pgutil.query( pgutil.AccessQuery.list(user=username), timeout=30, ) # Convert to dictionaries. 
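            # Each row returned by AccessQuery.list is
            # (datname, encoding, collate); only the database name is
            # meaningful for an access listing, so the charset and
            # collation fields are reported as None.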
results = ( {'_name': r[0].strip(), '_collate': None, '_character_set': None} for r in results ) return tuple(results) trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/users.py0000664000567000056710000002246612701410316031367 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.notification import EndNotification from trove.common import utils from trove.guestagent.datastore.experimental.postgresql import pgutil from trove.guestagent.datastore.experimental.postgresql.service.access import ( PgSqlAccess) LOG = logging.getLogger(__name__) CONF = cfg.CONF class PgSqlUsers(PgSqlAccess): """Mixin implementing the user CRUD API. This mixin has a dependency on the PgSqlAccess mixin. """ @property def ADMIN_USER(self): """Trove's administrative user.""" return 'os_admin' @property def ADMIN_OPTIONS(self): """Default set of options of an administrative account.""" return [ 'SUPERUSER', 'CREATEDB', 'CREATEROLE', 'INHERIT', 'REPLICATION', 'LOGIN'] def _create_admin_user(self, context): """Create an administrative user for Trove. Force password encryption. """ password = utils.generate_random_password() os_admin = {'_name': self.ADMIN_USER, '_password': password, '_databases': [{'_name': self.ADMIN_USER}]} self._create_user(context, os_admin, True, *self.ADMIN_OPTIONS) def create_user(self, context, users): """Create users and grant privileges for the specified databases. The users parameter is a list of dictionaries in the following form: {"_name": "", "_password": "", "_databases": [{"_name": ""}, ...]} """ with EndNotification(context): for user in users: self._create_user(context, user, None) def _create_user(self, context, user, encrypt_password=None, *options): LOG.info( _("{guest_id}: Creating user {user} {with_clause}.") .format( guest_id=CONF.guest_id, user=user['_name'], with_clause=pgutil.UserQuery._build_with_clause( '', encrypt_password, *options ), ) ) pgutil.psql( pgutil.UserQuery.create( user['_name'], user['_password'], encrypt_password, *options ), timeout=30, ) self.grant_access( context, user['_name'], None, [d['_name'] for d in user['_databases']], ) def list_users( self, context, limit=None, marker=None, include_marker=False, ): """List all users on the instance along with their access permissions. Return value is a list of dictionaries in the following form: [{"_name": "", "_password": None, "_host": None, "_databases": [{"_name": ""}, ...]}, ...] """ results = pgutil.query( pgutil.UserQuery.list(ignore=cfg.get_ignored_users( manager='postgresql')), timeout=30, ) # Convert results into dictionaries. results = ( { '_name': r[0].strip(), '_password': None, '_host': None, '_databases': self.list_access(context, r[0], None), } for r in results ) # Force __iter__ of generator until marker found. 
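            # Pagination: advance the generator just past 'marker', take up
            # to 'limit' items, then peek at the remainder to compute the
            # next marker (None once the result set is exhausted).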
        if marker is not None:
            try:
                item = results.next()
                while item['_name'] != marker:
                    item = results.next()
            except StopIteration:
                pass

        remainder = None
        if limit is not None:
            remainder = results
            results = itertools.islice(results, limit)

        results = tuple(results)

        next_marker = None
        if remainder is not None:
            try:
                next_marker = remainder.next()
            except StopIteration:
                pass

        return results, next_marker

    def delete_user(self, context, user):
        """Delete the specified user.

        The user parameter is a dictionary in the following form:

            {"_name": ""}
        """
        with EndNotification(context):
            LOG.info(
                _("{guest_id}: Dropping user {name}.").format(
                    guest_id=CONF.guest_id,
                    name=user['_name'],
                )
            )
            pgutil.psql(
                pgutil.UserQuery.drop(name=user['_name']),
                timeout=30,
            )

    def get_user(self, context, username, hostname):
        """Return a single user matching the criteria.

        The username and hostname parameters are strings.

        The return value is a dictionary in the following form:

            {"_name": "", "_host": None, "_password": None,
             "_databases": [{"_name": ""}, ...]}

        Where "_databases" is a list of databases the user has access to.
        """
        results = pgutil.query(
            pgutil.UserQuery.get(name=username),
            timeout=30,
        )
        results = tuple(results)
        if len(results) < 1:
            return None

        return {
            "_name": results[0][0],
            "_host": None,
            "_password": None,
            "_databases": self.list_access(context, username, None),
        }

    def change_passwords(self, context, users):
        """Change the passwords of one or more existing users.

        The users parameter is a list of dictionaries in the following form:

            {"name": "", "password": ""}
        """
        with EndNotification(context):
            for user in users:
                self.alter_user(context, user, None)

    def alter_user(self, context, user, encrypt_password=None, *options):
        """Change the password and options of an existing user.

        The user parameter is a dictionary of the following form:

            {"name": "", "password": ""}
        """
        LOG.info(
            _("{guest_id}: Altering user {user} {with_clause}.")
            .format(
                guest_id=CONF.guest_id,
                user=user['_name'],
                with_clause=pgutil.UserQuery._build_with_clause(
                    '',
                    encrypt_password,
                    *options
                ),
            )
        )
        pgutil.psql(
            pgutil.UserQuery.alter_user(
                user['_name'],
                user['_password'],
                encrypt_password,
                *options),
            timeout=30,
        )

    def update_attributes(self, context, username, hostname, user_attrs):
        """Change the attributes of one existing user.

        The username and hostname parameters are strings.
        The user_attrs parameter is a dictionary in the following form:

            {"password": "", "name": ""}

        Each key/value pair in user_attrs is optional.
        """
        with EndNotification(context):
            if user_attrs.get('password') is not None:
                self.change_passwords(
                    context,
                    (
                        {
                            "name": username,
                            "password": user_attrs['password'],
                        },
                    ),
                )

            if user_attrs.get('name') is not None:
                access = self.list_access(context, username, None)
                LOG.info(
                    _("{guest_id}: Changing username for {old} to {new}."
                      ).format(
                          guest_id=CONF.guest_id,
                          old=username,
                          new=user_attrs['name'],
                      )
                )
                pgutil.psql(
                    pgutil.UserQuery.update_name(
                        old=username,
                        new=user_attrs['name'],
                    ),
                    timeout=30,
                )
                # Regrant all previous access after the name change.
                LOG.info(
                    _("{guest_id}: Regranting permissions from {old} "
                      "to {new}.")
                    .format(
                        guest_id=CONF.guest_id,
                        old=username,
                        new=user_attrs['name'],
                    )
                )
                self.grant_access(
                    context,
                    username=user_attrs['name'],
                    hostname=None,
                    databases=(db['_name'] for db in access)
                )
trove-5.0.0/trove/guestagent/datastore/experimental/postgresql/service/status.py0000664000567000056710000000320412701410316031536 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
import psycopg2

from trove.common.i18n import _
from trove.common import instance
from trove.common import utils
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.datastore import service

LOG = logging.getLogger(__name__)


class PgSqlAppStatus(service.BaseDbStatus):

    @classmethod
    def get(cls):
        if not cls._instance:
            cls._instance = PgSqlAppStatus()
        return cls._instance

    def _get_actual_db_status(self):
        try:
            # Any query will initiate a new database connection.
            pgutil.psql("SELECT 1")
            return instance.ServiceStatuses.RUNNING
        except psycopg2.OperationalError:
            return instance.ServiceStatuses.SHUTDOWN
        except utils.Timeout:
            return instance.ServiceStatuses.BLOCKED
        except Exception:
            LOG.exception(_("Error getting Postgres status."))
            return instance.ServiceStatuses.CRASHED
trove-5.0.0/trove/guestagent/datastore/experimental/couchdb/0000775000567000056710000000000012701410521025404 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/couchdb/system.py0000664000567000056710000000647212701410316027315 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
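# The constants below are shell command templates interpolated with '%';
# a sketch of one rendering, with made-up credentials:
#
#     CREATE_DB_COMMAND % {'admin_name': 'os_admin',
#                          'admin_password': 'secret',
#                          'dbname': 'db1'}
#     # -> curl -X PUT http://os_admin:secret@localhost:5984/db1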
from os import path SERVICE_CANDIDATES = ["couchdb"] UPDATE_BIND_ADDRESS = ( "sudo sed -i -r 's/;bind_address = 127.0.0.1/bind_address = 0.0.0.0/' " "/etc/couchdb/local.ini") TIME_OUT = 1200 COUCHDB_HTTPD_PORT = "5984" COUCHDB_SERVER_STATUS = "curl http://127.0.0.1:" + COUCHDB_HTTPD_PORT COUCHDB_ADMIN_NAME = 'os_admin' COUCHDB_CREATE_ADMIN = ( "curl -X PUT http://127.0.0.1:" + COUCHDB_HTTPD_PORT + "/_config/admins/" + COUCHDB_ADMIN_NAME + " -d '\"%(password)s\"'") COUCHDB_ADMIN_CREDS_FILE = path.join(path.expanduser('~'), '.os_couchdb_admin_creds.json') CREATE_USER_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/org.couchdb.user:%(username)s -H \"Accept:" " application/json\" -H \"Content-Type: application/json\" -d \'{\"name\"" ": \"%(username)s\", \"password\": \"%(password)s\", \"roles\": []," " \"type\":\"user\"}\'") DELETE_REV_ID = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/_all_docs") DELETE_USER_COMMAND = ( "curl -X DELETE http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/org.couchdb.user:%(username)s?rev=" "%(revid)s") ALL_USERS_COMMAND = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/_all_docs") DB_ACCESS_COMMAND = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s/_security") GRANT_ACCESS_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s/_security -d \'{\"admins\":{\"names\"" ":[], \"roles\":[]}, \"members\":{\"" + "names\":[\"%(username)s\"],\"" "roles\":[]}}\'") REVOKE_ACCESS_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s/_security" + " -d \'{\"admins\":{\"" "names\":[], \"roles\":[]}, \"members\":{\"" + "names\":%(username)s,\"" "roles\":[]}}\'") ENABLE_ROOT = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:5984" "/_config/admins/root -d '\"%(password)s\"'") IS_ROOT_ENABLED = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:5984/_config/" "admins") CREATE_DB_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s") LIST_DB_COMMAND = ( "curl -X GET http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_all_dbs") DELETE_DB_COMMAND = ( "curl -X DELETE http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s") trove-5.0.0/trove/guestagent/datastore/experimental/couchdb/__init__.py0000664000567000056710000000000012701410316027505 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/couchdb/manager.py0000664000567000056710000001442312701410316027376 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
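# This module is loaded dynamically, keyed on the guest's datastore; a
# minimal guestagent configuration sketch (the value is illustrative):
#
#     [DEFAULT]
#     datastore_manager = couchdb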
import os from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchdb import service from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is CouchDB Manager class. It is dynamically loaded based off of the datastore of the Trove instance. """ def __init__(self): self.appStatus = service.CouchDBAppStatus() self.app = service.CouchDBApp(self.appStatus) super(Manager, self).__init__('couchdb') @property def status(self): return self.appStatus def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) if device_path: self.app.stop_db() device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): device.migrate_data(mount_point) device.mount(mount_point) LOG.debug('Mounted the volume (%s).' % device_path) self.app.start_db() self.app.change_permissions() self.app.make_host_reachable() if backup_info: self._perform_restore(backup_info, context, mount_point) self.app.secure() def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this CouchDB instance. This method is called when the guest agent gets a stop message from the taskmanager. """ LOG.debug("Stopping the CouchDB instance.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def restart(self, context): """ Restart this CouchDB instance. This method is called when the guest agent gets a restart message from the taskmanager. """ LOG.debug("Restarting the CouchDB instance.") self.app.restart() def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting CouchDB with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def _perform_restore(self, backup_info, context, restore_location): """ Restores all CouchDB databases and their documents from the backup. """ LOG.info(_("Restoring database from backup %s") % backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception(_("Error performing restore from backup %s") % backup_info['id']) self.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info(_("Restored database successfully")) def create_backup(self, context, backup_info): LOG.debug("Creating backup for CouchDB.") backup.backup(context, backup_info) def create_admin_user(self, context, password): self.app.create_admin_user(password) def store_admin_password(self, context, password): self.app.store_admin_password(password) def create_user(self, context, users): LOG.debug("Creating user(s).") return service.CouchDBAdmin().create_user(users) def delete_user(self, context, user): LOG.debug("Deleting user.") return service.CouchDBAdmin().delete_user(user) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing users.") return service.CouchDBAdmin().list_users(limit, marker, include_marker) def get_user(self, context, username, hostname): LOG.debug("Show details of user %s." 
                  % username)
        return service.CouchDBAdmin().get_user(username, hostname)

    def grant_access(self, context, username, hostname, databases):
        LOG.debug("Granting access.")
        return service.CouchDBAdmin().grant_access(username, databases)

    def revoke_access(self, context, username, hostname, database):
        LOG.debug("Revoking access.")
        return service.CouchDBAdmin().revoke_access(username, database)

    def list_access(self, context, username, hostname):
        LOG.debug("Listing access.")
        return service.CouchDBAdmin().list_access(username, hostname)

    def enable_root(self, context):
        LOG.debug("Enabling root.")
        return service.CouchDBAdmin().enable_root()

    def enable_root_with_password(self, context, root_password=None):
        return service.CouchDBAdmin().enable_root(root_pwd=root_password)

    def is_root_enabled(self, context):
        LOG.debug("Checking if root is enabled.")
        return service.CouchDBAdmin().is_root_enabled()

    def create_database(self, context, databases):
        LOG.debug("Creating database(s).")
        return service.CouchDBAdmin().create_database(databases)

    def list_databases(self, context, limit=None, marker=None,
                       include_marker=False):
        LOG.debug("Listing databases.")
        return service.CouchDBAdmin().list_databases(limit, marker,
                                                     include_marker)

    def delete_database(self, context, database):
        LOG.debug("Deleting database.")
        return service.CouchDBAdmin().delete_database(database)
trove-5.0.0/trove/guestagent/datastore/experimental/couchdb/service.py0000664000567000056710000005634612701410316027432 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ast
import getpass
import json

from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import pagination
from trove.common.stream_codecs import JsonCodec
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.couchdb import system
from trove.guestagent.datastore import service
from trove.guestagent.db import models
from trove.guestagent import pkg

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
packager = pkg.Package()

MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'couchdb'

COUCHDB_LIB_DIR = "/var/lib/couchdb"
COUCHDB_LOG_DIR = "/var/log/couchdb"
COUCHDB_CONFIG_DIR = "/etc/couchdb"
COUCHDB_BIN_DIR = "/var/run/couchdb"


class CouchDBApp(object):
    """
    Handles installation and configuration of CouchDB
    on a Trove instance.
    """

    def __init__(self, status, state_change_wait_time=None):
        """
        Sets default status and state_change_wait_time.
        """
        self.state_change_wait_time = (
            state_change_wait_time if state_change_wait_time else
            CONF.state_change_wait_time
        )
        LOG.debug("state_change_wait_time = %s."
                  % self.state_change_wait_time)
        self.status = status

    def install_if_needed(self, packages):
        """
        Install CouchDB if needed, do nothing if it is already installed.
        """
        LOG.info(_('Preparing guest as a CouchDB server.'))
        if not packager.pkg_is_installed(packages):
            LOG.debug("Installing packages: %s." % str(packages))
            packager.pkg_install(packages, {}, system.TIME_OUT)
        LOG.info(_("Finished installing CouchDB server."))

    def change_permissions(self):
        """
        When CouchDB is installed, a default user 'couchdb' is created.
        In order to start/stop/restart the CouchDB service as the current
        OS user, add the current OS user to the 'couchdb' group and provide
        read/write access to the 'couchdb' group.
        """
        try:
            LOG.debug("Changing permissions.")
            for dir in [COUCHDB_LIB_DIR, COUCHDB_LOG_DIR,
                        COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR]:
                operating_system.chown(dir, 'couchdb', 'couchdb',
                                       as_root=True)
                operating_system.chmod(dir, FileMode.ADD_GRP_RW,
                                       as_root=True)

            operating_system.change_user_group(getpass.getuser(), 'couchdb',
                                               as_root=True)
            LOG.debug("Successfully changed permissions.")
        except exception.ProcessExecutionError:
            LOG.exception(_("Error changing permissions."))

    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        self.status.stop_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)

    def start_db(self, update_db=False):
        self.status.start_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time,
            enable_on_boot=True, update_db=update_db)

    def restart(self):
        self.status.restart_db_service(
            system.SERVICE_CANDIDATES, self.state_change_wait_time)

    def make_host_reachable(self):
        try:
            LOG.debug("Changing bind address to 0.0.0.0 .")
            self.stop_db()
            out, err = utils.execute_with_timeout(
                system.UPDATE_BIND_ADDRESS, shell=True
            )
            self.start_db()
        except exception.ProcessExecutionError:
            LOG.exception(_("Error while trying to update bind address of"
                            " CouchDB server."))

    def start_db_with_conf_changes(self, config_contents):
        '''
        Will not be implementing configuration change API for CouchDB in
        the Kilo release. Currently all that this method does is to start
        the CouchDB server without any configuration changes. Looks like
        this needs to be implemented to enable volume resize on the guest
        agent side.
        '''
        LOG.info(_("Starting CouchDB with configuration changes."))
        self.start_db(True)

    def store_admin_password(self, password):
        LOG.debug('Storing the admin password.')
        creds = CouchDBCredentials(username=system.COUCHDB_ADMIN_NAME,
                                   password=password)
        creds.write(system.COUCHDB_ADMIN_CREDS_FILE)
        return creds

    def create_admin_user(self, password):
        '''
        Creating the admin user, os_admin, for the couchdb instance
        '''
        LOG.debug('Creating the admin user.')
        creds = self.store_admin_password(password)
        out, err = utils.execute_with_timeout(
            system.COUCHDB_CREATE_ADMIN % {'password': creds.password},
            shell=True)
        LOG.debug('Created admin user.')

    def secure(self):
        '''
        Create the Trove admin user.
        The service should not be running at this point.
        '''
        self.start_db(update_db=False)
        password = utils.generate_random_password()
        self.create_admin_user(password)
        LOG.debug("CouchDB secure complete.")

    @property
    def admin_password(self):
        creds = CouchDBCredentials()
        creds.read(system.COUCHDB_ADMIN_CREDS_FILE)
        return creds.password


class CouchDBAppStatus(service.BaseDbStatus):
    """
    Handles all of the status updating for the CouchDB guest agent.
    We can verify that CouchDB is running by running the command:
        curl http://127.0.0.1:5984/
    The response will be similar to:
        {"couchdb":"Welcome","version":"1.6.0"}
    """

    def _get_actual_db_status(self):
        try:
            out, err = utils.execute_with_timeout(
                system.COUCHDB_SERVER_STATUS, shell=True
            )
            LOG.debug("CouchDB status = %r" % out)
            server_status = json.loads(out)
            status = server_status["couchdb"]
            if status == 'Welcome':
                LOG.debug("Status of CouchDB is active.")
                return rd_instance.ServiceStatuses.RUNNING
            else:
                LOG.debug("Status of CouchDB is not active.")
                return rd_instance.ServiceStatuses.SHUTDOWN
        except exception.ProcessExecutionError:
            LOG.exception(_("Error getting CouchDB status."))
            return rd_instance.ServiceStatuses.SHUTDOWN


class CouchDBAdmin(object):
    '''Handles administrative functions on CouchDB.'''

    # user is cached by making it a class attribute
    admin_user = None

    def _admin_user(self):
        if not type(self).admin_user:
            creds = CouchDBCredentials()
            creds.read(system.COUCHDB_ADMIN_CREDS_FILE)
            user = models.CouchDBUser()
            user.name = creds.username
            user.password = creds.password
            type(self).admin_user = user
        return type(self).admin_user

    def _is_modifiable_user(self, name):
        if name in cfg.get_ignored_users(manager=MANAGER):
            return False
        elif name == system.COUCHDB_ADMIN_NAME:
            return False
        return True

    def create_user(self, users):
        LOG.debug("Creating user(s) for accessing CouchDB database(s).")
        self._admin_user()
        try:
            for item in users:
                user = models.CouchDBUser()
                user.deserialize(item)
                try:
                    LOG.debug("Creating user: %s." % user.name)
                    utils.execute_with_timeout(
                        system.CREATE_USER_COMMAND %
                        {'admin_name': self._admin_user().name,
                         'admin_password': self._admin_user().password,
                         'username': user.name,
                         'password': user.password},
                        shell=True)
                except exception.ProcessExecutionError as pe:
                    LOG.exception(_("Error creating user: %s.") % user.name)
                    pass

                for database in user.databases:
                    mydb = models.CouchDBSchema()
                    mydb.deserialize(database)
                    try:
                        LOG.debug("Granting user: %s access to database: %s."
                                  % (user.name, mydb.name))
                        out, err = utils.execute_with_timeout(
                            system.GRANT_ACCESS_COMMAND %
                            {'admin_name': self._admin_user().name,
                             'admin_password': self._admin_user().password,
                             'dbname': mydb.name,
                             'username': user.name},
                            shell=True)
                    except exception.ProcessExecutionError as pe:
                        LOG.debug("Error granting user: %s access to "
                                  "database: %s." % (user.name, mydb.name))
                        LOG.debug(pe)
                        pass
        except exception.ProcessExecutionError as pe:
            LOG.exception(_("An error occurred creating users: %s.") %
                          pe.message)
            pass

    def delete_user(self, user):
        LOG.debug("Delete a given CouchDB user.")
        couchdb_user = models.CouchDBUser()
        couchdb_user.deserialize(user)
        db_names = self.list_database_names()

        for db in db_names:
            userlist = []
            try:
                out, err = utils.execute_with_timeout(
                    system.DB_ACCESS_COMMAND %
                    {'admin_name': self._admin_user().name,
                     'admin_password': self._admin_user().password,
                     'dbname': db},
                    shell=True)
            except exception.ProcessExecutionError:
                LOG.debug(
                    "Error while trying to get the users for database: %s."
% db) continue evalout = ast.literal_eval(out) if evalout: members = evalout['members'] names = members['names'] for i in range(0, len(names)): couchdb_user.databases = db userlist.append(names[i]) if couchdb_user.name in userlist: userlist.remove(couchdb_user.name) out2, err2 = utils.execute_with_timeout( system.REVOKE_ACCESS_COMMAND % { 'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db, 'username': userlist}, shell=True) try: out2, err = utils.execute_with_timeout( system.DELETE_REV_ID % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password}, shell=True) evalout2 = ast.literal_eval(out2) rows = evalout2['rows'] userlist = [] for i in range(0, len(rows)): row = rows[i] username = "org.couchdb.user:" + couchdb_user.name if row['key'] == username: rev = row['value'] revid = rev['rev'] utils.execute_with_timeout( system.DELETE_USER_COMMAND % { 'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'username': couchdb_user.name, 'revid': revid}, shell=True) except exception.ProcessExecutionError as pe: LOG.exception(_( "There was an error while deleting user: %s.") % pe) raise exception.GuestError(_("Unable to delete user: %s.") % couchdb_user.name) def list_users(self, limit=None, marker=None, include_marker=False): '''List all users and the databases they have access to.''' users = [] db_names = self.list_database_names() try: out, err = utils.execute_with_timeout( system.ALL_USERS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password}, shell=True) except exception.ProcessExecutionError: LOG.debug("Error while trying to get list of all couchdb users") evalout = ast.literal_eval(out) rows = evalout['rows'] userlist = [] for i in range(0, len(rows)): row = rows[i] uname = row['key'] if not self._is_modifiable_user(uname): break elif uname[17:]: userlist.append(uname[17:]) for i in range(len(userlist)): user = models.CouchDBUser() user.name = userlist[i] for db in db_names: try: out2, err = utils.execute_with_timeout( system.DB_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db}, shell=True) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get users for database: %s." % db) continue evalout2 = ast.literal_eval(out2) if evalout2: members = evalout2['members'] names = members['names'] for i in range(0, len(names)): if user.name == names[i]: user.databases = db users.append(user.serialize()) next_marker = None return users, next_marker def get_user(self, username, hostname): '''Get Information about the given user.''' LOG.debug('Getting user %s.' % username) user = self._get_user(username, hostname) if not user: return None return user.serialize() def _get_user(self, username, hostname): user = models.CouchDBUser() user.name = username db_names = self.list_database_names() for db in db_names: try: out, err = utils.execute_with_timeout( system.DB_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db}, shell=True) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get the users for database: %s." 
                    % db)
                continue
            evalout = ast.literal_eval(out)
            if evalout:
                members = evalout['members']
                names = members['names']
                for i in range(0, len(names)):
                    if user.name == names[i]:
                        user.databases = db
        return user

    def grant_access(self, username, databases):
        if self._get_user(username, None).name != username:
            raise exception.BadRequest(_(
                'Cannot grant access for non-existent user: '
                '%(user)s') % {'user': username})
        else:
            user = models.CouchDBUser()
            user.name = username
            if not self._is_modifiable_user(user.name):
                LOG.warning(_('Cannot grant access for reserved user '
                              '%(user)s') % {'user': username})
        for db_name in databases:
            out, err = utils.execute_with_timeout(
                system.GRANT_ACCESS_COMMAND %
                {'admin_name': self._admin_user().name,
                 'admin_password': self._admin_user().password,
                 'dbname': db_name,
                 'username': username},
                shell=True)

    def revoke_access(self, username, database):
        userlist = []
        if self._is_modifiable_user(username):
            out, err = utils.execute_with_timeout(
                system.DB_ACCESS_COMMAND %
                {'admin_name': self._admin_user().name,
                 'admin_password': self._admin_user().password,
                 'dbname': database},
                shell=True)
            evalout = ast.literal_eval(out)
            members = evalout['members']
            names = members['names']
            for i in range(0, len(names)):
                userlist.append(names[i])
            if username in userlist:
                userlist.remove(username)
            out2, err2 = utils.execute_with_timeout(
                system.REVOKE_ACCESS_COMMAND %
                {'admin_name': self._admin_user().name,
                 'admin_password': self._admin_user().password,
                 'dbname': database,
                 'username': userlist},
                shell=True)

    def list_access(self, username, hostname):
        '''Returns a list of all databases to which the user has access.'''
        user = self._get_user(username, hostname)
        return user.databases

    def enable_root(self, root_pwd=None):
        '''Create admin user root.'''
        if not root_pwd:
            LOG.debug('Generating root user password.')
            root_pwd = utils.generate_random_password()
        root_user = models.CouchDBUser()
        root_user.name = 'root'
        root_user.password = root_pwd
        out, err = utils.execute_with_timeout(
            system.ENABLE_ROOT %
            {'admin_name': self._admin_user().name,
             'admin_password': self._admin_user().password,
             'password': root_pwd},
            shell=True)
        return root_user.serialize()

    def is_root_enabled(self):
        '''Check if user root exists.'''
        out, err = utils.execute_with_timeout(
            system.IS_ROOT_ENABLED %
            {'admin_name': self._admin_user().name,
             'admin_password': self._admin_user().password},
            shell=True)
        evalout = ast.literal_eval(out)
        if evalout['root']:
            return True
        else:
            return False

    def create_database(self, databases):
        '''Create the given database(s).'''
        dbName = None
        db_create_failed = []
        LOG.debug("Creating CouchDB databases.")

        for database in databases:
            dbName = models.CouchDBSchema.deserialize_schema(database).name
            LOG.debug('Creating CouchDB database %s' % dbName)
            try:
                utils.execute_with_timeout(
                    system.CREATE_DB_COMMAND %
                    {'admin_name': self._admin_user().name,
                     'admin_password': self._admin_user().password,
                     'dbname': dbName},
                    shell=True)
            except exception.ProcessExecutionError:
                LOG.exception(_(
                    "There was an error creating database: %s.") % dbName)
                db_create_failed.append(dbName)
                pass

        if len(db_create_failed) > 0:
            LOG.error(_("Creating the following databases failed: %s.") %
                      db_create_failed)

    def list_database_names(self):
        '''Get the list of database names.'''
        out, err = utils.execute_with_timeout(
            system.LIST_DB_COMMAND %
            {'admin_name': self._admin_user().name,
             'admin_password': self._admin_user().password},
            shell=True)
        # literal_eval is safer than eval for parsing the curl output.
        dbnames_list = ast.literal_eval(out)
        for hidden in cfg.get_ignored_dbs(manager=MANAGER):
            if hidden in dbnames_list:
                dbnames_list.remove(hidden)
        return dbnames_list

    def list_databases(self, limit=None, marker=None, include_marker=False):
        '''Lists all the CouchDB databases.'''
        databases = []
        db_names = self.list_database_names()
        pag_dblist, marker = pagination.paginate_list(db_names, limit, marker,
                                                      include_marker)
        databases = [models.CouchDBSchema(db_name).serialize()
                     for db_name in pag_dblist]
        LOG.debug('databases = ' + str(databases))
        return databases, marker

    def delete_database(self, database):
        '''Delete the specified database.'''
        dbName = None
        try:
            dbName = models.CouchDBSchema.deserialize_schema(database).name
            LOG.debug("Deleting CouchDB database: %s." % dbName)
            utils.execute_with_timeout(
                system.DELETE_DB_COMMAND %
                {'admin_name': self._admin_user().name,
                 'admin_password': self._admin_user().password,
                 'dbname': dbName},
                shell=True)
        except exception.ProcessExecutionError:
            LOG.exception(_(
                "There was an error while deleting database: %s.") % dbName)
            raise exception.GuestError(_("Unable to delete database: %s.") %
                                       dbName)


class CouchDBCredentials(object):
    """Handles storing/retrieving credentials. Stored as json in files."""

    def __init__(self, username=None, password=None):
        self.username = username
        self.password = password

    def read(self, filename):
        credentials = operating_system.read_file(filename, codec=JsonCodec())
        self.username = credentials['username']
        self.password = credentials['password']

    def write(self, filename):
        self.clear_file(filename)
        credentials = {'username': self.username,
                       'password': self.password}
        operating_system.write_file(filename, credentials, codec=JsonCodec())
        operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW)

    @staticmethod
    def clear_file(filename):
        LOG.debug("Creating clean file %s" % filename)
        if operating_system.file_discovery([filename]):
            operating_system.remove(filename)
        # force file creation by just opening it
        open(filename, 'wb')
        operating_system.chmod(filename,
                               operating_system.FileMode.SET_USR_RW,
                               as_root=True)
trove-5.0.0/trove/guestagent/datastore/experimental/db2/0000775000567000056710000000000012701410521024444 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/db2/system.py0000664000567000056710000000477212701410316026354 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
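# The constants below are command templates interpolated with '%' and run
# as the DB2 instance owner (roughly ``sudo su - db2inst1 -c <command>``);
# a sketch of one rendering, with made-up names:
#
#     GRANT_USER_ACCESS % {'dbname': 'db1', 'login': 'jdoe'}
#     # -> db2 connect to db1; db2 GRANT DBADM,... TO USER jdoe; ...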
TIMEOUT = 1200 DB2_INSTANCE_OWNER = "db2inst1" UPDATE_HOSTNAME = ( 'source /home/db2inst1/sqllib/db2profile;' 'db2set -g DB2SYSTEM="$(hostname)"') ENABLE_AUTOSTART = ( "/opt/ibm/db2/V10.5/instance/db2iauto -on " + DB2_INSTANCE_OWNER) DISABLE_AUTOSTART = ( "/opt/ibm/db2/V10.5/instance/db2iauto -off " + DB2_INSTANCE_OWNER) START_DB2 = "db2start" QUIESCE_DB2 = ("db2 QUIESCE INSTANCE DB2INST1 RESTRICTED ACCESS IMMEDIATE " "FORCE CONNECTIONS") UNQUIESCE_DB2 = "db2 UNQUIESCE INSTANCE DB2INST1" STOP_DB2 = "db2 force application all; db2 terminate; db2stop" DB2_STATUS = ("ps -ef | grep " + DB2_INSTANCE_OWNER + " | grep db2sysc |" "grep -v grep | wc -l") CREATE_DB_COMMAND = "db2 create database %(dbname)s" DELETE_DB_COMMAND = "db2 drop database %(dbname)s" LIST_DB_COMMAND = ( "db2 list database directory | grep -B6 -i indirect | " "grep 'Database name' | sed 's/.*= //'") CREATE_USER_COMMAND = ( 'sudo useradd -m -d /home/%(login)s %(login)s;' 'sudo echo %(login)s:%(passwd)s |sudo chpasswd') GRANT_USER_ACCESS = ( "db2 connect to %(dbname)s; " "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " "ON DATABASE TO USER %(login)s; db2 connect reset") DELETE_USER_COMMAND = 'sudo userdel -r %(login)s' REVOKE_USER_ACCESS = ( "db2 connect to %(dbname)s; " "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " "ON DATABASE FROM USER %(login)s; db2 connect reset") LIST_DB_USERS = ( "db2 +o connect to %(dbname)s; " "db2 -x select grantee, dataaccessauth from sysibm.sysdbauth; " "db2 connect reset") BACKUP_DB = "db2 backup database %(dbname)s to %(dir)s" RESTORE_DB = ( "db2 restore database %(dbname)s from %(dir)s") GET_DB_SIZE = ( "db2 connect to %(dbname)s;" "db2 call get_dbsize_info(?, ?, ?, -1) ") GET_DB_NAMES = ("find /home/db2inst1/db2inst1/backup/ -type f -name '*.001' |" " grep -Po \"(?<=backup/)[^.']*(?=\.)\"") trove-5.0.0/trove/guestagent/datastore/experimental/db2/__init__.py0000664000567000056710000000000012701410316026545 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/experimental/db2/manager.py0000664000567000056710000001201612701410316026432 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as ds_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.datastore.experimental.db2 import service from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is DB2 Manager class. It is dynamically loaded based off of the datastore of the Trove instance. 
""" def __init__(self): self.appStatus = service.DB2AppStatus() self.app = service.DB2App(self.appStatus) self.admin = service.DB2Admin() super(Manager, self).__init__('db2') @property def status(self): return self.appStatus def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" if device_path: device = volume.VolumeDevice(device_path) device.unmount_device(device_path) device.format() device.mount(mount_point) LOG.debug('Mounted the volume.') self.app.update_hostname() self.app.change_ownership(mount_point) self.app.start_db() if backup_info: self._perform_restore(backup_info, context, mount_point) def restart(self, context): """ Restart this DB2 instance. This method is called when the guest agent gets a restart message from the taskmanager. """ LOG.debug("Restart a DB2 server instance.") self.app.restart() def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this DB2 instance. This method is called when the guest agent gets a stop message from the taskmanager. """ LOG.debug("Stop a given DB2 server instance.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def create_database(self, context, databases): LOG.debug("Creating database(s)." % databases) with EndNotification(context): self.admin.create_database(databases) def delete_database(self, context, database): LOG.debug("Deleting database %s." % database) with EndNotification(context): return self.admin.delete_database(database) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing all databases.") return self.admin.list_databases(limit, marker, include_marker) def create_user(self, context, users): LOG.debug("Create user(s).") with EndNotification(context): self.admin.create_user(users) def delete_user(self, context, user): LOG.debug("Delete a user %s." % user) with EndNotification(context): self.admin.delete_user(user) def get_user(self, context, username, hostname): LOG.debug("Show details of user %s." % username) return self.admin.get_user(username, hostname) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("List all users.") return self.admin.list_users(limit, marker, include_marker) def list_access(self, context, username, hostname): LOG.debug("List all the databases the user has access to.") return self.admin.list_access(username, hostname) def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting DB2 with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def _perform_restore(self, backup_info, context, restore_location): LOG.info(_("Restoring database from backup %s.") % backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception(_("Error performing restore from backup %s.") % backup_info['id']) self.status.set_status(ds_instance.ServiceStatuses.FAILED) raise LOG.info(_("Restored database successfully.")) def create_backup(self, context, backup_info): LOG.debug("Creating backup.") backup.backup(context, backup_info) trove-5.0.0/trove/guestagent/datastore/experimental/db2/service.py0000664000567000056710000004233312701410316026465 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common import utils as utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.db2 import system from trove.guestagent.datastore import service from trove.guestagent.db import models CONF = cfg.CONF LOG = logging.getLogger(__name__) class DB2App(object): """ Handles installation and configuration of DB2 on a Trove instance. """ def __init__(self, status, state_change_wait_time=None): LOG.debug("Initialize DB2App.") self.state_change_wait_time = ( state_change_wait_time if state_change_wait_time else CONF.state_change_wait_time ) LOG.debug("state_change_wait_time = %s." % self.state_change_wait_time) self.status = status def update_hostname(self): """ When DB2 server is installed, it uses the hostname of the instance were the image was built. This needs to be updated to reflect the guest instance. """ LOG.debug("Update the hostname of the DB2 instance.") try: run_command(system.UPDATE_HOSTNAME, superuser='root') except exception.ProcessExecutionError: raise RuntimeError(_("Command to update the hostname failed.")) def change_ownership(self, mount_point): """ When DB2 server instance is installed, it does not have the DB2 local database directory created (/home/db2inst1/db2inst1). This gets created when we mount the cinder volume. So we need to change ownership of this directory to the DB2 instance user - db2inst1. """ LOG.debug("Changing ownership of the DB2 data directory.") try: operating_system.chown(mount_point, system.DB2_INSTANCE_OWNER, system.DB2_INSTANCE_OWNER, recursive=False, as_root=True) except exception.ProcessExecutionError: raise RuntimeError(_( "Command to change ownership of DB2 data directory failed.")) def _enable_db_on_boot(self): LOG.debug("Enable DB on boot.") try: run_command(system.ENABLE_AUTOSTART) except exception.ProcessExecutionError: raise RuntimeError(_( "Command to enable DB2 server on boot failed.")) def _disable_db_on_boot(self): LOG.debug("Disable DB2 on boot.") try: run_command(system.DISABLE_AUTOSTART) except exception.ProcessExecutionError: raise RuntimeError(_( "Command to disable DB2 server on boot failed.")) def start_db_with_conf_changes(self, config_contents): ''' Will not be implementing configuration change API for DB2 in the Kilo release. Currently all that this method does is to start the DB2 server without any configuration changes. Looks like this needs to be implemented to enable volume resize on the guest agent side. 
''' LOG.info(_("Starting DB2 with configuration changes.")) self.start_db(True) def start_db(self, update_db=False): LOG.debug("Start the DB2 server instance.") self._enable_db_on_boot() try: run_command(system.START_DB2) except exception.ProcessExecutionError: pass if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.RUNNING, self.state_change_wait_time, update_db): LOG.error(_("Start of DB2 server instance failed.")) self.status.end_restart() raise RuntimeError(_("Could not start DB2.")) def stop_db(self, update_db=False, do_not_start_on_reboot=False): LOG.debug("Stop the DB2 server instance.") if do_not_start_on_reboot: self._disable_db_on_boot() try: run_command(system.STOP_DB2) except exception.ProcessExecutionError: pass if not (self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db)): LOG.error(_("Could not stop DB2.")) self.status.end_restart() raise RuntimeError(_("Could not stop DB2.")) def restart(self): LOG.debug("Restarting DB2 server instance.") try: self.status.begin_restart() self.stop_db() self.start_db() finally: self.status.end_restart() class DB2AppStatus(service.BaseDbStatus): """ Handles all of the status updating for the DB2 guest agent. """ def _get_actual_db_status(self): LOG.debug("Getting the status of the DB2 server instance.") try: out, err = utils.execute_with_timeout( system.DB2_STATUS, shell=True) if "0" not in out: return rd_instance.ServiceStatuses.RUNNING else: return rd_instance.ServiceStatuses.SHUTDOWN except exception.ProcessExecutionError: LOG.exception(_("Error getting the DB2 server status.")) return rd_instance.ServiceStatuses.CRASHED def run_command(command, superuser=system.DB2_INSTANCE_OWNER, timeout=system.TIMEOUT): return utils.execute_with_timeout("sudo", "su", "-", superuser, "-c", command, timeout=timeout) class DB2Admin(object): """ Handles administrative tasks on the DB2 instance. """ def create_database(self, databases): """Create the given database(s).""" dbName = None db_create_failed = [] LOG.debug("Creating DB2 databases.") for item in databases: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(item) dbName = mydb.name LOG.debug("Creating DB2 database: %s." % dbName) try: run_command(system.CREATE_DB_COMMAND % {'dbname': dbName}) except exception.ProcessExecutionError: LOG.exception(_( "There was an error creating database: %s.") % dbName) db_create_failed.append(dbName) pass if len(db_create_failed) > 0: LOG.exception(_("Creating the following databases failed: %s.") % db_create_failed) def delete_database(self, database): """Delete the specified database.""" dbName = None try: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(database) dbName = mydb.name LOG.debug("Deleting DB2 database: %s." 
% dbName) run_command(system.DELETE_DB_COMMAND % {'dbname': dbName}) except exception.ProcessExecutionError: LOG.exception(_( "There was an error while deleting database:%s.") % dbName) raise exception.GuestError(_("Unable to delete database: %s.") % dbName) def list_databases(self, limit=None, marker=None, include_marker=False): LOG.debug("Listing all the DB2 databases.") databases = [] next_marker = None try: out, err = run_command(system.LIST_DB_COMMAND) dblist = out.split() result = iter(dblist) count = 0 if marker is not None: try: item = result.next() while item != marker: item = result.next() if item == marker: marker = None except StopIteration: pass try: item = result.next() while item: count = count + 1 if (limit and count <= limit) or limit is None: db2_db = models.MySQLDatabase() db2_db.name = item LOG.debug("database = %s ." % item) db2_db.character_set = None db2_db.collate = None next_marker = db2_db.name databases.append(db2_db.serialize()) item = result.next() else: next_marker = None break except StopIteration: next_marker = None LOG.debug("databases = %s." % str(databases)) except exception.ProcessExecutionError as pe: LOG.exception(_("An error occurred listing databases: %s.") % pe.message) pass return databases, next_marker def create_user(self, users): LOG.debug("Creating user(s) for accessing DB2 database(s).") try: for item in users: user = models.MySQLUser() user.deserialize(item) try: LOG.debug("Creating OS user: %s." % user.name) utils.execute_with_timeout( system.CREATE_USER_COMMAND % { 'login': user.name, 'login': user.name, 'passwd': user.password}, shell=True) except exception.ProcessExecutionError as pe: LOG.exception(_("Error creating user: %s.") % user.name) continue for database in user.databases: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(database) try: LOG.debug("Granting user: %s access to database: %s." % (user.name, mydb.name)) run_command(system.GRANT_USER_ACCESS % { 'dbname': mydb.name, 'login': user.name}) except exception.ProcessExecutionError as pe: LOG.debug( "Error granting user: %s access to database: %s." % (user.name, mydb.name)) LOG.debug(pe) pass except exception.ProcessExecutionError as pe: LOG.exception(_("An error occurred creating users: %s.") % pe.message) pass def delete_user(self, user): LOG.debug("Delete a given user.") db2_user = models.MySQLUser() db2_user.deserialize(user) userName = db2_user.name user_dbs = db2_user.databases LOG.debug("For user %s, databases to be deleted = %r." % ( userName, user_dbs)) if len(user_dbs) == 0: databases = self.list_access(db2_user.name, None) else: databases = user_dbs LOG.debug("databases for user = %r." % databases) for database in databases: mydb = models.ValidatedMySQLDatabase() mydb.deserialize(database) try: run_command(system.REVOKE_USER_ACCESS % { 'dbname': mydb.name, 'login': userName}) LOG.debug("Revoked access for user:%s on database:%s." % ( userName, mydb.name)) except exception.ProcessExecutionError as pe: LOG.debug("Error occurred while revoking access to %s." 
% mydb.name) pass try: utils.execute_with_timeout(system.DELETE_USER_COMMAND % { 'login': db2_user.name.lower()}, shell=True) except exception.ProcessExecutionError as pe: LOG.exception(_( "There was an error while deleting user: %s.") % pe) raise exception.GuestError(_("Unable to delete user: %s.") % userName) def list_users(self, limit=None, marker=None, include_marker=False): LOG.debug( "List all users for all the databases in a DB2 server instance.") users = [] user_map = {} next_marker = None count = 0 databases, marker = self.list_databases() for database in databases: db2_db = models.MySQLDatabase() db2_db.deserialize(database) out = None try: out, err = run_command( system.LIST_DB_USERS % {'dbname': db2_db.name}) except exception.ProcessExecutionError: LOG.debug( "There was an error while listing users for database: %s." % db2_db.name) continue userlist = [] for item in out.split('\n'): LOG.debug("item = %r" % item) user = item.split() if item != "" else None LOG.debug("user = %r" % (user)) if (user is not None and (user[0] not in cfg.get_ignored_users(manager='db2') and user[1] == 'Y')): userlist.append(user[0]) result = iter(userlist) if marker is not None: try: item = result.next() while item != marker: item = result.next() if item == marker: marker = None except StopIteration: pass try: item = result.next() db2db = models.MySQLDatabase() db2db.name = db2_db.name while item: ''' Check if the user has already been discovered. If so, add this database to the database list for this user. ''' if item in user_map: db2user = user_map.get(item) db2user.databases.append(db2db.serialize()) item = result.next() continue ''' If this user was not previously discovered, then add this to the user's list. ''' count = count + 1 if (limit and count <= limit) or limit is None: db2_user = models.MySQLUser() db2_user.name = item db2_user.databases.append(db2db.serialize()) users.append(db2_user.serialize()) user_map.update({item: db2_user}) item = result.next() else: next_marker = None break except StopIteration: next_marker = None if count == limit: break return users, next_marker def get_user(self, username, hostname): LOG.debug("Get details of a given database user.") user = self._get_user(username, hostname) if not user: return None return user.serialize() def _get_user(self, username, hostname): LOG.debug("Get details of a given database user %s." % username) user = models.MySQLUser() user.name = username databases, marker = self.list_databases() out = None for database in databases: db2_db = models.MySQLDatabase() db2_db.deserialize(database) try: out, err = run_command( system.LIST_DB_USERS % {'dbname': db2_db.name}) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get the users for database: %s." % db2_db.name) continue for item in out.split('\n'): user_access = item.split() if item != "" else None if (user_access is not None and user_access[0].lower() == username.lower() and user_access[1] == 'Y'): user.databases = db2_db.name break return user def list_access(self, username, hostname): """ Show all the databases to which the user has more than USAGE granted. """ LOG.debug("Listing databases that user: %s has access to." 
% username) user = self._get_user(username, hostname) return user.databases trove-5.0.0/trove/guestagent/datastore/galera_common/0000775000567000056710000000000012701410521024103 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/galera_common/__init__.py0000664000567000056710000000000012701410316026204 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/guestagent/datastore/galera_common/manager.py0000664000567000056710000000626712701410316026104 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.guestagent.datastore.mysql_common import manager LOG = logging.getLogger(__name__) class GaleraManager(manager.MySqlManager): def __init__(self, mysql_app, mysql_app_status, mysql_admin, manager_name='galera'): super(GaleraManager, self).__init__( mysql_app, mysql_app_status, mysql_admin, manager_name) self._mysql_app = mysql_app self._mysql_app_status = mysql_app_status self._mysql_admin = mysql_admin self.volume_do_not_start_on_reboot = False def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): self.volume_do_not_start_on_reboot = True super(GaleraManager, self).do_prepare( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) def install_cluster(self, context, replication_user, cluster_configuration, bootstrap): app = self.mysql_app(self.mysql_app_status.get()) try: app.install_cluster( replication_user, cluster_configuration, bootstrap) LOG.debug("install_cluster call has finished.") except Exception: LOG.exception(_('Cluster installation failed.')) app.status.set_status( rd_instance.ServiceStatuses.FAILED) raise def reset_admin_password(self, context, admin_password): LOG.debug("Storing the admin password on the instance.") app = self.mysql_app(self.mysql_app_status.get()) app.reset_admin_password(admin_password) def get_cluster_context(self, context): LOG.debug("Getting the cluster context.") app = self.mysql_app(self.mysql_app_status.get()) return app.get_cluster_context() def write_cluster_configuration_overrides(self, context, cluster_configuration): LOG.debug("Apply the updated cluster configuration.") app = self.mysql_app(self.mysql_app_status.get()) app.write_cluster_configuration_overrides(cluster_configuration) def enable_root_with_password(self, context, root_password=None): return self.mysql_admin().enable_root(root_password) trove-5.0.0/trove/guestagent/datastore/galera_common/service.py0000664000567000056710000001015312701410316026117 0ustar jenkinsjenkins00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc

from oslo_log import log as logging
import sqlalchemy
from sqlalchemy.sql.expression import text

from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import sql_query
from trove.guestagent.datastore.mysql_common import service

LOG = logging.getLogger(__name__)
CONF = service.CONF


class GaleraApp(service.BaseMySqlApp):

    def __init__(self, status, local_sql_client, keep_alive_connection_cls):
        super(GaleraApp, self).__init__(status, local_sql_client,
                                        keep_alive_connection_cls)

    def _test_mysql(self):
        engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
                                          echo=True)
        try:
            with self.local_sql_client(engine) as client:
                out = client.execute(text("select 1;"))
                for line in out:
                    LOG.debug("line: %s" % line)
                return True
        except Exception:
            return False

    def _wait_for_mysql_to_be_really_alive(self, max_time):
        utils.poll_until(self._test_mysql, sleep_time=3, time_out=max_time)

    def _grant_cluster_replication_privilege(self, replication_user):
        LOG.info(_("Granting Replication Slave privilege."))
        with self.local_sql_client(self.get_engine()) as client:
            perms = ['REPLICATION CLIENT', 'RELOAD', 'LOCK TABLES']
            g = sql_query.Grant(permissions=perms,
                                user=replication_user['name'],
                                clear=replication_user['password'])
            t = text(str(g))
            client.execute(t)

    def _bootstrap_cluster(self, timeout=120):
        LOG.info(_("Bootstrapping cluster."))
        try:
            utils.execute_with_timeout(
                self.mysql_service['cmd_bootstrap_galera_cluster'],
                shell=True, timeout=timeout)
        except KeyError:
            LOG.exception(_("Error bootstrapping cluster."))
            raise RuntimeError(_("Service is not discovered."))

    def write_cluster_configuration_overrides(self, cluster_configuration):
        self.configuration_manager.apply_system_override(
            cluster_configuration, 'cluster')

    def install_cluster(self, replication_user, cluster_configuration,
                        bootstrap=False):
        LOG.info(_("Installing cluster configuration."))
        self._grant_cluster_replication_privilege(replication_user)
        self.stop_db()
        self.write_cluster_configuration_overrides(cluster_configuration)
        self.wipe_ib_logfiles()
        LOG.debug("Bootstrap the instance? %s" % bootstrap)
        # Have to wait to sync up the joiner instances with the donor
        # instance.
        if bootstrap:
            self._bootstrap_cluster(timeout=CONF.restore_usage_timeout)
        else:
            self.start_mysql(timeout=CONF.restore_usage_timeout)

    @abc.abstractproperty
    def cluster_configuration(self):
        """
        Returns the cluster section from the configuration manager.
        """

    def get_cluster_context(self):
        auth = self.cluster_configuration.get(
            "wsrep_sst_auth").replace('"', '')
        cluster_name = self.cluster_configuration.get("wsrep_cluster_name")
        return {
            'replication_user': {
                'name': auth.split(":")[0],
                'password': auth.split(":")[1],
            },
            'cluster_name': cluster_name,
            'admin_password': self.get_auth_password()
        }
trove-5.0.0/trove/guestagent/api.py0000664000567000056710000005207612701410316020451 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to the Platform or Guest VM """ from eventlet import Timeout from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc.client import RemoteError from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.notification import NotificationCastWrapper import trove.common.rpc.version as rpc_version from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) AGENT_LOW_TIMEOUT = CONF.agent_call_low_timeout AGENT_HIGH_TIMEOUT = CONF.agent_call_high_timeout AGENT_SNAPSHOT_TIMEOUT = CONF.agent_replication_snapshot_timeout class API(object): """API for interacting with the guest manager.""" def __init__(self, context, id): self.context = context self.id = id super(API, self).__init__() target = messaging.Target(topic=self._get_routing_key(), version=rpc_version.RPC_API_VERSION) self.version_cap = rpc_version.VERSION_ALIASES.get( CONF.upgrade_levels.guestagent) self.client = self.get_client(target, self.version_cap) def get_client(self, target, version_cap, serializer=None): return rpc.get_client(target, version_cap=version_cap, serializer=serializer) def _call(self, method_name, timeout_sec, version, **kwargs): LOG.debug("Calling %s with timeout %s" % (method_name, timeout_sec)) try: cctxt = self.client.prepare(version=version, timeout=timeout_sec) result = cctxt.call(self.context, method_name, **kwargs) LOG.debug("Result is %s." % result) return result except RemoteError as r: LOG.exception(_("Error calling %s") % method_name) raise exception.GuestError(original_message=r.value) except Exception as e: LOG.exception(_("Error calling %s") % method_name) raise exception.GuestError(original_message=str(e)) except Timeout: raise exception.GuestTimeout() def _cast(self, method_name, version, **kwargs): LOG.debug("Casting %s" % method_name) try: with NotificationCastWrapper(self.context, 'guest'): cctxt = self.client.prepare(version=version) cctxt.cast(self.context, method_name, **kwargs) except RemoteError as r: LOG.exception(_("Error calling %s") % method_name) raise exception.GuestError(original_message=r.value) except Exception as e: LOG.exception(_("Error calling %s") % method_name) raise exception.GuestError(original_message=str(e)) def _get_routing_key(self): """Create the routing key based on the container id.""" return "guestagent.%s" % self.id def change_passwords(self, users): """Make an asynchronous call to change the passwords of one or more users.
""" LOG.debug("Changing passwords for users on instance %s.", self.id) self._cast("change_passwords", self.version_cap, users=users) def update_attributes(self, username, hostname, user_attrs): """Update user attributes.""" LOG.debug("Changing user attributes on instance %s.", self.id) self._cast("update_attributes", self.version_cap, username=username, hostname=hostname, user_attrs=user_attrs) def create_user(self, users): """Make an asynchronous call to create a new database user""" LOG.debug("Creating Users for instance %s.", self.id) self._cast("create_user", self.version_cap, users=users) def get_user(self, username, hostname): """Make an asynchronous call to get a single database user.""" LOG.debug("Getting a user %(username)s on instance %(id)s.", {'username': username, 'id': self.id}) return self._call("get_user", AGENT_LOW_TIMEOUT, self.version_cap, username=username, hostname=hostname) def list_access(self, username, hostname): """Show all the databases to which a user has more than USAGE.""" LOG.debug("Showing user %(username)s grants on instance %(id)s.", {'username': username, 'id': self.id}) return self._call("list_access", AGENT_LOW_TIMEOUT, self.version_cap, username=username, hostname=hostname) def grant_access(self, username, hostname, databases): """Grant a user permission to use a given database.""" LOG.debug("Granting access to databases %(databases)s for user " "%(username)s on instance %(id)s.", {'username': username, 'databases': databases, 'id': self.id}) return self._call("grant_access", AGENT_LOW_TIMEOUT, self.version_cap, username=username, hostname=hostname, databases=databases) def revoke_access(self, username, hostname, database): """Remove a user's permission to use a given database.""" LOG.debug("Revoking access from database %(database)s for user " "%(username)s on instance %(id)s.", {'username': username, 'database': database, 'id': self.id}) return self._call("revoke_access", AGENT_LOW_TIMEOUT, self.version_cap, username=username, hostname=hostname, database=database) def list_users(self, limit=None, marker=None, include_marker=False): """Make an asynchronous call to list database users.""" LOG.debug("Listing Users for instance %s.", self.id) return self._call("list_users", AGENT_HIGH_TIMEOUT, self.version_cap, limit=limit, marker=marker, include_marker=include_marker) def delete_user(self, user): """Make an asynchronous call to delete an existing database user.""" LOG.debug("Deleting user %(user)s for instance %(instance_id)s." % {'user': user, 'instance_id': self.id}) self._cast("delete_user", self.version_cap, user=user) def create_database(self, databases): """Make an asynchronous call to create a new database within the specified container """ LOG.debug("Creating databases for instance %s.", self.id) self._cast("create_database", self.version_cap, databases=databases) def list_databases(self, limit=None, marker=None, include_marker=False): """Make an asynchronous call to list databases.""" LOG.debug("Listing databases for instance %s.", self.id) return self._call("list_databases", AGENT_LOW_TIMEOUT, self.version_cap, limit=limit, marker=marker, include_marker=include_marker) def delete_database(self, database): """Make an asynchronous call to delete an existing database within the specified container """ LOG.debug("Deleting database %(database)s for " "instance %(instance_id)s." 
% {'database': database, 'instance_id': self.id}) self._cast("delete_database", self.version_cap, database=database) def enable_root(self): """Make a synchronous call to enable the root user for access from anywhere """ LOG.debug("Enable root user for instance %s.", self.id) return self._call("enable_root", AGENT_HIGH_TIMEOUT, self.version_cap) def enable_root_with_password(self, root_password=None): """Make a synchronous call to enable the root user for access from anywhere """ LOG.debug("Enable root user for instance %s.", self.id) return self._call("enable_root_with_password", AGENT_HIGH_TIMEOUT, self.version_cap, root_password=root_password) def disable_root(self): """Make a synchronous call to disable the root user for access from anywhere """ LOG.debug("Disable root user for instance %s.", self.id) return self._call("disable_root", AGENT_LOW_TIMEOUT, self.version_cap) def is_root_enabled(self): """Make a synchronous call to check if root access is available for the container """ LOG.debug("Check root access for instance %s.", self.id) return self._call("is_root_enabled", AGENT_LOW_TIMEOUT, self.version_cap) def get_hwinfo(self): """Make a synchronous call to get hardware info for the container""" LOG.debug("Check hwinfo on instance %s.", self.id) return self._call("get_hwinfo", AGENT_LOW_TIMEOUT, self.version_cap) def get_diagnostics(self): """Make a synchronous call to get diagnostics for the container""" LOG.debug("Check diagnostics on instance %s.", self.id) return self._call("get_diagnostics", AGENT_LOW_TIMEOUT, self.version_cap) def rpc_ping(self): """Make a synchronous RPC call to check if we can ping the instance.""" LOG.debug("Check RPC ping on instance %s.", self.id) return self._call("rpc_ping", AGENT_LOW_TIMEOUT, self.version_cap) def prepare(self, memory_mb, packages, databases, users, device_path='/dev/vdb', mount_point='/mnt/volume', backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): """Make an asynchronous call to prepare the guest as a database container optionally includes a backup id for restores """ LOG.debug("Sending the call to prepare the Guest.") # Taskmanager is a publisher, guestagent is a consumer. Usually # consumer creates a queue, but in this case we have to make sure # "prepare" doesn't get lost if for some reason guest was delayed and # didn't create a queue on time. self._create_guest_queue() packages = packages.split() self._cast( "prepare", self.version_cap, packages=packages, databases=databases, memory_mb=memory_mb, users=users, device_path=device_path, mount_point=mount_point, backup_info=backup_info, config_contents=config_contents, root_password=root_password, overrides=overrides, cluster_config=cluster_config, snapshot=snapshot, modules=modules) def _create_guest_queue(self): """Call to construct, start and immediately stop rpc server in order to create a queue to communicate with the guestagent. 
This method does nothing if a queue has already been created by the guest. """ server = None target = messaging.Target(topic=self._get_routing_key(), server=self.id, version=rpc_version.RPC_API_VERSION) try: server = rpc.get_server(target, []) server.start() finally: if server is not None: server.stop() server.wait() def restart(self): """Restart the database server.""" LOG.debug("Sending the call to restart the database process " "on the Guest.") self._call("restart", AGENT_HIGH_TIMEOUT, self.version_cap) def start_db_with_conf_changes(self, config_contents): """Start the database server.""" LOG.debug("Sending the call to start the database process on " "the Guest with a timeout of %s." % AGENT_HIGH_TIMEOUT) self._call("start_db_with_conf_changes", AGENT_HIGH_TIMEOUT, self.version_cap, config_contents=config_contents) def reset_configuration(self, configuration): """Ignore running state of the database server; just change the config file to a new flavor. """ LOG.debug("Sending the call to change the database conf file on the " "Guest with a timeout of %s." % AGENT_HIGH_TIMEOUT) self._call("reset_configuration", AGENT_HIGH_TIMEOUT, self.version_cap, configuration=configuration) def stop_db(self, do_not_start_on_reboot=False): """Stop the database server.""" LOG.debug("Sending the call to stop the database process " "on the Guest.") self._call("stop_db", AGENT_HIGH_TIMEOUT, self.version_cap, do_not_start_on_reboot=do_not_start_on_reboot) def upgrade(self, instance_version, location, metadata=None): """Make an asynchronous call to self upgrade the guest agent.""" LOG.debug("Sending an upgrade call to nova-guest.") self._cast("upgrade", self.version_cap, instance_version=instance_version, location=location, metadata=metadata) def get_volume_info(self): """Make a synchronous call to get volume info for the container.""" LOG.debug("Check Volume Info on instance %s.", self.id) return self._call("get_filesystem_stats", AGENT_LOW_TIMEOUT, self.version_cap, fs_path=None) def update_guest(self): """Make a synchronous call to update the guest agent.""" LOG.debug("Updating guest agent on instance %s.", self.id) self._call("update_guest", AGENT_HIGH_TIMEOUT, self.version_cap) def create_backup(self, backup_info): """Make async call to create a full backup of this instance.""" LOG.debug("Create Backup %(backup_id)s " "for instance %(instance_id)s." % {'backup_id': backup_info['id'], 'instance_id': self.id}) self._cast("create_backup", self.version_cap, backup_info=backup_info) def mount_volume(self, device_path=None, mount_point=None): """Mount the volume.""" LOG.debug("Mount volume %(mount)s on instance %(id)s." % { 'mount': mount_point, 'id': self.id}) self._call("mount_volume", AGENT_LOW_TIMEOUT, self.version_cap, device_path=device_path, mount_point=mount_point) def unmount_volume(self, device_path=None, mount_point=None): """Unmount the volume.""" LOG.debug("Unmount volume %(device)s on instance %(id)s." % { 'device': device_path, 'id': self.id}) self._call("unmount_volume", AGENT_LOW_TIMEOUT, self.version_cap, device_path=device_path, mount_point=mount_point) def resize_fs(self, device_path=None, mount_point=None): """Resize the filesystem.""" LOG.debug("Resize device %(device)s on instance %(id)s."
% { 'device': device_path, 'id': self.id}) self._call("resize_fs", AGENT_HIGH_TIMEOUT, self.version_cap, device_path=device_path, mount_point=mount_point) def update_overrides(self, overrides, remove=False): """Update the overrides.""" LOG.debug("Updating overrides values %(overrides)s on instance " "%(id)s.", {'overrides': overrides, 'id': self.id}) self._call("update_overrides", AGENT_HIGH_TIMEOUT, self.version_cap, overrides=overrides, remove=remove) def apply_overrides(self, overrides): LOG.debug("Applying overrides values %(overrides)s on instance " "%(id)s.", {'overrides': overrides, 'id': self.id}) self._call("apply_overrides", AGENT_HIGH_TIMEOUT, self.version_cap, overrides=overrides) def backup_required_for_replication(self): LOG.debug("Checking backup requirement for replication") return self._call("backup_required_for_replication", AGENT_LOW_TIMEOUT, self.version_cap) def get_replication_snapshot(self, snapshot_info=None, replica_source_config=None): LOG.debug("Retrieving replication snapshot from instance %s.", self.id) return self._call("get_replication_snapshot", AGENT_SNAPSHOT_TIMEOUT, self.version_cap, snapshot_info=snapshot_info, replica_source_config=replica_source_config) def attach_replication_slave(self, snapshot, replica_config=None): LOG.debug("Configuring instance %s to replicate from %s.", self.id, snapshot.get('master').get('id')) self._cast("attach_replication_slave", self.version_cap, snapshot=snapshot, slave_config=replica_config) def detach_replica(self, for_failover=False): LOG.debug("Detaching replica %s from its replication source.", self.id) return self._call("detach_replica", AGENT_HIGH_TIMEOUT, self.version_cap, for_failover=for_failover) def get_replica_context(self): LOG.debug("Getting replica context.") return self._call("get_replica_context", AGENT_HIGH_TIMEOUT, self.version_cap) def attach_replica(self, replica_info, slave_config): LOG.debug("Attaching replica %s." 
% replica_info) self._call("attach_replica", AGENT_HIGH_TIMEOUT, self.version_cap, replica_info=replica_info, slave_config=slave_config) def make_read_only(self, read_only): LOG.debug("Executing make_read_only(%s)" % read_only) self._call("make_read_only", AGENT_HIGH_TIMEOUT, self.version_cap, read_only=read_only) def enable_as_master(self, replica_source_config): LOG.debug("Executing enable_as_master") self._call("enable_as_master", AGENT_HIGH_TIMEOUT, self.version_cap, replica_source_config=replica_source_config) # DEPRECATED: Maintain for API Compatibility def get_txn_count(self): LOG.debug("Executing get_txn_count.") return self._call("get_txn_count", AGENT_HIGH_TIMEOUT, self.version_cap) def get_last_txn(self): LOG.debug("Executing get_last_txn.") return self._call("get_last_txn", AGENT_HIGH_TIMEOUT, self.version_cap) def get_latest_txn_id(self): LOG.debug("Executing get_latest_txn_id.") return self._call("get_latest_txn_id", AGENT_HIGH_TIMEOUT, self.version_cap) def wait_for_txn(self, txn): LOG.debug("Executing wait_for_txn.") self._call("wait_for_txn", AGENT_HIGH_TIMEOUT, self.version_cap, txn=txn) def cleanup_source_on_replica_detach(self, replica_info): LOG.debug("Cleaning up master %s on detach of replica.", self.id) self._call("cleanup_source_on_replica_detach", AGENT_HIGH_TIMEOUT, self.version_cap, replica_info=replica_info) def demote_replication_master(self): LOG.debug("Demoting instance %s to non-master.", self.id) self._call("demote_replication_master", AGENT_HIGH_TIMEOUT, self.version_cap) def guest_log_list(self): LOG.debug("Retrieving guest log list for %s.", self.id) result = self._call("guest_log_list", AGENT_HIGH_TIMEOUT, self.version_cap) LOG.debug("guest_log_list returns %s", result) return result def guest_log_action(self, log_name, enable, disable, publish, discard): LOG.debug("Processing guest log '%s' for %s.", log_name, self.id) return self._call("guest_log_action", AGENT_HIGH_TIMEOUT, self.version_cap, log_name=log_name, enable=enable, disable=disable, publish=publish, discard=discard) def module_list(self, include_contents): LOG.debug("Querying modules on %s (contents: %s).", self.id, include_contents) result = self._call("module_list", AGENT_HIGH_TIMEOUT, self.version_cap, include_contents=include_contents) return result def module_apply(self, modules): LOG.debug("Applying modules to %s.", self.id) return self._call("module_apply", AGENT_HIGH_TIMEOUT, self.version_cap, modules=modules) def module_remove(self, module): LOG.debug("Removing modules from %s.", self.id) return self._call("module_remove", AGENT_HIGH_TIMEOUT, self.version_cap, module=module) trove-5.0.0/trove/guestagent/dbaas.py0000664000567000056710000000634612701410316020751 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Handles all processes within the Guest VM, considering it as a Platform The :py:class:`GuestManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to Platform specific operations. **Related Flags** """ from itertools import chain import os from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ LOG = logging.getLogger(__name__) defaults = { 'mysql': 'trove.guestagent.datastore.mysql.manager.Manager', 'percona': 'trove.guestagent.datastore.experimental.percona.manager.Manager', 'pxc': 'trove.guestagent.datastore.experimental.pxc.manager.Manager', 'redis': 'trove.guestagent.datastore.experimental.redis.manager.Manager', 'cassandra': 'trove.guestagent.datastore.experimental.cassandra.manager.Manager', 'couchbase': 'trove.guestagent.datastore.experimental.couchbase.manager.Manager', 'mongodb': 'trove.guestagent.datastore.experimental.mongodb.manager.Manager', 'postgresql': 'trove.guestagent.datastore.experimental.postgresql.manager.Manager', 'couchdb': 'trove.guestagent.datastore.experimental.couchdb.manager.Manager', 'vertica': 'trove.guestagent.datastore.experimental.vertica.manager.Manager', 'db2': 'trove.guestagent.datastore.experimental.db2.manager.Manager', 'mariadb': 'trove.guestagent.datastore.experimental.mariadb.manager.Manager' } CONF = cfg.CONF def get_custom_managers(): return CONF.datastore_registry_ext def datastore_registry(): return dict(chain(defaults.iteritems(), get_custom_managers().iteritems())) def to_gb(bytes): if bytes == 0: return 0.0 size = bytes / 1024.0 ** 3 # Make sure we don't return 0.0 if the size is greater than 0 return max(round(size, 2), 0.01) def to_mb(bytes): if bytes == 0: return 0.0 size = bytes / 1024.0 ** 2 # Make sure we don't return 0.0 if the size is greater than 0 return max(round(size, 2), 0.01) def get_filesystem_volume_stats(fs_path): try: stats = os.statvfs(fs_path) except OSError: LOG.exception(_("Error getting volume stats.")) raise RuntimeError("Filesystem not found (%s)" % fs_path) total = stats.f_blocks * stats.f_bsize free = stats.f_bfree * stats.f_bsize # return the size in GB used_gb = to_gb(total - free) total_gb = to_gb(total) output = { 'block_size': stats.f_bsize, 'total_blocks': stats.f_blocks, 'free_blocks': stats.f_bfree, 'total': total_gb, 'free': free, 'used': used_gb } return output trove-5.0.0/trove/taskmanager/0000775000567000056710000000000012701410521017441 5ustar jenkinsjenkins00000000000000trove-5.0.0/trove/taskmanager/__init__.py0000664000567000056710000000000012701410316021542 0ustar jenkinsjenkins00000000000000trove-5.0.0/trove/taskmanager/manager.py0000664000567000056710000005040612701410316021434 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sets import Set from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from oslo_utils import importutils from trove.backup.models import Backup import trove.common.cfg as cfg from trove.common.context import TroveContext from trove.common import exception from trove.common.exception import ReplicationSlaveAttachError from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common.notification import DBaaSQuotas, EndNotification from trove.common import remote import trove.common.rpc.version as rpc_version from trove.common.strategies.cluster import strategy import trove.extensions.mgmt.instances.models as mgmtmodels from trove.instance.tasks import InstanceTasks from trove.taskmanager import models from trove.taskmanager.models import FreshInstanceTasks, BuiltInstanceTasks from trove.quota.quota import QUOTAS LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(periodic_task.PeriodicTasks): target = messaging.Target(version=rpc_version.RPC_API_VERSION) def __init__(self): super(Manager, self).__init__(CONF) self.admin_context = TroveContext( user=CONF.nova_proxy_admin_user, auth_token=CONF.nova_proxy_admin_pass, tenant=CONF.nova_proxy_admin_tenant_id) if CONF.exists_notification_transformer: self.exists_transformer = importutils.import_object( CONF.exists_notification_transformer, context=self.admin_context) def resize_volume(self, context, instance_id, new_size): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.resize_volume(new_size) def resize_flavor(self, context, instance_id, old_flavor, new_flavor): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.resize_flavor(old_flavor, new_flavor) def reboot(self, context, instance_id): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.reboot() def restart(self, context, instance_id): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.restart() def detach_replica(self, context, instance_id): with EndNotification(context): slave = models.BuiltInstanceTasks.load(context, instance_id) master_id = slave.slave_of_id master = models.BuiltInstanceTasks.load(context, master_id) slave.detach_replica(master) def _set_task_status(self, instances, status): for instance in instances: setattr(instance.db_info, 'task_status', status) instance.db_info.save() def promote_to_replica_source(self, context, instance_id): def _promote_to_replica_source(old_master, master_candidate, replica_models): # First, we transition from the old master to new as quickly as # possible to minimize the scope of unrecoverable error old_master.make_read_only(True) master_ips = old_master.detach_public_ips() slave_ips = master_candidate.detach_public_ips() latest_txn_id = old_master.get_latest_txn_id() master_candidate.wait_for_txn(latest_txn_id) master_candidate.detach_replica(old_master, for_failover=True) master_candidate.enable_as_master() old_master.attach_replica(master_candidate) master_candidate.attach_public_ips(master_ips) master_candidate.make_read_only(False) old_master.attach_public_ips(slave_ips) # At this point, should something go wrong, there # should be a working master with some number of working slaves, # and possibly some number of "orphaned" slaves exception_replicas = [] for replica in replica_models: 
try: if replica.id != master_candidate.id: replica.detach_replica(old_master, for_failover=True) replica.attach_replica(master_candidate) except exception.TroveError: msg = _("promote-to-replica-source: Unable to migrate " "replica %(slave)s from old replica source " "%(old_master)s to new source %(new_master)s.") msg_values = { "slave": replica.id, "old_master": old_master.id, "new_master": master_candidate.id } LOG.exception(msg % msg_values) exception_replicas.append(replica) try: old_master.demote_replication_master() except Exception: LOG.exception(_("Exception demoting old replica source")) exception_replicas.append(old_master) self._set_task_status([old_master] + replica_models, InstanceTasks.NONE) if exception_replicas: self._set_task_status(exception_replicas, InstanceTasks.PROMOTION_ERROR) msg = _("promote-to-replica-source %(id)s: The following " "replicas may not have been switched: %(replicas)s") msg_values = { "id": master_candidate.id, "replicas": exception_replicas } raise ReplicationSlaveAttachError(msg % msg_values) with EndNotification(context): master_candidate = BuiltInstanceTasks.load(context, instance_id) old_master = BuiltInstanceTasks.load(context, master_candidate.slave_of_id) replicas = [] for replica_dbinfo in old_master.slaves: if replica_dbinfo.id == instance_id: replica = master_candidate else: replica = BuiltInstanceTasks.load(context, replica_dbinfo.id) replicas.append(replica) try: _promote_to_replica_source(old_master, master_candidate, replicas) except ReplicationSlaveAttachError: raise except Exception: self._set_task_status([old_master] + replicas, InstanceTasks.PROMOTION_ERROR) raise # pulled out to facilitate testing def _get_replica_txns(self, replica_models): return [[repl] + repl.get_last_txn() for repl in replica_models] def _most_current_replica(self, old_master, replica_models): last_txns = self._get_replica_txns(replica_models) master_ids = [txn[1] for txn in last_txns if txn[1]] if len(Set(master_ids)) > 1: raise TroveError(_("Replicas of %s not all replicating" " from same master") % old_master.id) return sorted(last_txns, key=lambda x: x[2], reverse=True)[0][0] def eject_replica_source(self, context, instance_id): def _eject_replica_source(old_master, replica_models): master_candidate = self._most_current_replica(old_master, replica_models) master_ips = old_master.detach_public_ips() slave_ips = master_candidate.detach_public_ips() master_candidate.detach_replica(old_master, for_failover=True) master_candidate.enable_as_master() master_candidate.attach_public_ips(master_ips) master_candidate.make_read_only(False) old_master.attach_public_ips(slave_ips) exception_replicas = [] for replica in replica_models: try: if replica.id != master_candidate.id: replica.detach_replica(old_master, for_failover=True) replica.attach_replica(master_candidate) except exception.TroveError: msg = _("eject-replica-source: Unable to migrate " "replica %(slave)s from old replica source " "%(old_master)s to new source %(new_master)s.") msg_values = { "slave": replica.id, "old_master": old_master.id, "new_master": master_candidate.id } LOG.exception(msg % msg_values) exception_replicas.append(replica.id) self._set_task_status([old_master] + replica_models, InstanceTasks.NONE) if exception_replicas: self._set_task_status(exception_replicas, InstanceTasks.EJECTION_ERROR) msg = _("eject-replica-source %(id)s: The following " "replicas may not have been switched: %(replicas)s") msg_values = { "id": master_candidate.id, "replicas": exception_replicas } raise 
ReplicationSlaveAttachError(msg % msg_values) with EndNotification(context): master = BuiltInstanceTasks.load(context, instance_id) replicas = [BuiltInstanceTasks.load(context, dbinfo.id) for dbinfo in master.slaves] try: _eject_replica_source(master, replicas) except ReplicationSlaveAttachError: raise except Exception: self._set_task_status([master] + replicas, InstanceTasks.EJECTION_ERROR) raise def migrate(self, context, instance_id, host): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.migrate(host) def delete_instance(self, context, instance_id): with EndNotification(context): try: instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.delete_async() except exception.UnprocessableEntity: instance_tasks = models.FreshInstanceTasks.load(context, instance_id) instance_tasks.delete_async() def delete_backup(self, context, backup_id): with EndNotification(context): models.BackupTasks.delete_backup(context, backup_id) def create_backup(self, context, backup_info, instance_id): with EndNotification(context, backup_id=backup_info['id']): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.create_backup(backup_info) def _create_replication_slave(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, availability_zone, root_password, nics, overrides, slave_of_id, backup_id, volume_type, modules): if type(instance_id) in [list]: ids = instance_id root_passwords = root_password else: ids = [instance_id] root_passwords = [root_password] replica_number = 0 replica_backup_id = backup_id replica_backup_created = False replicas = [] try: for replica_index in range(0, len(ids)): try: replica_number += 1 LOG.debug("Creating replica %d of %d." 
% (replica_number, len(ids))) instance_tasks = FreshInstanceTasks.load( context, ids[replica_index]) snapshot = instance_tasks.get_replication_master_snapshot( context, slave_of_id, flavor, replica_backup_id, replica_number=replica_number) replica_backup_id = snapshot['dataset']['snapshot_id'] replica_backup_created = (replica_backup_id is not None) instance_tasks.create_instance( flavor, image_id, databases, users, datastore_manager, packages, volume_size, replica_backup_id, availability_zone, root_passwords[replica_index], nics, overrides, None, snapshot, volume_type, modules) replicas.append(instance_tasks) except Exception: # if it's the first replica, then we shouldn't continue LOG.exception(_( "Could not create replica %(num)d of %(count)d.") % {'num': replica_number, 'count': len(ids)}) if replica_number == 1: raise for replica in replicas: replica.wait_for_instance(CONF.restore_usage_timeout, flavor) finally: if replica_backup_created: Backup.delete(context, replica_backup_id) def _create_instance(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules): if slave_of_id: self._create_replication_slave(context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, availability_zone, root_password, nics, overrides, slave_of_id, backup_id, volume_type, modules) else: if type(instance_id) in [list]: raise AttributeError(_( "Cannot create multiple non-replica instances.")) instance_tasks = FreshInstanceTasks.load(context, instance_id) instance_tasks.create_instance(flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, cluster_config, None, volume_type, modules) timeout = (CONF.restore_usage_timeout if backup_id else CONF.usage_timeout) instance_tasks.wait_for_instance(timeout, flavor) def create_instance(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules): with EndNotification(context, instance_id=(instance_id[0] if type(instance_id) is list else instance_id)): self._create_instance(context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules) def update_overrides(self, context, instance_id, overrides): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.update_overrides(overrides) def unassign_configuration(self, context, instance_id, flavor, configuration_id): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.unassign_configuration(flavor, configuration_id) def create_cluster(self, context, cluster_id): with EndNotification(context, cluster_id=cluster_id): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.create_cluster(context, cluster_id) def grow_cluster(self, context, cluster_id, new_instance_ids): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.grow_cluster(context, cluster_id, new_instance_ids) def shrink_cluster(self, context, cluster_id, instance_ids): cluster_tasks = models.load_cluster_tasks(context, cluster_id) 
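        # Delegate to the datastore-specific cluster-tasks object loaded
        # above; as with grow_cluster, the actual work is defined by the
        # cluster strategy registered for this cluster's datastore.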
cluster_tasks.shrink_cluster(context, cluster_id, instance_ids) def delete_cluster(self, context, cluster_id): with EndNotification(context): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.delete_cluster(context, cluster_id) if CONF.exists_notification_transformer: @periodic_task.periodic_task def publish_exists_event(self, context): """ Push this in Instance Tasks to fetch a report/collection :param context: currently None as specified in bin script """ mgmtmodels.publish_exist_events(self.exists_transformer, self.admin_context) @periodic_task.periodic_task(spacing=CONF.quota_notification_interval) def publish_quota_notifications(self, context): nova_client = remote.create_nova_client(self.admin_context) for tenant in nova_client.tenants.list(): for quota in QUOTAS.get_all_quotas_by_tenant(tenant.id): usage = QUOTAS.get_quota_usage(quota) DBaaSQuotas(self.admin_context, quota, usage).notify() def __getattr__(self, name): """ We should only get here if Python couldn't find a "real" method. """ def raise_error(msg): raise AttributeError(msg) manager, sep, method = name.partition('_') if not manager: raise_error('Cannot derive manager from attribute name "%s"' % name) task_strategy = strategy.load_taskmanager_strategy(manager) if not task_strategy: raise_error('No task manager strategy for manager "%s"' % manager) if method not in task_strategy.task_manager_manager_actions: raise_error('No method "%s" for task manager strategy for manager' ' "%s"' % (method, manager)) return task_strategy.task_manager_manager_actions.get(method) trove-5.0.0/trove/taskmanager/service.py0000664000567000056710000000151212701410316021454 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging LOG = logging.getLogger(__name__) class TaskService(object): """Task Manager interface.""" def app_factory(global_conf, **local_conf): return TaskService() trove-5.0.0/trove/taskmanager/models.py0000775000567000056710000024202512701410316021310 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
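# A note on Manager.__getattr__ in trove/taskmanager/manager.py above: any
# attribute (RPC method) the Manager does not define explicitly is split at
# the *first* underscore and resolved against a datastore task-manager
# strategy. A minimal sketch of the name splitting it relies on (the method
# name here is hypothetical):
#
#     >>> 'mongodb_add_shard'.partition('_')
#     ('mongodb', '_', 'add_shard')
#
# i.e. such a call is served by the 'add_shard' entry of the mongodb
# strategy's task_manager_manager_actions mapping, if one is registered.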
import os.path import traceback from cinderclient import exceptions as cinder_exceptions from eventlet import greenthread from heatclient import exc as heat_exceptions from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from oslo_utils import timeutils from swiftclient.client import ClientException from trove.backup import models as bkup_models from trove.backup.models import Backup from trove.backup.models import DBBackup from trove.backup.state import BackupState from trove.cluster.models import Cluster from trove.cluster.models import DBCluster from trove.cluster import tasks from trove.common import cfg from trove.common import exception from trove.common.exception import BackupCreationError from trove.common.exception import GuestError from trove.common.exception import GuestTimeout from trove.common.exception import InvalidModelError from trove.common.exception import MalformedSecurityGroupRuleError from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.exception import VolumeCreationFailure from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.instance import ServiceStatuses from trove.common.notification import ( TroveInstanceCreate, TroveInstanceModifyVolume, TroveInstanceModifyFlavor, TroveInstanceDelete) import trove.common.remote as remote from trove.common.remote import create_cinder_client from trove.common.remote import create_dns_client from trove.common.remote import create_heat_client from trove.common.strategies.cluster import strategy from trove.common import template from trove.common import utils from trove.common.utils import try_recover from trove.extensions.mysql import models as mysql_models from trove.extensions.security_group.models import ( SecurityGroupInstanceAssociation) from trove.extensions.security_group.models import SecurityGroup from trove.extensions.security_group.models import SecurityGroupRule from trove.instance import models as inst_models from trove.instance.models import BuiltInstance from trove.instance.models import DBInstance from trove.instance.models import FreshInstance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceStatus from trove.instance.tasks import InstanceTasks from trove.quota.quota import run_with_quotas from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF VOLUME_TIME_OUT = CONF.volume_time_out # seconds. DNS_TIME_OUT = CONF.dns_time_out # seconds. RESIZE_TIME_OUT = CONF.resize_time_out # seconds. REVERT_TIME_OUT = CONF.revert_time_out # seconds. HEAT_TIME_OUT = CONF.heat_time_out # seconds. USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds. HEAT_STACK_SUCCESSFUL_STATUSES = [('CREATE', 'CREATE_COMPLETE')] HEAT_RESOURCE_SUCCESSFUL_STATE = 'CREATE_COMPLETE' use_nova_server_volume = CONF.use_nova_server_volume use_heat = CONF.use_heat class NotifyMixin(object): """Notification Mixin This adds the ability to send usage events to an Instance object. 
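    Events are published as 'trove.instance.<event_type>' notifications;
    the payload is assembled from the instance, its flavor and server
    details, and any keyword overrides supplied by the caller.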
""" def _get_service_id(self, datastore_manager, id_map): if datastore_manager in id_map: datastore_manager_id = id_map[datastore_manager] else: datastore_manager_id = cfg.UNKNOWN_SERVICE_ID LOG.error(_("Datastore ID for Manager (%s) is not configured") % datastore_manager) return datastore_manager_id def send_usage_event(self, event_type, **kwargs): event_type = 'trove.instance.%s' % event_type publisher_id = CONF.host # Grab the instance size from the kwargs or from the nova client instance_size = kwargs.pop('instance_size', None) flavor = self.nova_client.flavors.get(self.flavor_id) server = kwargs.pop('server', None) if server is None: server = self.nova_client.servers.get(self.server_id) az = getattr(server, 'OS-EXT-AZ:availability_zone', None) # Default payload created_time = timeutils.isotime(self.db_info.created) payload = { 'availability_zone': az, 'created_at': created_time, 'name': self.name, 'instance_id': self.id, 'instance_name': self.name, 'instance_size': instance_size or flavor.ram, 'instance_type': flavor.name, 'instance_type_id': flavor.id, 'launched_at': created_time, 'nova_instance_id': self.server_id, 'region': CONF.region, 'state_description': self.status, 'state': self.status, 'tenant_id': self.tenant_id, 'user_id': self.context.user, } if CONF.get(self.datastore_version.manager).volume_support: payload.update({ 'volume_size': self.volume_size, 'nova_volume_id': self.volume_id }) payload['service_id'] = self._get_service_id( self.datastore_version.manager, CONF.notification_service_id) # Update payload with all other kwargs payload.update(kwargs) LOG.debug('Sending event: %(event_type)s, %(payload)s' % {'event_type': event_type, 'payload': payload}) notifier = rpc.get_notifier( service="taskmanager", publisher_id=publisher_id) notifier.info(self.context, event_type, payload) class ConfigurationMixin(object): """Configuration Mixin Configuration related tasks for instances and resizes. 
""" def _render_config(self, flavor): config = template.SingleInstanceConfigTemplate( self.datastore_version, flavor, self.id) config.render() return config def _render_replica_source_config(self, flavor): config = template.ReplicaSourceConfigTemplate( self.datastore_version, flavor, self.id) config.render() return config def _render_replica_config(self, flavor): config = template.ReplicaConfigTemplate( self.datastore_version, flavor, self.id) config.render() return config def _render_config_dict(self, flavor): config = template.SingleInstanceConfigTemplate( self.datastore_version, flavor, self.id) ret = config.render_dict() LOG.debug("the default template dict of mysqld section: %s" % ret) return ret class ClusterTasks(Cluster): def update_statuses_on_failure(self, cluster_id, shard_id=None, status=None): if CONF.update_status_on_fail: if shard_id: db_instances = DBInstance.find_all(cluster_id=cluster_id, shard_id=shard_id).all() else: db_instances = DBInstance.find_all( cluster_id=cluster_id).all() for db_instance in db_instances: db_instance.set_task_status( status or InstanceTasks.BUILDING_ERROR_SERVER) db_instance.save() @classmethod def get_ip(cls, instance): return instance.get_visible_ip_addresses()[0] def _all_instances_ready(self, instance_ids, cluster_id, shard_id=None): """Wait for all instances to get READY.""" return self._all_instances_acquire_status( instance_ids, cluster_id, shard_id, ServiceStatuses.INSTANCE_READY, fast_fail_statuses=[ServiceStatuses.FAILED, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_shutdown(self, instance_ids, cluster_id, shard_id=None): """Wait for all instances to go SHUTDOWN.""" return self._all_instances_acquire_status( instance_ids, cluster_id, shard_id, ServiceStatuses.SHUTDOWN, fast_fail_statuses=[ServiceStatuses.FAILED, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_running(self, instance_ids, cluster_id, shard_id=None): """Wait for all instances to become ACTIVE.""" return self._all_instances_acquire_status( instance_ids, cluster_id, shard_id, ServiceStatuses.RUNNING, fast_fail_statuses=[ServiceStatuses.FAILED, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_acquire_status( self, instance_ids, cluster_id, shard_id, expected_status, fast_fail_statuses=None): def _is_fast_fail_status(status): return ((fast_fail_statuses is not None) and ((status == fast_fail_statuses) or (status in fast_fail_statuses))) def _all_have_status(ids): for instance_id in ids: status = InstanceServiceStatus.find_by( instance_id=instance_id).get_status() if _is_fast_fail_status(status): # if one has failed, no need to continue polling LOG.debug("Instance %s has acquired a fast-fail status %s." % (instance_id, status)) return True if status != expected_status: # if one is not in the expected state, continue polling LOG.debug("Instance %s was %s." 
% (instance_id, status)) return False return True def _instance_ids_with_failures(ids): LOG.debug("Checking for service failures on instances: %s" % ids) failed_instance_ids = [] for instance_id in ids: status = InstanceServiceStatus.find_by( instance_id=instance_id).get_status() if _is_fast_fail_status(status): failed_instance_ids.append(instance_id) return failed_instance_ids LOG.debug("Polling until all instances acquire %s status: %s" % (expected_status, instance_ids)) try: utils.poll_until(lambda: instance_ids, lambda ids: _all_have_status(ids), sleep_time=USAGE_SLEEP_TIME, time_out=CONF.usage_timeout) except PollTimeOut: LOG.exception(_("Timed out while waiting for all instances " "to become %s.") % expected_status) self.update_statuses_on_failure(cluster_id, shard_id) return False failed_ids = _instance_ids_with_failures(instance_ids) if failed_ids: LOG.error(_("Some instances failed: %s") % failed_ids) self.update_statuses_on_failure(cluster_id, shard_id) return False LOG.debug("All instances have acquired the expected status %s." % expected_status) return True def delete_cluster(self, context, cluster_id): LOG.debug("begin delete_cluster for id: %s" % cluster_id) def all_instances_marked_deleted(): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() return len(db_instances) == 0 try: utils.poll_until(all_instances_marked_deleted, sleep_time=2, time_out=CONF.cluster_delete_time_out) except PollTimeOut: LOG.error(_("timeout for instances to be marked as deleted.")) return LOG.debug("setting cluster %s as deleted." % cluster_id) cluster = DBCluster.find_by(id=cluster_id) cluster.deleted = True cluster.deleted_at = utils.utcnow() cluster.task_status = tasks.ClusterTasks.NONE cluster.save() LOG.debug("end delete_cluster for id: %s" % cluster_id) class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): def _get_injected_files(self, datastore_manager): injected_config_location = CONF.get('injected_config_location') guest_info = CONF.get('guest_info') if ('/' in guest_info): # Set guest_info_file to exactly guest_info from the conf file. # This should be /etc/guest_info for pre-Kilo compatibility. guest_info_file = guest_info else: guest_info_file = os.path.join(injected_config_location, guest_info) files = {guest_info_file: ( "[DEFAULT]\n" "guest_id=%s\n" "datastore_manager=%s\n" "tenant_id=%s\n" % (self.id, datastore_manager, self.tenant_id))} if os.path.isfile(CONF.get('guest_config')): with open(CONF.get('guest_config'), "r") as f: files[os.path.join(injected_config_location, "trove-guestagent.conf")] = f.read() return files def wait_for_instance(self, timeout, flavor): # Make sure the service becomes active before sending a usage # record to avoid over billing a customer for an instance that # fails to build properly. try: utils.poll_until(self._service_is_active, sleep_time=USAGE_SLEEP_TIME, time_out=timeout) LOG.info(_("Created instance %s successfully.") % self.id) TroveInstanceCreate(instance=self, instance_size=flavor['ram']).notify() except PollTimeOut: LOG.error(_("Failed to create instance %s. " "Timeout waiting for instance to become active. 
" "No usage create-event was sent.") % self.id) self.update_statuses_on_time_out() except Exception: LOG.exception(_("Failed to send usage create-event for " "instance %s.") % self.id) def create_instance(self, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, cluster_config, snapshot, volume_type, modules): # It is the caller's responsibility to ensure that # FreshInstanceTasks.wait_for_instance is called after # create_instance to ensure that the proper usage event gets sent LOG.info(_("Creating instance %s.") % self.id) security_groups = None # If security group support is enabled and heat based instance # orchestration is disabled, create a security group. # # Heat based orchestration handles security group(resource) # in the template definition. if CONF.trove_security_groups_support and not use_heat: try: security_groups = self._create_secgroup(datastore_manager) except Exception as e: msg = (_("Error creating security group for instance: %s") % self.id) err = inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP self._log_and_raise(e, msg, err) else: LOG.debug("Successfully created security group for " "instance: %s" % self.id) files = self._get_injected_files(datastore_manager) cinder_volume_type = volume_type or CONF.cinder_volume_type if use_heat: volume_info = self._create_server_volume_heat( flavor, image_id, datastore_manager, volume_size, availability_zone, nics, files, cinder_volume_type) elif use_nova_server_volume: volume_info = self._create_server_volume( flavor['id'], image_id, security_groups, datastore_manager, volume_size, availability_zone, nics, files) else: volume_info = self._create_server_volume_individually( flavor['id'], image_id, security_groups, datastore_manager, volume_size, availability_zone, nics, files, cinder_volume_type) config = self._render_config(flavor) backup_info = None if backup_id is not None: backup = bkup_models.Backup.get_by_id(self.context, backup_id) backup_info = {'id': backup_id, 'instance_id': backup.instance_id, 'location': backup.location, 'type': backup.backup_type, 'checksum': backup.checksum, } self._guest_prepare(flavor['ram'], volume_info, packages, databases, users, backup_info, config.config_contents, root_password, overrides, cluster_config, snapshot, modules) if root_password: self.report_root_enabled() if not self.db_info.task_status.is_error: self.reset_task_status() # when DNS is supported, we attempt to add this after the # instance is prepared. Otherwise, if DNS fails, instances # end up in a poorer state and there's no tooling around # re-sending the prepare call; retrying DNS is much easier. 
try: self._create_dns_entry() except Exception as e: msg = _("Error creating DNS entry for instance: %s") % self.id err = inst_models.InstanceTasks.BUILDING_ERROR_DNS self._log_and_raise(e, msg, err) def attach_replication_slave(self, snapshot, flavor): LOG.debug("Calling attach_replication_slave for %s.", self.id) try: replica_config = self._render_replica_config(flavor) self.guest.attach_replication_slave(snapshot, replica_config.config_contents) except GuestError as e: msg = (_("Error attaching instance %s " "as replica.") % self.id) err = inst_models.InstanceTasks.BUILDING_ERROR_REPLICA self._log_and_raise(e, msg, err) def get_replication_master_snapshot(self, context, slave_of_id, flavor, backup_id=None, replica_number=1): # First check to see if we need to take a backup master = BuiltInstanceTasks.load(context, slave_of_id) backup_required = master.backup_required_for_replication() if backup_required: # if we aren't passed in a backup id, look it up to possibly do # an incremental backup, thus saving time if not backup_id: backup = Backup.get_last_completed( context, slave_of_id, include_incremental=True) if backup: backup_id = backup.id else: LOG.debug('Skipping replication backup, as none is required.') snapshot_info = { 'name': "Replication snapshot for %s" % self.id, 'description': "Backup image used to initialize " "replication slave", 'instance_id': slave_of_id, 'parent_id': backup_id, 'tenant_id': self.tenant_id, 'state': BackupState.NEW, 'datastore_version_id': self.datastore_version.id, 'deleted': False, 'replica_number': replica_number, } replica_backup_id = None if backup_required: # Only do a backup if it's the first replica if replica_number == 1: try: db_info = DBBackup.create(**snapshot_info) replica_backup_id = db_info.id except InvalidModelError: msg = (_("Unable to create replication snapshot record " "for instance: %s") % self.id) LOG.exception(msg) raise BackupCreationError(msg) if backup_id: # Look up the parent backup info or fail early if not # found or if the user does not have access to the parent. 
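                # (The 'parent' reference is what makes the replication
                # snapshot incremental: the guest backup strategy can use
                # the parent's location and checksum to fetch and verify
                # the base artifact instead of taking a full copy.)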
_parent = Backup.get_by_id(context, backup_id) parent = { 'location': _parent.location, 'checksum': _parent.checksum, } snapshot_info.update({ 'parent': parent, }) else: # we've been passed in the actual replica backup id, # so just use it replica_backup_id = backup_id try: snapshot_info.update({ 'id': replica_backup_id, 'datastore': master.datastore.name, 'datastore_version': master.datastore_version.name, }) snapshot = master.get_replication_snapshot( snapshot_info, flavor=master.flavor_id) snapshot.update({ 'config': self._render_replica_config(flavor).config_contents }) return snapshot except Exception as e_create: msg_create = ( _("Error creating replication snapshot from " "instance %(source)s for new replica %(replica)s.") % {'source': slave_of_id, 'replica': self.id}) err = inst_models.InstanceTasks.BUILDING_ERROR_REPLICA # if the delete of the 'bad' backup fails, it'll mask the # create exception, so we trap it here try: # Only try to delete the backup if it's the first replica if replica_number == 1 and backup_required: Backup.delete(context, replica_backup_id) except Exception as e_delete: LOG.error(msg_create) # Make sure we log any unexpected errors from the create if not isinstance(e_create, TroveError): LOG.error(e_create) msg_delete = ( _("An error occurred while deleting a bad " "replication snapshot from instance %(source)s.") % {'source': slave_of_id}) # we've already logged the create exception, so we'll raise # the delete (otherwise the create will be logged twice) self._log_and_raise(e_delete, msg_delete, err) # the delete worked, so just log the original problem with create self._log_and_raise(e_create, msg_create, err) def report_root_enabled(self): mysql_models.RootHistory.create(self.context, self.id, 'root') def update_statuses_on_time_out(self): if CONF.update_status_on_fail: # Updating service status service = InstanceServiceStatus.find_by(instance_id=self.id) service.set_status(ServiceStatuses. FAILED_TIMEOUT_GUESTAGENT) service.save() LOG.error(_("Service status: %(status)s\n" "Service error description: %(desc)s") % {'status': ServiceStatuses. FAILED_TIMEOUT_GUESTAGENT.api_status, 'desc': ServiceStatuses. FAILED_TIMEOUT_GUESTAGENT.description}) # Updating instance status db_info = DBInstance.find_by(id=self.id, deleted=False) db_info.set_task_status(InstanceTasks. BUILDING_ERROR_TIMEOUT_GA) db_info.save() LOG.error(_("Trove instance status: %(action)s\n" "Trove instance status description: %(text)s") % {'action': InstanceTasks. BUILDING_ERROR_TIMEOUT_GA.action, 'text': InstanceTasks. BUILDING_ERROR_TIMEOUT_GA.db_text}) def _service_is_active(self): """ Check that the database guest is active. This function is meant to be called with poll_until to check that the guest is alive before sending a 'create' message. This prevents over billing a customer for an instance that they can never use. Returns: boolean if the service is active. Raises: TroveError if the service is in a failure state. 
""" service = InstanceServiceStatus.find_by(instance_id=self.id) status = service.get_status() if (status == rd_instance.ServiceStatuses.RUNNING or status == rd_instance.ServiceStatuses.INSTANCE_READY): return True elif status not in [rd_instance.ServiceStatuses.NEW, rd_instance.ServiceStatuses.BUILDING]: raise TroveError(_("Service not active, status: %s") % status) c_id = self.db_info.compute_instance_id nova_status = self.nova_client.servers.get(c_id).status if nova_status in [InstanceStatus.ERROR, InstanceStatus.FAILED]: raise TroveError(_("Server not active, status: %s") % nova_status) return False def _create_server_volume(self, flavor_id, image_id, security_groups, datastore_manager, volume_size, availability_zone, nics, files): LOG.debug("Begin _create_server_volume for id: %s" % self.id) try: userdata = self._prepare_userdata(datastore_manager) name = self.hostname or self.name volume_desc = ("datastore volume for %s" % self.id) volume_name = ("datastore-%s" % self.id) volume_ref = {'size': volume_size, 'name': volume_name, 'description': volume_desc} config_drive = CONF.use_nova_server_config_drive server = self.nova_client.servers.create( name, image_id, flavor_id, files=files, volume=volume_ref, security_groups=security_groups, availability_zone=availability_zone, nics=nics, config_drive=config_drive, userdata=userdata) server_dict = server._info LOG.debug("Created new compute instance %(server_id)s " "for id: %(id)s\nServer response: %(response)s" % {'server_id': server.id, 'id': self.id, 'response': server_dict}) volume_id = None for volume in server_dict.get('os:volumes', []): volume_id = volume.get('id') # Record the server ID and volume ID in case something goes wrong. self.update_db(compute_instance_id=server.id, volume_id=volume_id) except Exception as e: msg = _("Error creating server and volume for " "instance %s") % self.id LOG.debug("End _create_server_volume for id: %s" % self.id) err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER self._log_and_raise(e, msg, err) device_path = self.device_path mount_point = CONF.get(datastore_manager).mount_point volume_info = {'device_path': device_path, 'mount_point': mount_point} LOG.debug("End _create_server_volume for id: %s" % self.id) return volume_info def _build_sg_rules_mapping(self, rule_ports): final = [] cidr = CONF.trove_security_group_rule_cidr for port_or_range in set(rule_ports): from_, to_ = utils.gen_ports(port_or_range) final.append({'cidr': cidr, 'from_': str(from_), 'to_': str(to_)}) return final def _create_server_volume_heat(self, flavor, image_id, datastore_manager, volume_size, availability_zone, nics, files, volume_type): LOG.debug("Begin _create_server_volume_heat for id: %s" % self.id) try: client = create_heat_client(self.context) tcp_rules_mapping_list = self._build_sg_rules_mapping(CONF.get( datastore_manager).tcp_ports) udp_ports_mapping_list = self._build_sg_rules_mapping(CONF.get( datastore_manager).udp_ports) ifaces, ports = self._build_heat_nics(nics) template_obj = template.load_heat_template(datastore_manager) heat_template_unicode = template_obj.render( volume_support=self.volume_support, ifaces=ifaces, ports=ports, tcp_rules=tcp_rules_mapping_list, udp_rules=udp_ports_mapping_list, datastore_manager=datastore_manager, files=files) try: heat_template = heat_template_unicode.encode('utf-8') except UnicodeEncodeError: raise TroveError("Failed to utf-8 encode Heat template.") parameters = {"Flavor": flavor["name"], "VolumeSize": volume_size, "VolumeType": volume_type, "InstanceId": self.id, 
"ImageId": image_id, "DatastoreManager": datastore_manager, "AvailabilityZone": availability_zone, "TenantId": self.tenant_id} stack_name = 'trove-%s' % self.id client.stacks.create(stack_name=stack_name, template=heat_template, parameters=parameters) try: utils.poll_until( lambda: client.stacks.get(stack_name), lambda stack: stack.stack_status in ['CREATE_COMPLETE', 'CREATE_FAILED'], sleep_time=USAGE_SLEEP_TIME, time_out=HEAT_TIME_OUT) except PollTimeOut: raise TroveError("Failed to obtain Heat stack status. " "Timeout occurred.") stack = client.stacks.get(stack_name) if ((stack.action, stack.stack_status) not in HEAT_STACK_SUCCESSFUL_STATUSES): raise TroveError("Failed to create Heat stack.") resource = client.resources.get(stack.id, 'BaseInstance') if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE: raise TroveError("Failed to provision Heat base instance.") instance_id = resource.physical_resource_id if self.volume_support: resource = client.resources.get(stack.id, 'DataVolume') if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE: raise TroveError("Failed to provision Heat data volume.") volume_id = resource.physical_resource_id self.update_db(compute_instance_id=instance_id, volume_id=volume_id) else: self.update_db(compute_instance_id=instance_id) if CONF.trove_security_groups_support: resource = client.resources.get(stack.id, 'DatastoreSG') name = "%s_%s" % ( CONF.trove_security_group_name_prefix, self.id) description = _("Security Group for %s") % self.id SecurityGroup.create( id=resource.physical_resource_id, name=name, description=description, user=self.context.user, tenant_id=self.context.tenant) SecurityGroupInstanceAssociation.create( security_group_id=resource.physical_resource_id, instance_id=self.id) except (TroveError, heat_exceptions.HTTPNotFound, heat_exceptions.HTTPException) as e: msg = _("Error occurred during Heat stack creation for " "instance %s.") % self.id err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER self._log_and_raise(e, msg, err) device_path = self.device_path mount_point = CONF.get(datastore_manager).mount_point volume_info = {'device_path': device_path, 'mount_point': mount_point} LOG.debug("End _create_server_volume_heat for id: %s" % self.id) return volume_info def _create_server_volume_individually(self, flavor_id, image_id, security_groups, datastore_manager, volume_size, availability_zone, nics, files, volume_type): LOG.debug("Begin _create_server_volume_individually for id: %s" % self.id) server = None volume_info = self._build_volume_info(datastore_manager, volume_size=volume_size, volume_type=volume_type) block_device_mapping = volume_info['block_device'] try: server = self._create_server(flavor_id, image_id, security_groups, datastore_manager, block_device_mapping, availability_zone, nics, files) server_id = server.id # Save server ID. 
self.update_db(compute_instance_id=server_id) except Exception as e: msg = _("Failed to create server for instance %s") % self.id err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER self._log_and_raise(e, msg, err) LOG.debug("End _create_server_volume_individually for id: %s" % self.id) return volume_info def _build_volume_info(self, datastore_manager, volume_size=None, volume_type=None): volume_info = None volume_support = self.volume_support device_path = self.device_path mount_point = CONF.get(datastore_manager).mount_point LOG.debug("trove volume support = %s" % volume_support) if volume_support: try: volume_info = self._create_volume( volume_size, volume_type, datastore_manager) except Exception as e: msg = _("Failed to create volume for instance %s") % self.id err = inst_models.InstanceTasks.BUILDING_ERROR_VOLUME self._log_and_raise(e, msg, err) else: LOG.debug("device_path = %(path)s\n" "mount_point = %(point)s" % { "path": device_path, "point": mount_point }) volume_info = { 'block_device': None, 'device_path': device_path, 'mount_point': mount_point, 'volumes': None, } return volume_info def _log_and_raise(self, exc, message, task_status): LOG.error(_("%(message)s\n%(exc)s\n%(trace)s") % {"message": message, "exc": exc, "trace": traceback.format_exc()}) self.update_db(task_status=task_status) raise TroveError(message=message) def _create_volume(self, volume_size, volume_type, datastore_manager): LOG.debug("Begin _create_volume for id: %s" % self.id) volume_client = create_cinder_client(self.context) volume_desc = ("datastore volume for %s" % self.id) volume_ref = volume_client.volumes.create( volume_size, name="datastore-%s" % self.id, description=volume_desc, volume_type=volume_type) # Record the volume ID in case something goes wrong. self.update_db(volume_id=volume_ref.id) utils.poll_until( lambda: volume_client.volumes.get(volume_ref.id), lambda v_ref: v_ref.status in ['available', 'error'], sleep_time=2, time_out=VOLUME_TIME_OUT) v_ref = volume_client.volumes.get(volume_ref.id) if v_ref.status in ['error']: raise VolumeCreationFailure() LOG.debug("End _create_volume for id: %s" % self.id) return self._build_volume(v_ref, datastore_manager) def _build_volume(self, v_ref, datastore_manager): LOG.debug("Created volume %s" % v_ref) # The mapping is in the format: # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>] # delete_on_terminate is set to true (1) so the volume is removed with # the server mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1) bdm = CONF.block_device_mapping block_device = {bdm: mapping} created_volumes = [{'id': v_ref.id, 'size': v_ref.size}] device_path = self.device_path mount_point = CONF.get(datastore_manager).mount_point LOG.debug("block_device = %(device)s\n" "volume = %(volume)s\n" "device_path = %(path)s\n" "mount_point = %(point)s" % {"device": block_device, "volume": created_volumes, "path": device_path, "point": mount_point}) volume_info = {'block_device': block_device, 'device_path': device_path, 'mount_point': mount_point, 'volumes': created_volumes} return volume_info def _prepare_userdata(self, datastore_manager): userdata = None cloudinit = os.path.join(CONF.get('cloudinit_location'), "%s.cloudinit" % datastore_manager) if os.path.isfile(cloudinit): with open(cloudinit, "r") as f: userdata = f.read() return userdata def _create_server(self, flavor_id, image_id, security_groups, datastore_manager, block_device_mapping, availability_zone, nics, files={}): userdata = self._prepare_userdata(datastore_manager) name = self.hostname or self.name bdmap = block_device_mapping config_drive =
CONF.use_nova_server_config_drive server = self.nova_client.servers.create( name, image_id, flavor_id, files=files, userdata=userdata, security_groups=security_groups, block_device_mapping=bdmap, availability_zone=availability_zone, nics=nics, config_drive=config_drive) LOG.debug("Created new compute instance %(server_id)s " "for instance %(id)s" % {'server_id': server.id, 'id': self.id}) return server def _guest_prepare(self, flavor_ram, volume_info, packages, databases, users, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): LOG.debug("Entering guest_prepare") # Now wait for the response from the create to do additional work self.guest.prepare(flavor_ram, packages, databases, users, device_path=volume_info['device_path'], mount_point=volume_info['mount_point'], backup_info=backup_info, config_contents=config_contents, root_password=root_password, overrides=overrides, cluster_config=cluster_config, snapshot=snapshot, modules=modules) def _create_dns_entry(self): dns_support = CONF.trove_dns_support LOG.debug("trove dns support = %s" % dns_support) if dns_support: LOG.debug("%(gt)s: Creating dns entry for instance: %(id)s" % {'gt': greenthread.getcurrent(), 'id': self.id}) dns_client = create_dns_client(self.context) def get_server(): c_id = self.db_info.compute_instance_id return self.nova_client.servers.get(c_id) def ip_is_available(server): LOG.debug("Polling for ip addresses: %s " % server.addresses) if server.addresses != {}: return True elif (server.addresses == {} and server.status != InstanceStatus.ERROR): return False elif (server.addresses == {} and server.status == InstanceStatus.ERROR): LOG.error(_("Failed to create DNS entry for instance " "%(instance)s. Server status was " "%(status)s.") % {'instance': self.id, 'status': server.status}) raise TroveError(status=server.status) utils.poll_until(get_server, ip_is_available, sleep_time=1, time_out=DNS_TIME_OUT) server = self.nova_client.servers.get( self.db_info.compute_instance_id) self.db_info.addresses = server.addresses LOG.debug("Creating dns entry...") ip = self.dns_ip_address if not ip: raise TroveError("Failed to create DNS entry for instance %s. " "No IP available."
% self.id) dns_client.create_instance_entry(self.id, ip) LOG.debug("Successfully created DNS entry for instance: %s" % self.id) else: LOG.debug("%(gt)s: DNS not enabled for instance: %(id)s" % {'gt': greenthread.getcurrent(), 'id': self.id}) def _create_secgroup(self, datastore_manager): security_group = SecurityGroup.create_for_instance( self.id, self.context) tcp_ports = CONF.get(datastore_manager).tcp_ports udp_ports = CONF.get(datastore_manager).udp_ports self._create_rules(security_group, tcp_ports, 'tcp') self._create_rules(security_group, udp_ports, 'udp') return [security_group["name"]] def _create_rules(self, s_group, ports, protocol): err = inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP err_msg = _("Failed to create security group rules for instance " "%(instance_id)s: Invalid port format - " "FromPort = %(from)s, ToPort = %(to)s") def set_error_and_raise(port_or_range): from_port, to_port = port_or_range self.update_db(task_status=err) msg = err_msg % {'instance_id': self.id, 'from': from_port, 'to': to_port} raise MalformedSecurityGroupRuleError(message=msg) for port_or_range in set(ports): try: from_, to_ = (None, None) from_, to_ = utils.gen_ports(port_or_range) cidr = CONF.trove_security_group_rule_cidr SecurityGroupRule.create_sec_group_rule( s_group, protocol, int(from_), int(to_), cidr, self.context) except (ValueError, TroveError): set_error_and_raise([from_, to_]) def _build_heat_nics(self, nics): ifaces = [] ports = [] if nics: for idx, nic in enumerate(nics): iface_id = nic.get('port-id') if iface_id: ifaces.append(iface_id) continue net_id = nic.get('net-id') if net_id: port = {} port['name'] = "Port%s" % idx port['net_id'] = net_id fixed_ip = nic.get('v4-fixed-ip') if fixed_ip: port['fixed_ip'] = fixed_ip ports.append(port) ifaces.append("{Ref: Port%s}" % idx) return ifaces, ports class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): """ Performs the various asynchronous instance-related tasks. """ def _delete_resources(self, deleted_at): LOG.debug("Begin _delete_resources for instance %s" % self.id) server_id = self.db_info.compute_instance_id old_server = self.nova_client.servers.get(server_id) LOG.debug("Stopping datastore on instance %s before deleting any " "resources." % self.id) try: self.guest.stop_db() except Exception: LOG.exception(_("Error stopping the datastore before attempting " "to delete instance id %s.") % self.id) try: if use_heat: # Delete the server via heat heatclient = create_heat_client(self.context) name = 'trove-%s' % self.id heatclient.stacks.delete(name) else: self.server.delete() except Exception: LOG.exception(_("Error deleting compute server %s") % self.server.id) try: dns_support = CONF.trove_dns_support LOG.debug("trove dns support = %s" % dns_support) if dns_support: dns_api = create_dns_client(self.context) dns_api.delete_instance_entry(instance_id=self.db_info.id) except Exception as ex: LOG.exception(_("Error deleting dns entry of instance %(id)s: " "%(ex)s") % {'id': self.db_info.id, 'ex': ex}) # Poll until the server is gone.
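# --- Illustrative aside (added example, not part of the original module) ---
# server_is_finished() below is handed to utils.poll_until. A minimal sketch
# of that helper's contract, assuming a plain time.sleep in place of the
# eventlet-friendly sleep the real helper uses; the names here are
# hypothetical stand-ins.
import time

class SketchPollTimeOut(Exception):
    """Stand-in for the PollTimeOut raised when the deadline expires."""

def sketch_poll_until(retriever, condition=lambda value: value,
                      sleep_time=1, time_out=None):
    start = time.time()
    while True:
        # Fetch the latest value and test it; return as soon as it passes.
        if condition(retriever()):
            return
        if time_out is not None and (time.time() - start) > time_out:
            raise SketchPollTimeOut()
        time.sleep(sleep_time)
# --- End aside ---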
def server_is_finished(): try: server = self.nova_client.servers.get(server_id) if not self.server_status_matches(['SHUTDOWN', 'ACTIVE'], server=server): LOG.error(_("Server %(server_id)s entered ERROR status " "when deleting instance %(instance_id)s!") % {'server_id': server.id, 'instance_id': self.id}) return False except nova_exceptions.NotFound: return True try: utils.poll_until(server_is_finished, sleep_time=2, time_out=CONF.server_delete_time_out) except PollTimeOut: LOG.exception(_("Failed to delete instance %(instance_id)s: " "Timeout deleting compute server %(server_id)s") % {'instance_id': self.id, 'server_id': server_id}) # If volume has been resized it must be manually removed in cinder try: if self.volume_id: volume_client = create_cinder_client(self.context) volume = volume_client.volumes.get(self.volume_id) if volume.status == "available": LOG.info(_("Deleting volume %(v)s for instance: %(i)s.") % {'v': self.volume_id, 'i': self.id}) volume.delete() except Exception: LOG.exception(_("Error deleting volume of instance %(id)s.") % {'id': self.db_info.id}) TroveInstanceDelete(instance=self, deleted_at=timeutils.isotime(deleted_at), server=old_server).notify() LOG.debug("End _delete_resources for instance %s" % self.id) def server_status_matches(self, expected_status, server=None): if not server: server = self.server return server.status.upper() in ( status.upper() for status in expected_status) def resize_volume(self, new_size): LOG.info(_("Resizing volume for instance %(instance_id)s from " "%(old_size)s GB to %(new_size)s GB.") % {'instance_id': self.id, 'old_size': self.volume_size, 'new_size': new_size}) action = ResizeVolumeAction(self, self.volume_size, new_size) action.execute() LOG.info(_("Resized volume for instance %s successfully.") % self.id) def resize_flavor(self, old_flavor, new_flavor): LOG.info(_("Resizing instance %(instance_id)s from flavor " "%(old_flavor)s to %(new_flavor)s.") % {'instance_id': self.id, 'old_flavor': old_flavor['id'], 'new_flavor': new_flavor['id']}) action = ResizeAction(self, old_flavor, new_flavor) action.execute() LOG.info(_("Resized instance %s successfully.") % self.id) def migrate(self, host): LOG.info(_("Initiating migration to host %s.") % host) action = MigrateAction(self, host) action.execute() def create_backup(self, backup_info): LOG.info(_("Initiating backup for instance %s.") % self.id) self.guest.create_backup(backup_info) def backup_required_for_replication(self): LOG.debug("Seeing if replication backup is required for instance %s." 
% self.id) return self.guest.backup_required_for_replication() def get_replication_snapshot(self, snapshot_info, flavor): def _get_replication_snapshot(): LOG.debug("Calling get_replication_snapshot on %s.", self.id) try: rep_source_config = self._render_replica_source_config(flavor) result = self.guest.get_replication_snapshot( snapshot_info, rep_source_config.config_contents) LOG.debug("Got replication snapshot from guest successfully.") return result except Exception: LOG.exception(_("Failed to get replication snapshot from %s.") % self.id) raise return run_with_quotas(self.context.tenant, {'backups': 1}, _get_replication_snapshot) def detach_replica(self, master, for_failover=False): LOG.debug("Calling detach_replica on %s" % self.id) try: self.guest.detach_replica(for_failover) self.update_db(slave_of_id=None) self.slave_list = None except (GuestError, GuestTimeout): LOG.exception(_("Failed to detach replica %s.") % self.id) raise def attach_replica(self, master): LOG.debug("Calling attach_replica on %s" % self.id) try: replica_info = master.guest.get_replica_context() flavor = self.nova_client.flavors.get(self.flavor_id) slave_config = self._render_replica_config(flavor).config_contents self.guest.attach_replica(replica_info, slave_config) self.update_db(slave_of_id=master.id) self.slave_list = None except (GuestError, GuestTimeout): LOG.exception(_("Failed to attach replica %s.") % self.id) raise def make_read_only(self, read_only): LOG.debug("Calling make_read_only on %s" % self.id) self.guest.make_read_only(read_only) def _get_floating_ips(self): """Returns floating ips as a dict indexed by the ip.""" floating_ips = {} for ip in self.nova_client.floating_ips.list(): floating_ips.update({ip.ip: ip}) return floating_ips def detach_public_ips(self): LOG.debug("Begin detach_public_ips for instance %s" % self.id) removed_ips = [] server_id = self.db_info.compute_instance_id nova_instance = self.nova_client.servers.get(server_id) floating_ips = self._get_floating_ips() for ip in self.get_visible_ip_addresses(): if ip in floating_ips: nova_instance.remove_floating_ip(ip) removed_ips.append(ip) return removed_ips def attach_public_ips(self, ips): LOG.debug("Begin attach_public_ips for instance %s" % self.id) server_id = self.db_info.compute_instance_id nova_instance = self.nova_client.servers.get(server_id) for ip in ips: nova_instance.add_floating_ip(ip) def enable_as_master(self): LOG.debug("Calling enable_as_master on %s" % self.id) flavor = self.nova_client.flavors.get(self.flavor_id) replica_source_config = self._render_replica_source_config(flavor) self.update_db(slave_of_id=None) self.slave_list = None self.guest.enable_as_master(replica_source_config.config_contents) def get_last_txn(self): LOG.debug("Calling get_last_txn on %s" % self.id) return self.guest.get_last_txn() def get_latest_txn_id(self): LOG.debug("Calling get_latest_txn_id on %s" % self.id) return self.guest.get_latest_txn_id() def wait_for_txn(self, txn): LOG.debug("Calling wait_for_txn on %s" % self.id) if txn: self.guest.wait_for_txn(txn) def cleanup_source_on_replica_detach(self, replica_info): LOG.debug("Calling cleanup_source_on_replica_detach on %s" % self.id) self.guest.cleanup_source_on_replica_detach(replica_info) def demote_replication_master(self): LOG.debug("Calling demote_replication_master on %s" % self.id) self.guest.demote_replication_master() def reboot(self): try: # Issue a guest stop db call to shutdown the db if running LOG.debug("Stopping datastore on instance %s." 
% self.id) try: self.guest.stop_db() except (exception.GuestError, exception.GuestTimeout) as e: # Acceptable to be here if db was already in crashed state # Also we check guest state before issuing reboot LOG.debug(str(e)) self._refresh_datastore_status() if not (self.datastore_status_matches( rd_instance.ServiceStatuses.SHUTDOWN) or self.datastore_status_matches( rd_instance.ServiceStatuses.CRASHED)): # We will bail if db did not get stopped or is blocked LOG.error(_("Cannot reboot instance. DB status is %s.") % self.datastore_status.status) return LOG.debug("The guest service status is %s." % self.datastore_status.status) LOG.info(_("Rebooting instance %s.") % self.id) self.server.reboot() # Poll nova until instance is active reboot_time_out = CONF.reboot_time_out def update_server_info(): self.refresh_compute_server_info() return self.server_status_matches(['ACTIVE']) utils.poll_until( update_server_info, sleep_time=2, time_out=reboot_time_out) # Set the status to PAUSED. The guest agent will reset the status # when the reboot completes and MySQL is running. self.set_datastore_status_to_paused() LOG.info(_("Rebooted instance %s successfully.") % self.id) except Exception as e: LOG.error(_("Failed to reboot instance %(id)s: %(e)s") % {'id': self.id, 'e': str(e)}) finally: LOG.debug("Rebooting FINALLY %s" % self.id) self.reset_task_status() def restart(self): LOG.info(_("Initiating datastore restart on instance %s.") % self.id) try: self.guest.restart() except GuestError: LOG.error(_("Failed to initiate datastore restart on instance " "%s.") % self.id) finally: self.reset_task_status() def guest_log_list(self): LOG.info(_("Retrieving guest log list for instance %s.") % self.id) try: return self.guest.guest_log_list() except GuestError: LOG.error(_("Failed to retrieve guest log list for instance " "%s.") % self.id) finally: self.reset_task_status() def guest_log_action(self, log_name, enable, disable, publish, discard): LOG.info(_("Processing guest log for instance %s.") % self.id) try: return self.guest.guest_log_action(log_name, enable, disable, publish, discard) except GuestError: LOG.error(_("Failed to process guest log for instance %s.") % self.id) finally: self.reset_task_status() def refresh_compute_server_info(self): """Refreshes the compute server field.""" server = self.nova_client.servers.get(self.server.id) self.server = server def _refresh_datastore_status(self): """ Gets the latest instance service status from datastore and updates the reference on this BuiltInstanceTask reference """ self.datastore_status = InstanceServiceStatus.find_by( instance_id=self.id) def set_datastore_status_to_paused(self): """ Updates the InstanceServiceStatus for this BuiltInstance to PAUSED. 
This does not change the reference for this BuiltInstanceTask """ datastore_status = InstanceServiceStatus.find_by(instance_id=self.id) datastore_status.status = rd_instance.ServiceStatuses.PAUSED datastore_status.save() class BackupTasks(object): @classmethod def _parse_manifest(cls, manifest): # manifest is in the format 'container/prefix' # where prefix can be 'path' or 'lots/of/paths' try: container_index = manifest.index('/') prefix_index = container_index + 1 except ValueError: return None, None container = manifest[:container_index] prefix = manifest[prefix_index:] return container, prefix @classmethod def delete_files_from_swift(cls, context, filename): container = CONF.backup_swift_container client = remote.create_swift_client(context) obj = client.head_object(container, filename) manifest = obj.get('x-object-manifest', '') cont, prefix = cls._parse_manifest(manifest) if all([cont, prefix]): # This is a manifest file, first delete all segments. LOG.debug("Deleting files with prefix: %(cont)s/%(prefix)s" % {'cont': cont, 'prefix': prefix}) # list files from container/prefix specified by manifest headers, segments = client.get_container(cont, prefix=prefix) LOG.debug(headers) for segment in segments: name = segment.get('name') if name: LOG.debug("Deleting file: %(cont)s/%(name)s" % {'cont': cont, 'name': name}) client.delete_object(cont, name) # Delete the manifest file LOG.debug("Deleting file: %(cont)s/%(filename)s" % {'cont': cont, 'filename': filename}) client.delete_object(container, filename) @classmethod def delete_backup(cls, context, backup_id): """Delete backup from swift.""" LOG.info(_("Deleting backup %s.") % backup_id) backup = bkup_models.Backup.get_by_id(context, backup_id) try: filename = backup.filename if filename: BackupTasks.delete_files_from_swift(context, filename) except ValueError: backup.delete() except ClientException as e: if e.http_status == 404: # Backup already deleted in swift backup.delete() else: LOG.exception(_("Error occurred when deleting from swift. " "Details: %s") % e) backup.state = bkup_models.BackupState.DELETE_FAILED backup.save() raise TroveError("Failed to delete swift object for backup %s." % backup_id) else: backup.delete() LOG.info(_("Deleted backup %s successfully.") % backup_id) class ResizeVolumeAction(object): """Performs volume resize action.""" def __init__(self, instance, old_size, new_size): self.instance = instance self.old_size = int(old_size) self.new_size = int(new_size) def get_mount_point(self): mount_point = CONF.get( self.instance.datastore_version.manager).mount_point return mount_point def get_device_path(self): return self.instance.device_path def _fail(self, orig_func): LOG.exception(_("%(func)s encountered an error when " "attempting to resize the volume for " "instance %(id)s. Setting service " "status to failed.") % {'func': orig_func.__name__, 'id': self.instance.id}) service = InstanceServiceStatus.find_by(instance_id=self.instance.id) service.set_status(ServiceStatuses.FAILED) service.save() def _recover_restart(self, orig_func): LOG.exception(_("%(func)s encountered an error when attempting to " "resize the volume for instance %(id)s. Trying to " "recover by restarting the " "guest.") % {'func': orig_func.__name__, 'id': self.instance.id}) self.instance.restart() def _recover_mount_restart(self, orig_func): LOG.exception(_("%(func)s encountered an error when attempting to " "resize the volume for instance %(id)s. 
Trying to " "recover by mounting the volume and then restarting " "the guest.") % {'func': orig_func.__name__, 'id': self.instance.id}) self._mount_volume() self.instance.restart() def _recover_full(self, orig_func): LOG.exception(_("%(func)s encountered an error when attempting to " "resize the volume for instance %(id)s. Trying to " "recover by attaching and" " mounting the volume and then restarting the " "guest.") % {'func': orig_func.__name__, 'id': self.instance.id}) self._attach_volume() self._mount_volume() self.instance.restart() def _stop_db(self): LOG.debug("Instance %s calling stop_db." % self.instance.id) self.instance.guest.stop_db() @try_recover def _unmount_volume(self): LOG.debug("Unmounting the volume on instance %(id)s" % { 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.unmount_volume(device_path=device_path, mount_point=mount_point) LOG.debug("Successfully unmounted the volume %(vol_id)s for " "instance %(id)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _detach_volume(self): LOG.debug("Detach volume %(vol_id)s from instance %(id)s" % { 'vol_id': self.instance.volume_id, 'id': self.instance.id}) self.instance.nova_client.volumes.delete_server_volume( self.instance.server.id, self.instance.volume_id) def volume_available(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.status == 'available' utils.poll_until(volume_available, sleep_time=2, time_out=CONF.volume_time_out) LOG.debug("Successfully detached volume %(vol_id)s from instance " "%(id)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _attach_volume(self): device_path = self.get_device_path() LOG.debug("Attach volume %(vol_id)s to instance %(id)s at " "%(dev)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id, 'dev': device_path}) self.instance.nova_client.volumes.create_server_volume( self.instance.server.id, self.instance.volume_id, device_path) def volume_in_use(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.status == 'in-use' utils.poll_until(volume_in_use, sleep_time=2, time_out=CONF.volume_time_out) LOG.debug("Successfully attached volume %(vol_id)s to instance " "%(id)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _resize_fs(self): LOG.debug("Resizing the filesystem for instance %(id)s" % { 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.resize_fs(device_path=device_path, mount_point=mount_point) LOG.debug("Successfully resized volume %(vol_id)s filesystem for " "instance %(id)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _mount_volume(self): LOG.debug("Mount the volume on instance %(id)s" % { 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.mount_volume(device_path=device_path, mount_point=mount_point) LOG.debug("Successfully mounted the volume %(vol_id)s on instance " "%(id)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _extend(self): LOG.debug("Extending volume %(vol_id)s for instance %(id)s to " "size %(size)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id, 'size': self.new_size}) self.instance.volume_client.volumes.extend(self.instance.volume_id, self.new_size) LOG.debug("Successfully extended the volume 
%(vol_id)s for instance " "%(id)s" % {'vol_id': self.instance.volume_id, 'id': self.instance.id}) def _verify_extend(self): try: volume = self.instance.volume_client.volumes.get( self.instance.volume_id) if not volume: msg = (_('Failed to get volume %(vol_id)s') % { 'vol_id': self.instance.volume_id}) raise cinder_exceptions.ClientException(msg) def volume_is_new_size(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.size == self.new_size utils.poll_until(volume_is_new_size, sleep_time=2, time_out=CONF.volume_time_out) self.instance.update_db(volume_size=self.new_size) except PollTimeOut: LOG.exception(_("Timeout trying to extend the volume %(vol_id)s " "for instance %(id)s") % { 'vol_id': self.instance.volume_id, 'id': self.instance.id}) volume = self.instance.volume_client.volumes.get( self.instance.volume_id) if volume.status == 'extending': self._fail(self._verify_extend) elif volume.size != self.new_size: self.instance.update_db(volume_size=volume.size) self._recover_full(self._verify_extend) raise except Exception: LOG.exception(_("Error encountered trying to verify extend for " "the volume %(vol_id)s for instance %(id)s") % { 'vol_id': self.instance.volume_id, 'id': self.instance.id}) self._recover_full(self._verify_extend) raise def _resize_active_volume(self): LOG.debug("Begin _resize_active_volume for id: %(id)s" % { 'id': self.instance.id}) self._stop_db() self._unmount_volume(recover_func=self._recover_restart) self._detach_volume(recover_func=self._recover_mount_restart) self._extend(recover_func=self._recover_full) self._verify_extend() # if anything fails after this point, recovery is futile self._attach_volume(recover_func=self._fail) self._resize_fs(recover_func=self._fail) self._mount_volume(recover_func=self._fail) self.instance.restart() LOG.debug("End _resize_active_volume for id: %(id)s" % { 'id': self.instance.id}) def execute(self): LOG.debug("%(gt)s: Resizing instance %(id)s volume for server " "%(server_id)s from %(old_volume_size)s to " "%(new_size)r GB" % {'gt': greenthread.getcurrent(), 'id': self.instance.id, 'server_id': self.instance.server.id, 'old_volume_size': self.old_size, 'new_size': self.new_size}) if self.instance.server.status == InstanceStatus.ACTIVE: self._resize_active_volume() self.instance.reset_task_status() # send usage event for size reported by cinder volume = self.instance.volume_client.volumes.get( self.instance.volume_id) launched_time = timeutils.isotime(self.instance.updated) modified_time = timeutils.isotime(self.instance.updated) TroveInstanceModifyVolume(instance=self.instance, old_volume_size=self.old_size, launched_at=launched_time, modify_at=modified_time, volume_size=volume.size, ).notify() else: self.instance.reset_task_status() msg = _("Failed to resize instance %(id)s volume for server " "%(server_id)s. The instance must be in state %(state)s " "not %(inst_state)s.") % { 'id': self.instance.id, 'server_id': self.instance.server.id, 'state': InstanceStatus.ACTIVE, 'inst_state': self.instance.server.status} raise TroveError(msg) class ResizeActionBase(object): """Base class for executing a resize action.""" def __init__(self, instance): """ Creates a new resize action for a given instance :param instance: reference to existing instance that will be resized :type instance: trove.taskmanager.models.BuiltInstanceTasks """ self.instance = instance def _assert_guest_is_ok(self): # The guest will never set the status to PAUSED. 
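# --- Illustrative aside (added example, not part of the original module) ---
# The liveness handshake performed below, in sketch form: the taskmanager
# writes PAUSED (a value the guest agent never writes on its own), then
# polls until the agent overwrites it with anything else, which proves the
# agent survived the resize. read_status, write_status, and poll are
# hypothetical stand-ins for InstanceServiceStatus access and
# utils.poll_until.
def sketch_wait_for_guest(read_status, write_status, poll):
    write_status('PAUSED')
    # Any status other than PAUSED means the guest process is awake again.
    poll(read_status, lambda status: status != 'PAUSED')
# --- End aside ---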
self.instance.set_datastore_status_to_paused() # Now we wait until it sets it to anything at all, # so we know it's alive. utils.poll_until( self._guest_is_awake, sleep_time=2, time_out=RESIZE_TIME_OUT) def _assert_nova_status_is_ok(self): # Make sure Nova thinks things went well. if not self.instance.server_status_matches(["VERIFY_RESIZE"]): msg = "Migration failed! status=%(act_status)s and " \ "not %(exp_status)s" % { "act_status": self.instance.server.status, "exp_status": 'VERIFY_RESIZE'} raise TroveError(msg) def _assert_datastore_is_ok(self): # Tell the guest to turn on datastore, and ensure the status becomes # RUNNING. self._start_datastore() utils.poll_until( self._datastore_is_online, sleep_time=2, time_out=RESIZE_TIME_OUT) def _assert_datastore_is_offline(self): # Tell the guest to turn off MySQL, and ensure the status becomes # SHUTDOWN. self.instance.guest.stop_db(do_not_start_on_reboot=True) utils.poll_until( self._datastore_is_offline, sleep_time=2, time_out=RESIZE_TIME_OUT) def _assert_processes_are_ok(self): """Checks the procs; if anything is wrong, reverts the operation.""" # Tell the guest to turn back on, and make sure it can start. self._assert_guest_is_ok() LOG.debug("Nova guest is ok.") self._assert_datastore_is_ok() LOG.debug("Datastore is ok.") def _confirm_nova_action(self): LOG.debug("Instance %s calling Compute confirm resize..." % self.instance.id) self.instance.server.confirm_resize() def _datastore_is_online(self): self.instance._refresh_datastore_status() return self.instance.is_datastore_running def _datastore_is_offline(self): self.instance._refresh_datastore_status() return (self.instance.datastore_status_matches( rd_instance.ServiceStatuses.SHUTDOWN)) def _revert_nova_action(self): LOG.debug("Instance %s calling Compute revert resize..." % self.instance.id) self.instance.server.revert_resize() def execute(self): """Initiates the action.""" try: LOG.debug("Instance %s calling stop_db..." 
% self.instance.id) self._assert_datastore_is_offline() self._perform_nova_action() finally: if self.instance.db_info.task_status != ( inst_models.InstanceTasks.NONE): self.instance.reset_task_status() def _guest_is_awake(self): self.instance._refresh_datastore_status() return not self.instance.datastore_status_matches( rd_instance.ServiceStatuses.PAUSED) def _perform_nova_action(self): """Calls Nova to resize or migrate an instance, and confirms.""" LOG.debug("Begin resize method _perform_nova_action instance: %s" % self.instance.id) need_to_revert = False try: LOG.debug("Initiating nova action") self._initiate_nova_action() LOG.debug("Waiting for nova action") self._wait_for_nova_action() LOG.debug("Asserting nova status is ok") self._assert_nova_status_is_ok() need_to_revert = True LOG.debug("* * * REVERT BARRIER PASSED * * *") LOG.debug("Asserting nova action success") self._assert_nova_action_was_successful() LOG.debug("Asserting processes are OK") self._assert_processes_are_ok() LOG.debug("Confirming nova action") self._confirm_nova_action() except Exception as ex: LOG.exception(_("Exception during nova action.")) if need_to_revert: LOG.error(_("Reverting action for instance %s") % self.instance.id) self._revert_nova_action() self._wait_for_revert_nova_action() if self.instance.server_status_matches(['ACTIVE']): LOG.error(_("Restarting datastore.")) self.instance.guest.restart() else: LOG.error(_("Cannot restart datastore because " "Nova server status is not ACTIVE")) LOG.error(_("Error resizing instance %s.") % self.instance.id) raise ex LOG.debug("Recording success") self._record_action_success() LOG.debug("End resize method _perform_nova_action instance: %s" % self.instance.id) def _wait_for_nova_action(self): # Wait for the flavor to change. def update_server_info(): self.instance.refresh_compute_server_info() return not self.instance.server_status_matches(['RESIZE']) utils.poll_until( update_server_info, sleep_time=2, time_out=RESIZE_TIME_OUT) def _wait_for_revert_nova_action(self): # Wait for the server to return to ACTIVE after revert. def update_server_info(): self.instance.refresh_compute_server_info() return self.instance.server_status_matches(['ACTIVE']) utils.poll_until( update_server_info, sleep_time=2, time_out=REVERT_TIME_OUT) class ResizeAction(ResizeActionBase): def __init__(self, instance, old_flavor, new_flavor): """ :type instance: trove.taskmanager.models.BuiltInstanceTasks :type old_flavor: dict :type new_flavor: dict """ super(ResizeAction, self).__init__(instance) self.old_flavor = old_flavor self.new_flavor = new_flavor self.new_flavor_id = new_flavor['id'] def _assert_nova_action_was_successful(self): # Do check to make sure the status and flavor id are correct. if str(self.instance.server.flavor['id']) != str(self.new_flavor_id): msg = "Assertion failed! flavor_id=%s and not %s" \ % (self.instance.server.flavor['id'], self.new_flavor_id) raise TroveError(msg) def _initiate_nova_action(self): self.instance.server.resize(self.new_flavor_id) def _revert_nova_action(self): LOG.debug("Instance %s calling Compute revert resize... " "Repairing config." 
% self.instance.id) try: config = self.instance._render_config(self.old_flavor) config = {'config_contents': config.config_contents} self.instance.guest.reset_configuration(config) except GuestTimeout: LOG.exception(_("Error sending reset_configuration call.")) LOG.debug("Reverting resize.") super(ResizeAction, self)._revert_nova_action() def _record_action_success(self): LOG.debug("Updating instance %(id)s to flavor_id %(flavor_id)s." % {'id': self.instance.id, 'flavor_id': self.new_flavor_id}) self.instance.update_db(flavor_id=self.new_flavor_id, task_status=inst_models.InstanceTasks.NONE) update_time = timeutils.isotime(self.instance.updated) TroveInstanceModifyFlavor(instance=self.instance, old_instance_size=self.old_flavor['ram'], instance_size=self.new_flavor['ram'], launched_at=update_time, modify_at=update_time, server=self.instance.server).notify() def _start_datastore(self): config = self.instance._render_config(self.new_flavor) self.instance.guest.start_db_with_conf_changes(config.config_contents) class MigrateAction(ResizeActionBase): def __init__(self, instance, host=None): super(MigrateAction, self).__init__(instance) self.instance = instance self.host = host def _assert_nova_action_was_successful(self): LOG.debug("Currently no assertions for a Migrate Action") def _initiate_nova_action(self): LOG.debug("Migrating instance %(instance)s without flavor change ...\n" "Forcing migration to host(%(host)s)" % {"instance": self.instance.id, "host": self.host}) self.instance.server.migrate(force_host=self.host) def _record_action_success(self): LOG.debug("Successfully finished Migration to " "%(hostname)s: %(id)s" % {'hostname': self.instance.hostname, 'id': self.instance.id}) def _start_datastore(self): self.instance.guest.restart() def load_cluster_tasks(context, cluster_id): manager = Cluster.manager_from_cluster_id(context, cluster_id) strat = strategy.load_taskmanager_strategy(manager) task_manager_cluster_tasks_class = strat.task_manager_cluster_tasks_class return ClusterTasks.load(context, cluster_id, task_manager_cluster_tasks_class) trove-5.0.0/trove/taskmanager/api.py0000664000567000056710000001776212701410316020603 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routes all the requests to the task manager. 
""" from oslo_log import log as logging import oslo_messaging as messaging from trove.common import cfg from trove.common import exception from trove.common.notification import NotificationCastWrapper import trove.common.rpc.version as rpc_version from trove.common.strategies.cluster import strategy from trove.guestagent import models as agent_models from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class API(object): """API for interacting with the task manager.""" def __init__(self, context): self.context = context super(API, self).__init__() target = messaging.Target(topic=CONF.taskmanager_queue, version=rpc_version.RPC_API_VERSION) self.version_cap = rpc_version.VERSION_ALIASES.get( CONF.upgrade_levels.taskmanager) self.client = self.get_client(target, self.version_cap) def _cast(self, method_name, version, **kwargs): LOG.debug("Casting %s" % method_name) with NotificationCastWrapper(self.context, 'taskmanager'): cctxt = self.client.prepare(version=version) cctxt.cast(self.context, method_name, **kwargs) def get_client(self, target, version_cap, serializer=None): return rpc.get_client(target, version_cap=version_cap, serializer=serializer) def _transform_obj(self, obj_ref): # Turn the object into a dictionary and remove the mgr if "__dict__" in dir(obj_ref): obj_dict = obj_ref.__dict__ # We assume manager contains a object due to the *clients if obj_dict.get('manager'): del obj_dict['manager'] return obj_dict raise ValueError("Could not transform %s" % obj_ref) def _delete_heartbeat(self, instance_id): agent_heart_beat = agent_models.AgentHeartBeat() try: heartbeat = agent_heart_beat.find_by_instance_id(instance_id) heartbeat.delete() except exception.ModelNotFoundError as e: LOG.error(e.message) def resize_volume(self, new_size, instance_id): LOG.debug("Making async call to resize volume for instance: %s" % instance_id) self._cast("resize_volume", self.version_cap, new_size=new_size, instance_id=instance_id) def resize_flavor(self, instance_id, old_flavor, new_flavor): LOG.debug("Making async call to resize flavor for instance: %s" % instance_id) self._cast("resize_flavor", self.version_cap, instance_id=instance_id, old_flavor=self._transform_obj(old_flavor), new_flavor=self._transform_obj(new_flavor)) def reboot(self, instance_id): LOG.debug("Making async call to reboot instance: %s" % instance_id) self._cast("reboot", self.version_cap, instance_id=instance_id) def restart(self, instance_id): LOG.debug("Making async call to restart instance: %s" % instance_id) self._cast("restart", self.version_cap, instance_id=instance_id) def detach_replica(self, instance_id): LOG.debug("Making async call to detach replica: %s" % instance_id) self._cast("detach_replica", self.version_cap, instance_id=instance_id) def promote_to_replica_source(self, instance_id): LOG.debug("Making async call to promote replica to source: %s" % instance_id) self._cast("promote_to_replica_source", self.version_cap, instance_id=instance_id) def eject_replica_source(self, instance_id): LOG.debug("Making async call to eject replica source: %s" % instance_id) self._cast("eject_replica_source", self.version_cap, instance_id=instance_id) def migrate(self, instance_id, host): LOG.debug("Making async call to migrate instance: %s" % instance_id) self._cast("migrate", self.version_cap, instance_id=instance_id, host=host) def delete_instance(self, instance_id): LOG.debug("Making async call to delete instance: %s" % instance_id) self._cast("delete_instance", self.version_cap, instance_id=instance_id) def 
create_backup(self, backup_info, instance_id): LOG.debug("Making async call to create a backup for instance: %s" % instance_id) self._cast("create_backup", self.version_cap, backup_info=backup_info, instance_id=instance_id) def delete_backup(self, backup_id): LOG.debug("Making async call to delete backup: %s" % backup_id) self._cast("delete_backup", self.version_cap, backup_id=backup_id) def create_instance(self, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id=None, availability_zone=None, root_password=None, nics=None, overrides=None, slave_of_id=None, cluster_config=None, volume_type=None, modules=None): LOG.debug("Making async call to create instance %s " % instance_id) self._cast("create_instance", self.version_cap, instance_id=instance_id, name=name, flavor=self._transform_obj(flavor), image_id=image_id, databases=databases, users=users, datastore_manager=datastore_manager, packages=packages, volume_size=volume_size, backup_id=backup_id, availability_zone=availability_zone, root_password=root_password, nics=nics, overrides=overrides, slave_of_id=slave_of_id, cluster_config=cluster_config, volume_type=volume_type, modules=modules) def create_cluster(self, cluster_id): LOG.debug("Making async call to create cluster %s " % cluster_id) self._cast("create_cluster", self.version_cap, cluster_id=cluster_id) def grow_cluster(self, cluster_id, new_instance_ids): LOG.debug("Making async call to grow cluster %s " % cluster_id) cctxt = self.client.prepare(version=self.version_cap) cctxt.cast(self.context, "grow_cluster", cluster_id=cluster_id, new_instance_ids=new_instance_ids) def shrink_cluster(self, cluster_id, instance_ids): LOG.debug("Making async call to shrink cluster %s " % cluster_id) cctxt = self.client.prepare(version=self.version_cap) cctxt.cast(self.context, "shrink_cluster", cluster_id=cluster_id, instance_ids=instance_ids) def delete_cluster(self, cluster_id): LOG.debug("Making async call to delete cluster %s " % cluster_id) self._cast("delete_cluster", self.version_cap, cluster_id=cluster_id) def load(context, manager=None): if manager: task_manager_api_class = (strategy.load_taskmanager_strategy(manager) .task_manager_api_class) else: task_manager_api_class = API return task_manager_api_class(context) trove-5.0.0/babel.cfg0000664000567000056710000000002112701410316015526 0ustar jenkinsjenkins00000000000000[python: **.py] trove-5.0.0/.coveragerc0000664000567000056710000000114112701410316016125 0ustar jenkinsjenkins00000000000000# .coveragerc to control coverage.py [run] branch = True source=trove omit=*trove/tests*,*trove/openstack/common* [report] # Regexes for lines to exclude from consideration exclude_lines = # Have to re-enable the standard pragma pragma: no cover # Don't complain about missing debug-only code: def __repr__ if self\.debug # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError # Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: ignore_errors = False [html] directory=cover trove-5.0.0/setup.cfg0000664000567000056710000000423512701410521015632 0ustar jenkinsjenkins00000000000000[metadata] name = trove summary = OpenStack DBaaS description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache 
Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [files] packages = trove [entry_points] console_scripts = trove-api = trove.cmd.api:main trove-taskmanager = trove.cmd.taskmanager:main trove-mgmt-taskmanager = trove.cmd.taskmanager:mgmt_main trove-conductor = trove.cmd.conductor:main trove-manage = trove.cmd.manage:main trove-guestagent = trove.cmd.guest:main trove-fake-mode = trove.cmd.fakemode:main trove.api.extensions = account = trove.extensions.routes.account:Account mgmt = trove.extensions.routes.mgmt:Mgmt mysql = trove.extensions.routes.mysql:Mysql security_group = trove.extensions.routes.security_group:Security_group trove.guestagent.module.drivers = ping = trove.guestagent.module.drivers.ping_driver:PingDriver oslo.messaging.notify.drivers = trove.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver trove.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver trove.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver trove.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver trove.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver [global] setup-hooks = pbr.hooks.setup_hook [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [compile_catalog] directory = trove/locale domain = trove [update_catalog] domain = trove output_dir = trove/locale input_file = trove/locale/trove.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = trove/locale/trove.pot [wheel] universal = 1 [egg_info] tag_date = 0 tag_svn_revision = 0 tag_build = trove-5.0.0/generate_examples.py0000775000567000056710000000253412701410316020060 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import argparse import os import sys import run_tests def import_tests(): from trove.tests.examples import snippets snippets.monkey_patch_uuid_and_date() if __name__ == "__main__": parser = argparse.ArgumentParser(description='Generate Example Snippets') parser.add_argument('--fix-examples', action='store_true', help='Fix the examples rather than failing tests.') args = parser.parse_args() if args.fix_examples: os.environ['TESTS_FIX_EXAMPLES'] = 'True' # Remove the '--fix-examples' argument from sys.argv as it is not a # valid argument in the run_tests module. sys.argv.pop(sys.argv.index('--fix-examples')) run_tests.main(import_tests) trove-5.0.0/tools/0000775000567000056710000000000012701410521015145 5ustar jenkinsjenkins00000000000000trove-5.0.0/tools/install_venv.py0000775000567000056710000001104212701410316020226 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# All Rights Reserved. # # Copyright 2010 OpenStack, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Trove's development virtualenv """ import os import subprocess import sys ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.venv') PIP_REQUIRES = os.path.join(ROOT, 'requirements.txt') TEST_REQUIRES = os.path.join(ROOT, 'test-requirements.txt') PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) def die(message, *args): print >> sys.stderr, message % args sys.exit(1) def check_python_version(): if sys.version_info < (2, 7): die("Need Python Version >= 2.7") def run_command(cmd, redirect_output=True, check_exit_code=True): """ Runs a command in an out-of-process shell, returning the output of that command. Working directory is ROOT. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip()) HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip()) def check_dependencies(): """Make sure virtualenv is in the path.""" if not HAS_VIRTUALENV: print 'not found.' # Try installing it via easy_install... if HAS_EASY_INSTALL: print 'Installing virtualenv via easy_install...', if not (run_command(['which', 'easy_install']) and run_command(['easy_install', 'virtualenv'])): die('ERROR: virtualenv not found.\n\nTrove development' ' requires virtualenv, please install it using your' ' favorite package management tool') print 'done.' print 'done.' def create_virtualenv(venv=VENV): """Creates the virtual environment and installs PIP only into the virtual environment """ print 'Creating venv...', run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print 'done.' print 'Installing pip in virtualenv...', if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): die("Failed to install pip.") print 'done.' def install_dependencies(venv=VENV): print 'Installing dependencies with pip (this can take a while)...' # Install greenlet by hand - just listing it in the requires file does not # get it installed in the right order run_command(['tools/with_venv.sh', '-E', venv, 'pip', 'install', 'greenlet'], redirect_output=False) for requires in (PIP_REQUIRES, TEST_REQUIRES): run_command(['tools/with_venv.sh', '-E', venv, 'pip', 'install', '-r', requires], redirect_output=False) # Tell the virtual env how to "import trove" pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "trove.pth") f = open(pthfile, 'w') f.write("%s\n" % ROOT) def print_help(): help = """ Trove development environment setup is complete. Trove development uses virtualenv to track and manage Python dependencies while in development and testing.
To activate the Trove virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. """ print help def main(argv): check_python_version() check_dependencies() create_virtualenv() install_dependencies() print_help() if __name__ == '__main__': main(sys.argv) trove-5.0.0/tools/with_venv.sh0000775000567000056710000000073612701410316017525 0ustar jenkinsjenkins00000000000000#!/bin/bash set -e me=${0##*/} dir="$(dirname $0)" function print_usage() { cat >&2 <=1.6 SQLAlchemy<1.1.0,>=1.0.10 eventlet!=0.18.3,>=0.18.2 keystonemiddleware!=4.1.0,>=4.0.0 WebOb>=1.2.3 PasteDeploy>=1.5.0 Paste sqlalchemy-migrate>=0.9.6 netaddr!=0.7.16,>=0.7.12 netifaces>=0.10.4 httplib2>=0.7.5 lxml>=2.3 passlib>=1.6 python-heatclient>=0.6.0 python-novaclient!=2.33.0,>=2.29.0 python-cinderclient>=1.3.1 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 python-swiftclient>=2.2.0 python-designateclient>=1.5.0 python-neutronclient!=4.1.0,>=2.6.0 iso8601>=0.1.9 jsonschema!=2.5.0,<3.0.0,>=2.0.0 Jinja2>=2.8 pexpect!=3.3,>=3.1 oslo.config>=3.7.0 oslo.context>=0.2.0 oslo.i18n>=2.1.0 oslo.middleware>=3.0.0 oslo.serialization>=1.10.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 oslo.concurrency>=3.5.0 Babel>=1.3 six>=1.9.0 stevedore>=1.5.0 oslo.messaging>=4.0.0 osprofiler>=1.1.0 oslo.log>=1.14.0 oslo.db>=4.1.0 [:(python_version!='2.7')] Routes!=2.0,>=1.12.3 [:(python_version=='2.7' or python_version=='2.6' or python_version=='3.3')] enum34 [:(python_version=='2.7')] Routes!=2.0,!=2.1,>=1.12.3 MySQL-python trove-5.0.0/trove.egg-info/dependency_links.txt0000664000567000056710000000000112701410517022711 0ustar jenkinsjenkins00000000000000 trove-5.0.0/trove.egg-info/not-zip-safe0000664000567000056710000000000112701410500021061 0ustar jenkinsjenkins00000000000000 trove-5.0.0/trove.egg-info/SOURCES.txt0000664000567000056710000013036212701410521020527 0ustar jenkinsjenkins00000000000000.coveragerc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog LICENSE MANIFEST.in README.rst babel.cfg doc-test.conf generate_examples.py requirements.txt run_tests.py run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini apidocs/.gitignore apidocs/pom.xml apidocs/replacements.config apidocs/src/cdb-devguide.xml apidocs/src/images/Choose_CS_Image.png apidocs/src/images/Choose_CS_Image_CCP.png apidocs/src/images/Choose_Image_CCP.png apidocs/src/images/Choose_Image_CCP.tiff apidocs/src/images/Cloud_DB_Infographic-1.png apidocs/src/images/Cloud_DB_Infographic-1.svg apidocs/src/images/Create_CS.png apidocs/src/images/phpMyAdmin.png apidocs/src/samples/db-backup-create-incremental-request-json.txt apidocs/src/samples/db-backup-create-incremental-request.json apidocs/src/samples/db-backup-create-incremental-response-json.txt apidocs/src/samples/db-backup-create-incremental-response.json apidocs/src/samples/db-backup-create-request-json.txt apidocs/src/samples/db-backup-create-request.json apidocs/src/samples/db-backup-create-response-json.txt apidocs/src/samples/db-backup-create-response.json apidocs/src/samples/db-backup-delete-request-json.txt apidocs/src/samples/db-backup-delete-response-json.txt apidocs/src/samples/db-backup-get-request-json.txt apidocs/src/samples/db-backup-get-response-json.txt apidocs/src/samples/db-backup-get-response.json apidocs/src/samples/db-backup-list-request-json.txt apidocs/src/samples/db-backup-list-response-json.txt 
apidocs/src/samples/db-backup-list-response.json apidocs/src/samples/db-backup-restore-request-json.txt apidocs/src/samples/db-backup-restore-request.json apidocs/src/samples/db-backup-restore-response-json.txt apidocs/src/samples/db-backup-restore-response.json apidocs/src/samples/db-backups-by-instance-request-json.txt apidocs/src/samples/db-backups-by-instance-response-json.txt apidocs/src/samples/db-backups-by-instance-response.json apidocs/src/samples/db-change-user-attributes-request-json.txt apidocs/src/samples/db-change-user-attributes-request.json apidocs/src/samples/db-change-user-attributes-response-json.txt apidocs/src/samples/db-change-users-password-request-json.txt apidocs/src/samples/db-change-users-password-request.json apidocs/src/samples/db-change-users-password-response-json.txt apidocs/src/samples/db-check-root-user-request-json.txt apidocs/src/samples/db-check-root-user-response-json.txt apidocs/src/samples/db-check-root-user-response.json apidocs/src/samples/db-configuration-attach-to-instance-request-json.txt apidocs/src/samples/db-configuration-attach-to-instance-request.json apidocs/src/samples/db-configuration-attach-to-instance-response-json.txt apidocs/src/samples/db-configuration-create-request-json.txt apidocs/src/samples/db-configuration-create-request.json apidocs/src/samples/db-configuration-create-response-json.txt apidocs/src/samples/db-configuration-create-response.json apidocs/src/samples/db-configuration-delete-request-json.txt apidocs/src/samples/db-configuration-delete-response-json.txt apidocs/src/samples/db-configuration-detach-from-instance-request-json.txt apidocs/src/samples/db-configuration-detach-from-instance-request.json apidocs/src/samples/db-configuration-detach-from-instance-response-json.txt apidocs/src/samples/db-configuration-details-request-json.txt apidocs/src/samples/db-configuration-details-response-json.txt apidocs/src/samples/db-configuration-details-response.json apidocs/src/samples/db-configuration-edit-parameters-request-json.txt apidocs/src/samples/db-configuration-edit-parameters-request.json apidocs/src/samples/db-configuration-edit-parameters-response-json.txt apidocs/src/samples/db-configuration-list-instances-request-json.txt apidocs/src/samples/db-configuration-list-instances-response-json.txt apidocs/src/samples/db-configuration-list-instances-response.json apidocs/src/samples/db-configuration-list-request-json.txt apidocs/src/samples/db-configuration-list-response-json.txt apidocs/src/samples/db-configuration-list-response.json apidocs/src/samples/db-configuration-parameter-for-datastore-version-request-json.txt apidocs/src/samples/db-configuration-parameter-for-datastore-version-response-json.txt apidocs/src/samples/db-configuration-parameter-for-datastore-version-response.json apidocs/src/samples/db-configuration-parameter-without-datastore-version-request-json.txt apidocs/src/samples/db-configuration-parameter-without-datastore-version-response-json.txt apidocs/src/samples/db-configuration-parameter-without-datastore-version-response.json apidocs/src/samples/db-configuration-parameters-for-datastore-version-request-json.txt apidocs/src/samples/db-configuration-parameters-for-datastore-version-response-json.txt apidocs/src/samples/db-configuration-parameters-for-datastore-version-response.json apidocs/src/samples/db-configuration-parameters-without-datastore-version-request-json.txt apidocs/src/samples/db-configuration-parameters-without-datastore-version-response-json.txt 
apidocs/src/samples/db-configuration-parameters-without-datastore-version-response.json apidocs/src/samples/db-configuration-update-parameters-request-json.txt apidocs/src/samples/db-configuration-update-parameters-request.json apidocs/src/samples/db-configuration-update-parameters-response-json.txt apidocs/src/samples/db-create-databases-request-json.txt apidocs/src/samples/db-create-databases-request.json apidocs/src/samples/db-create-databases-response-json.txt apidocs/src/samples/db-create-instance-request-json.txt apidocs/src/samples/db-create-instance-request.json apidocs/src/samples/db-create-instance-response-json.txt apidocs/src/samples/db-create-instance-response.json apidocs/src/samples/db-create-users-request-json.txt apidocs/src/samples/db-create-users-request.json apidocs/src/samples/db-create-users-response-json.txt apidocs/src/samples/db-datastore-by-id-request-json.txt apidocs/src/samples/db-datastore-by-id-response-json.txt apidocs/src/samples/db-datastore-by-id-response.json apidocs/src/samples/db-datastore-version-by-id-request-json.txt apidocs/src/samples/db-datastore-version-by-id-response-json.txt apidocs/src/samples/db-datastore-version-by-id-response.json apidocs/src/samples/db-datastore-versions-list-request-json.txt apidocs/src/samples/db-datastore-versions-list-response-json.txt apidocs/src/samples/db-datastore-versions-list-response.json apidocs/src/samples/db-datastores-list-request-json.txt apidocs/src/samples/db-datastores-list-response-json.txt apidocs/src/samples/db-datastores-list-response.json apidocs/src/samples/db-delete-databases-request-json.txt apidocs/src/samples/db-delete-databases-response-json.txt apidocs/src/samples/db-delete-instance-request-json.txt apidocs/src/samples/db-delete-instance-response-json.txt apidocs/src/samples/db-delete-users-request-json.txt apidocs/src/samples/db-delete-users-response-json.txt apidocs/src/samples/db-disable-root-user-request-json.txt apidocs/src/samples/db-disable-root-user-response-json.txt apidocs/src/samples/db-enable-root-user-request-json.txt apidocs/src/samples/db-enable-root-user-response-json.txt apidocs/src/samples/db-enable-root-user-response.json apidocs/src/samples/db-faults-badRequest.json apidocs/src/samples/db-faults-instanceFault.json apidocs/src/samples/db-faults-itemNotFound.json apidocs/src/samples/db-flavors-by-id-request-json.txt apidocs/src/samples/db-flavors-by-id-response-json.txt apidocs/src/samples/db-flavors-by-id-response.json apidocs/src/samples/db-flavors-request-json.txt apidocs/src/samples/db-flavors-response-json.txt apidocs/src/samples/db-flavors-response.json apidocs/src/samples/db-get-default-instance-configuration-request-json.txt apidocs/src/samples/db-get-default-instance-configuration-response-json.txt apidocs/src/samples/db-get-default-instance-configuration-response.json apidocs/src/samples/db-grant-user-access-request-json.txt apidocs/src/samples/db-grant-user-access-request.json apidocs/src/samples/db-grant-user-access-response-json.txt apidocs/src/samples/db-instance-reboot-request-json.txt apidocs/src/samples/db-instance-reboot-request.json apidocs/src/samples/db-instance-reboot-response-json.txt apidocs/src/samples/db-instance-resize-flavor-request-json.txt apidocs/src/samples/db-instance-resize-flavor-request.json apidocs/src/samples/db-instance-resize-flavor-response-json.txt apidocs/src/samples/db-instance-resize-instance-request-json.txt apidocs/src/samples/db-instance-resize-instance-request.json 
apidocs/src/samples/db-instance-resize-instance-response-json.txt apidocs/src/samples/db-instance-resize-volume-request-json.txt apidocs/src/samples/db-instance-resize-volume-request.json apidocs/src/samples/db-instance-resize-volume-response-json.txt apidocs/src/samples/db-instance-restart-request-json.txt apidocs/src/samples/db-instance-restart-request.json apidocs/src/samples/db-instance-restart-response-json.txt apidocs/src/samples/db-instance-status-detail-request-json.txt apidocs/src/samples/db-instance-status-detail-response-json.txt apidocs/src/samples/db-instance-status-detail-response.json apidocs/src/samples/db-instances-index-pagination-request-json.txt apidocs/src/samples/db-instances-index-pagination-response-json.txt apidocs/src/samples/db-instances-index-pagination-response.json apidocs/src/samples/db-instances-index-request-json.txt apidocs/src/samples/db-instances-index-response-json.txt apidocs/src/samples/db-instances-index-response.json apidocs/src/samples/db-list-databases-pagination-request-json.txt apidocs/src/samples/db-list-databases-pagination-response-json.txt apidocs/src/samples/db-list-databases-pagination-response.json apidocs/src/samples/db-list-databases-request-json.txt apidocs/src/samples/db-list-databases-response-json.txt apidocs/src/samples/db-list-databases-response.json apidocs/src/samples/db-list-user-access-request-json.txt apidocs/src/samples/db-list-user-access-response-json.txt apidocs/src/samples/db-list-user-access-response.json apidocs/src/samples/db-list-user-dbs-request-json.txt apidocs/src/samples/db-list-user-dbs-response-json.txt apidocs/src/samples/db-list-user-dbs-response.json apidocs/src/samples/db-list-users-pagination-request-json.txt apidocs/src/samples/db-list-users-pagination-response-json.txt apidocs/src/samples/db-list-users-pagination-response.json apidocs/src/samples/db-list-users-request-json.txt apidocs/src/samples/db-list-users-response-json.txt apidocs/src/samples/db-list-users-response.json apidocs/src/samples/db-mgmt-get-account-details-request-json.txt apidocs/src/samples/db-mgmt-get-account-details-response-json.txt apidocs/src/samples/db-mgmt-get-account-details-response.json apidocs/src/samples/db-mgmt-get-host-detail-request-json.txt apidocs/src/samples/db-mgmt-get-host-detail-response-json.txt apidocs/src/samples/db-mgmt-get-host-detail-response.json apidocs/src/samples/db-mgmt-get-instance-details-request-json.txt apidocs/src/samples/db-mgmt-get-instance-details-response-json.txt apidocs/src/samples/db-mgmt-get-instance-details-response.json apidocs/src/samples/db-mgmt-get-root-details-request-json.txt apidocs/src/samples/db-mgmt-get-root-details-response-json.txt apidocs/src/samples/db-mgmt-get-root-details-response.json apidocs/src/samples/db-mgmt-get-storage-request-json.txt apidocs/src/samples/db-mgmt-get-storage-response-json.txt apidocs/src/samples/db-mgmt-get-storage-response.json apidocs/src/samples/db-mgmt-instance-diagnostics-request-json.txt apidocs/src/samples/db-mgmt-instance-diagnostics-response-json.txt apidocs/src/samples/db-mgmt-instance-diagnostics-response.json apidocs/src/samples/db-mgmt-instance-index-request-json.txt apidocs/src/samples/db-mgmt-instance-index-response-json.txt apidocs/src/samples/db-mgmt-instance-index-response.json apidocs/src/samples/db-mgmt-list-accounts-request-json.txt apidocs/src/samples/db-mgmt-list-accounts-response-json.txt apidocs/src/samples/db-mgmt-list-accounts-response.json apidocs/src/samples/db-mgmt-list-hosts-request-json.txt 
apidocs/src/samples/db-mgmt-list-hosts-response-json.txt apidocs/src/samples/db-mgmt-list-hosts-response.json apidocs/src/samples/db-restore-delete-request-json.txt apidocs/src/samples/db-restore-delete-response-json.txt apidocs/src/samples/db-revoke-user-access-request-json.txt apidocs/src/samples/db-revoke-user-access-response-json.txt apidocs/src/samples/db-version-request-json.txt apidocs/src/samples/db-version-response-json.txt apidocs/src/samples/db-version-response.json apidocs/src/samples/db-versions-request-json.txt apidocs/src/samples/db-versions-response-json.txt apidocs/src/samples/db-versions-response.json apidocs/src/xsd/common.ent apidocs/src/xsd/dbaas.wadl apidocs/src/xsd/dbaas.xsd apidocs/src/xslts/js/shjs/sh_java.js apidocs/src/xslts/js/shjs/sh_javascript.js apidocs/src/xslts/js/shjs/sh_main.js apidocs/src/xslts/js/shjs/sh_xml.js apidocs/src/xslts/js/trc/util.js apidocs/src/xslts/js/trc/schema/controller.js apidocs/src/xslts/js/trc/schema/layoutManager.js apidocs/src/xslts/js/trc/schema/sampleManager.js apidocs/src/xslts/style/schema.css apidocs/src/xslts/style/shjs/sh_acid.css apidocs/src/xslts/style/shjs/sh_darkblue.css apidocs/src/xslts/style/shjs/sh_emacs.css apidocs/src/xslts/style/shjs/sh_night.css apidocs/src/xslts/style/shjs/sh_pablo.css apidocs/src/xslts/style/shjs/sh_print.css apidocs/src/xslts/style/shjs/sh_style.css apidocs/src/xslts/style/shjs/sh_whitengrey.css apidocs/src/xslts/xslt/.htaccess apidocs/src/xslts/xslt/schema.xslt contrib/trove-guestagent devstack/README.rst devstack/plugin.sh devstack/settings devstack/files/debs/trove devstack/files/rpms/trove devstack/files/rpms-suse/trove doc/source/conf.py doc/source/index.rst doc/source/dev/building_guest_images.rst doc/source/dev/design.rst doc/source/dev/install.rst doc/source/dev/manual_install.rst doc/source/dev/notifier.rst doc/source/dev/testing.rst doc/source/dev/trove_api_extensions.rst doc/votes/channel_logging etc/tests/core.test.conf etc/tests/localhost.test.conf etc/trove/api-paste.ini etc/trove/api-paste.ini.test etc/trove/trove-conductor.conf.sample etc/trove/trove-guestagent.conf.sample etc/trove/trove-taskmanager.conf.sample etc/trove/trove.conf.sample etc/trove/trove.conf.test etc/trove/cloudinit/README etc/trove/conf.d/README etc/trove/conf.d/guest_info.conf releasenotes/notes/.placeholder releasenotes/notes/add-cors-support-fe3ecbecb68f7efd.yaml releasenotes/notes/cassandra-backup-and-restore-00de234de67ea5ee.yaml releasenotes/notes/cassandra-configuration-groups-e6bcf4014a79f14f.yaml releasenotes/notes/cassandra-user-functions-041abfa4f4baa591.yaml releasenotes/notes/couchdb-backup-restore-0cc3324c3088f947.yaml releasenotes/notes/couchdb-user-db-functions-fa41ac47fce095cb.yaml releasenotes/notes/datastore-manager-refactor-5aeac4e6bfa6e07b.yaml releasenotes/notes/db2-backup-restore-96ab214cddd15181.yaml releasenotes/notes/dbaas-ceilometer-notifications-5a623d0d6520be72.yaml releasenotes/notes/drop-python-26-support-39dff0c5636edc74.yaml releasenotes/notes/fix-apply-configuration-on-prepare-4cff827b7f3c4d33.yaml releasenotes/notes/fix-bad-swift-endpoint-in-guestlog-05f7483509dacbbf.yaml releasenotes/notes/fix-mongo-cluster-grow-8fa4788af0ce5309.yaml releasenotes/notes/fix-trove-events-8ce54233504065cf.yaml releasenotes/notes/implement-cassandra-clustering-9f7bc3ae6817c19e.yaml releasenotes/notes/implement-cassandra-root-b0870d23dbf1a848.yaml releasenotes/notes/implement-mariadb-clustering-088ac2f6012689fb.yaml releasenotes/notes/improve-mysql-user-list-pagination-71457d934500f817.yaml 
releasenotes/notes/mariadb-gtid-replication-1ea972bcfe909773.yaml releasenotes/notes/module-management-66d3979cc45ed440.yaml releasenotes/notes/mongo-cluster-grow-use-az-and-nic-values-207b041113e7b4fb.yaml releasenotes/notes/mysql-user-list-pagination-9496c401c180f605.yaml releasenotes/notes/percona-2.3-support-2eab8f12167e44bc.yaml releasenotes/notes/pxc-cluster-root-enable-30c366e3b5bcda51.yaml releasenotes/notes/pxc-grow-shrink-0b1ee689cbc77743.yaml releasenotes/notes/secure-mongodb-instances-1e6d7df3febab8f4.yaml releasenotes/notes/use-osprofiler-options-58263c311617b127.yaml releasenotes/notes/vertica-configuration-groups-710c892c1e3d6a90.yaml releasenotes/notes/vertica-grow-shrink-cluster-e32d48f5b2e1bfab.yaml releasenotes/notes/vertica-load-via-curl-call-4d47c4e0b1b53471.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/install_venv.py tools/start-fake-mode.sh tools/stop-fake-mode.sh tools/with_venv.sh trove/README trove/__init__.py trove/rpc.py trove/version.py trove/versions.py trove.egg-info/PKG-INFO trove.egg-info/SOURCES.txt trove.egg-info/dependency_links.txt trove.egg-info/entry_points.txt trove.egg-info/not-zip-safe trove.egg-info/pbr.json trove.egg-info/requires.txt trove.egg-info/top_level.txt trove/backup/__init__.py trove/backup/models.py trove/backup/service.py trove/backup/state.py trove/backup/views.py trove/cluster/__init__.py trove/cluster/models.py trove/cluster/service.py trove/cluster/tasks.py trove/cluster/views.py trove/cmd/__init__.py trove/cmd/api.py trove/cmd/common.py trove/cmd/conductor.py trove/cmd/fakemode.py trove/cmd/guest.py trove/cmd/manage.py trove/cmd/taskmanager.py trove/common/__init__.py trove/common/api.py trove/common/apischema.py trove/common/auth.py trove/common/base_exception.py trove/common/base_wsgi.py trove/common/cfg.py trove/common/configurations.py trove/common/context.py trove/common/crypto_utils.py trove/common/debug_utils.py trove/common/exception.py trove/common/extensions.py trove/common/i18n.py trove/common/instance.py trove/common/limits.py trove/common/local.py trove/common/models.py trove/common/notification.py trove/common/pagination.py trove/common/pastedeploy.py trove/common/profile.py trove/common/remote.py trove/common/serializable_notification.py trove/common/single_tenant_remote.py trove/common/stream_codecs.py trove/common/template.py trove/common/utils.py trove/common/views.py trove/common/wsgi.py trove/common/xmlutils.py trove/common/rpc/__init__.py trove/common/rpc/service.py trove/common/rpc/version.py trove/common/schemas/atom-link.rng trove/common/schemas/atom.rng trove/common/schemas/v1.1/limits.rng trove/common/strategies/__init__.py trove/common/strategies/strategy.py trove/common/strategies/cluster/__init__.py trove/common/strategies/cluster/base.py trove/common/strategies/cluster/strategy.py trove/common/strategies/cluster/experimental/__init__.py trove/common/strategies/cluster/experimental/cassandra/__init__.py trove/common/strategies/cluster/experimental/cassandra/api.py trove/common/strategies/cluster/experimental/cassandra/guestagent.py trove/common/strategies/cluster/experimental/cassandra/taskmanager.py trove/common/strategies/cluster/experimental/galera_common/__init__.py trove/common/strategies/cluster/experimental/galera_common/api.py trove/common/strategies/cluster/experimental/galera_common/guestagent.py 
trove/common/strategies/cluster/experimental/galera_common/taskmanager.py trove/common/strategies/cluster/experimental/mongodb/__init__.py trove/common/strategies/cluster/experimental/mongodb/api.py trove/common/strategies/cluster/experimental/mongodb/guestagent.py trove/common/strategies/cluster/experimental/mongodb/taskmanager.py trove/common/strategies/cluster/experimental/redis/__init__.py trove/common/strategies/cluster/experimental/redis/api.py trove/common/strategies/cluster/experimental/redis/guestagent.py trove/common/strategies/cluster/experimental/redis/taskmanager.py trove/common/strategies/cluster/experimental/vertica/__init__.py trove/common/strategies/cluster/experimental/vertica/api.py trove/common/strategies/cluster/experimental/vertica/guestagent.py trove/common/strategies/cluster/experimental/vertica/taskmanager.py trove/common/strategies/storage/__init__.py trove/common/strategies/storage/base.py trove/common/strategies/storage/swift.py trove/common/strategies/storage/experimental/__init__.py trove/conductor/__init__.py trove/conductor/api.py trove/conductor/manager.py trove/conductor/models.py trove/configuration/__init__.py trove/configuration/models.py trove/configuration/service.py trove/configuration/views.py trove/datastore/__init__.py trove/datastore/models.py trove/datastore/service.py trove/datastore/views.py trove/db/__init__.py trove/db/models.py trove/db/sqlalchemy/__init__.py trove/db/sqlalchemy/api.py trove/db/sqlalchemy/mappers.py trove/db/sqlalchemy/migration.py trove/db/sqlalchemy/session.py trove/db/sqlalchemy/utils.py trove/db/sqlalchemy/migrate_repo/README trove/db/sqlalchemy/migrate_repo/__init__.py trove/db/sqlalchemy/migrate_repo/manage.py trove/db/sqlalchemy/migrate_repo/migrate.cfg trove/db/sqlalchemy/migrate_repo/schema.py trove/db/sqlalchemy/migrate_repo/versions/001_base_schema.py trove/db/sqlalchemy/migrate_repo/versions/002_service_images.py trove/db/sqlalchemy/migrate_repo/versions/003_service_statuses.py trove/db/sqlalchemy/migrate_repo/versions/004_root_enabled.py trove/db/sqlalchemy/migrate_repo/versions/005_heartbeat.py trove/db/sqlalchemy/migrate_repo/versions/006_dns_records.py trove/db/sqlalchemy/migrate_repo/versions/007_add_volume_flavor.py trove/db/sqlalchemy/migrate_repo/versions/008_add_instance_fields.py trove/db/sqlalchemy/migrate_repo/versions/009_add_deleted_flag_to_instances.py trove/db/sqlalchemy/migrate_repo/versions/010_add_usage.py trove/db/sqlalchemy/migrate_repo/versions/011_quota.py trove/db/sqlalchemy/migrate_repo/versions/012_backup.py trove/db/sqlalchemy/migrate_repo/versions/013_add_security_group_artifacts.py trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py trove/db/sqlalchemy/migrate_repo/versions/018_datastore_versions_fix.py trove/db/sqlalchemy/migrate_repo/versions/019_datastore_fix.py trove/db/sqlalchemy/migrate_repo/versions/020_configurations.py trove/db/sqlalchemy/migrate_repo/versions/021_conductor_last_seen.py trove/db/sqlalchemy/migrate_repo/versions/022_add_backup_parent_id.py trove/db/sqlalchemy/migrate_repo/versions/023_add_instance_indexes.py trove/db/sqlalchemy/migrate_repo/versions/024_add_backup_indexes.py trove/db/sqlalchemy/migrate_repo/versions/025_add_service_statuses_indexes.py trove/db/sqlalchemy/migrate_repo/versions/026_datastore_versions_unique_fix.py 
trove/db/sqlalchemy/migrate_repo/versions/027_add_datastore_capabilities.py trove/db/sqlalchemy/migrate_repo/versions/028_recreate_agent_heartbeat.py trove/db/sqlalchemy/migrate_repo/versions/029_add_backup_datastore.py trove/db/sqlalchemy/migrate_repo/versions/030_add_master_slave.py trove/db/sqlalchemy/migrate_repo/versions/031_add_timestamps_to_configurations.py trove/db/sqlalchemy/migrate_repo/versions/032_clusters.py trove/db/sqlalchemy/migrate_repo/versions/033_datastore_parameters.py trove/db/sqlalchemy/migrate_repo/versions/034_change_task_description.py trove/db/sqlalchemy/migrate_repo/versions/035_flavor_id_int_to_string.py trove/db/sqlalchemy/migrate_repo/versions/036_add_datastore_version_metadata.py trove/db/sqlalchemy/migrate_repo/versions/037_modules.py trove/db/sqlalchemy/migrate_repo/versions/__init__.py trove/dns/__init__.py trove/dns/driver.py trove/dns/manager.py trove/dns/models.py trove/dns/designate/__init__.py trove/dns/designate/driver.py trove/extensions/__init__.py trove/extensions/account/__init__.py trove/extensions/account/models.py trove/extensions/account/service.py trove/extensions/account/views.py trove/extensions/cassandra/__init__.py trove/extensions/cassandra/service.py trove/extensions/common/__init__.py trove/extensions/common/models.py trove/extensions/common/service.py trove/extensions/common/views.py trove/extensions/mgmt/__init__.py trove/extensions/mgmt/clusters/__init__.py trove/extensions/mgmt/clusters/models.py trove/extensions/mgmt/clusters/service.py trove/extensions/mgmt/clusters/views.py trove/extensions/mgmt/configuration/__init__.py trove/extensions/mgmt/configuration/service.py trove/extensions/mgmt/configuration/views.py trove/extensions/mgmt/datastores/__init__.py trove/extensions/mgmt/datastores/service.py trove/extensions/mgmt/datastores/views.py trove/extensions/mgmt/host/__init__.py trove/extensions/mgmt/host/models.py trove/extensions/mgmt/host/service.py trove/extensions/mgmt/host/views.py trove/extensions/mgmt/host/instance/__init__.py trove/extensions/mgmt/host/instance/service.py trove/extensions/mgmt/instances/__init__.py trove/extensions/mgmt/instances/models.py trove/extensions/mgmt/instances/service.py trove/extensions/mgmt/instances/views.py trove/extensions/mgmt/quota/__init__.py trove/extensions/mgmt/quota/service.py trove/extensions/mgmt/quota/views.py trove/extensions/mgmt/upgrade/__init__.py trove/extensions/mgmt/upgrade/models.py trove/extensions/mgmt/upgrade/service.py trove/extensions/mgmt/volume/__init__.py trove/extensions/mgmt/volume/models.py trove/extensions/mgmt/volume/service.py trove/extensions/mgmt/volume/views.py trove/extensions/mysql/__init__.py trove/extensions/mysql/common.py trove/extensions/mysql/models.py trove/extensions/mysql/service.py trove/extensions/mysql/views.py trove/extensions/pxc/__init__.py trove/extensions/pxc/service.py trove/extensions/routes/__init__.py trove/extensions/routes/account.py trove/extensions/routes/mgmt.py trove/extensions/routes/mysql.py trove/extensions/routes/security_group.py trove/extensions/security_group/__init__.py trove/extensions/security_group/models.py trove/extensions/security_group/service.py trove/extensions/security_group/views.py trove/extensions/vertica/__init__.py trove/extensions/vertica/service.py trove/flavor/__init__.py trove/flavor/models.py trove/flavor/service.py trove/flavor/views.py trove/guestagent/__init__.py trove/guestagent/api.py trove/guestagent/dbaas.py trove/guestagent/guest_log.py trove/guestagent/models.py trove/guestagent/pkg.py 
trove/guestagent/service.py trove/guestagent/volume.py trove/guestagent/backup/__init__.py trove/guestagent/backup/backupagent.py trove/guestagent/common/__init__.py trove/guestagent/common/configuration.py trove/guestagent/common/guestagent_utils.py trove/guestagent/common/operating_system.py trove/guestagent/common/sql_query.py trove/guestagent/common/timeutils.py trove/guestagent/datastore/__init__.py trove/guestagent/datastore/manager.py trove/guestagent/datastore/service.py trove/guestagent/datastore/experimental/__init__.py trove/guestagent/datastore/experimental/cassandra/__init__.py trove/guestagent/datastore/experimental/cassandra/manager.py trove/guestagent/datastore/experimental/cassandra/service.py trove/guestagent/datastore/experimental/couchbase/__init__.py trove/guestagent/datastore/experimental/couchbase/manager.py trove/guestagent/datastore/experimental/couchbase/service.py trove/guestagent/datastore/experimental/couchbase/system.py trove/guestagent/datastore/experimental/couchdb/__init__.py trove/guestagent/datastore/experimental/couchdb/manager.py trove/guestagent/datastore/experimental/couchdb/service.py trove/guestagent/datastore/experimental/couchdb/system.py trove/guestagent/datastore/experimental/db2/__init__.py trove/guestagent/datastore/experimental/db2/manager.py trove/guestagent/datastore/experimental/db2/service.py trove/guestagent/datastore/experimental/db2/system.py trove/guestagent/datastore/experimental/mariadb/__init__.py trove/guestagent/datastore/experimental/mariadb/manager.py trove/guestagent/datastore/experimental/mariadb/service.py trove/guestagent/datastore/experimental/mongodb/__init__.py trove/guestagent/datastore/experimental/mongodb/manager.py trove/guestagent/datastore/experimental/mongodb/service.py trove/guestagent/datastore/experimental/mongodb/system.py trove/guestagent/datastore/experimental/percona/__init__.py trove/guestagent/datastore/experimental/percona/manager.py trove/guestagent/datastore/experimental/percona/service.py trove/guestagent/datastore/experimental/postgresql/__init__.py trove/guestagent/datastore/experimental/postgresql/manager.py trove/guestagent/datastore/experimental/postgresql/pgutil.py trove/guestagent/datastore/experimental/postgresql/service/__init__.py trove/guestagent/datastore/experimental/postgresql/service/access.py trove/guestagent/datastore/experimental/postgresql/service/config.py trove/guestagent/datastore/experimental/postgresql/service/database.py trove/guestagent/datastore/experimental/postgresql/service/install.py trove/guestagent/datastore/experimental/postgresql/service/process.py trove/guestagent/datastore/experimental/postgresql/service/root.py trove/guestagent/datastore/experimental/postgresql/service/status.py trove/guestagent/datastore/experimental/postgresql/service/users.py trove/guestagent/datastore/experimental/pxc/__init__.py trove/guestagent/datastore/experimental/pxc/manager.py trove/guestagent/datastore/experimental/pxc/service.py trove/guestagent/datastore/experimental/redis/__init__.py trove/guestagent/datastore/experimental/redis/manager.py trove/guestagent/datastore/experimental/redis/service.py trove/guestagent/datastore/experimental/redis/system.py trove/guestagent/datastore/experimental/vertica/__init__.py trove/guestagent/datastore/experimental/vertica/manager.py trove/guestagent/datastore/experimental/vertica/service.py trove/guestagent/datastore/experimental/vertica/system.py trove/guestagent/datastore/galera_common/__init__.py 
trove/guestagent/datastore/galera_common/manager.py trove/guestagent/datastore/galera_common/service.py trove/guestagent/datastore/mysql/__init__.py trove/guestagent/datastore/mysql/manager.py trove/guestagent/datastore/mysql/service.py trove/guestagent/datastore/mysql_common/__init__.py trove/guestagent/datastore/mysql_common/manager.py trove/guestagent/datastore/mysql_common/service.py trove/guestagent/datastore/technical-preview/__init__.py trove/guestagent/db/__init__.py trove/guestagent/db/models.py trove/guestagent/module/__init__.py trove/guestagent/module/driver_manager.py trove/guestagent/module/module_manager.py trove/guestagent/module/drivers/__init__.py trove/guestagent/module/drivers/module_driver.py trove/guestagent/module/drivers/ping_driver.py trove/guestagent/strategies/__init__.py trove/guestagent/strategies/backup/__init__.py trove/guestagent/strategies/backup/base.py trove/guestagent/strategies/backup/mysql_impl.py trove/guestagent/strategies/backup/experimental/__init__.py trove/guestagent/strategies/backup/experimental/cassandra_impl.py trove/guestagent/strategies/backup/experimental/couchbase_impl.py trove/guestagent/strategies/backup/experimental/couchdb_impl.py trove/guestagent/strategies/backup/experimental/db2_impl.py trove/guestagent/strategies/backup/experimental/mongo_impl.py trove/guestagent/strategies/backup/experimental/postgresql_impl.py trove/guestagent/strategies/backup/experimental/redis_impl.py trove/guestagent/strategies/replication/__init__.py trove/guestagent/strategies/replication/base.py trove/guestagent/strategies/replication/mysql_base.py trove/guestagent/strategies/replication/mysql_binlog.py trove/guestagent/strategies/replication/mysql_gtid.py trove/guestagent/strategies/replication/experimental/__init__.py trove/guestagent/strategies/replication/experimental/mariadb_gtid.py trove/guestagent/strategies/replication/experimental/redis_sync.py trove/guestagent/strategies/restore/__init__.py trove/guestagent/strategies/restore/base.py trove/guestagent/strategies/restore/mysql_impl.py trove/guestagent/strategies/restore/experimental/__init__.py trove/guestagent/strategies/restore/experimental/cassandra_impl.py trove/guestagent/strategies/restore/experimental/couchbase_impl.py trove/guestagent/strategies/restore/experimental/couchdb_impl.py trove/guestagent/strategies/restore/experimental/db2_impl.py trove/guestagent/strategies/restore/experimental/mongo_impl.py trove/guestagent/strategies/restore/experimental/postgresql_impl.py trove/guestagent/strategies/restore/experimental/redis_impl.py trove/instance/__init__.py trove/instance/models.py trove/instance/service.py trove/instance/tasks.py trove/instance/views.py trove/limits/__init__.py trove/limits/service.py trove/limits/views.py trove/locale/trove-log-error.pot trove/locale/trove-log-info.pot trove/locale/trove-log-warning.pot trove/locale/trove.pot trove/locale/fr/LC_MESSAGES/trove-log-error.po trove/locale/fr/LC_MESSAGES/trove-log-info.po trove/locale/fr/LC_MESSAGES/trove-log-warning.po trove/module/__init__.py trove/module/models.py trove/module/service.py trove/module/views.py trove/network/__init__.py trove/network/base.py trove/network/neutron.py trove/network/nova.py trove/quota/__init__.py trove/quota/models.py trove/quota/quota.py trove/taskmanager/__init__.py trove/taskmanager/api.py trove/taskmanager/manager.py trove/taskmanager/models.py trove/taskmanager/service.py trove/templates/default.heat.template trove/templates/cassandra/config.template 
trove/templates/cassandra/override.config.template trove/templates/cassandra/validation-rules.json trove/templates/couchbase/config.template trove/templates/couchbase/override.config.template trove/templates/couchdb/config.template trove/templates/couchdb/override.config.template trove/templates/db2/config.template trove/templates/db2/override.config.template trove/templates/mariadb/cluster.config.template trove/templates/mariadb/config.template trove/templates/mariadb/override.config.template trove/templates/mariadb/replica.config.template trove/templates/mariadb/replica_source.config.template trove/templates/mongodb/config.template trove/templates/mongodb/override.config.template trove/templates/mongodb/validation-rules.json trove/templates/mysql/config.template trove/templates/mysql/override.config.template trove/templates/mysql/replica.config.template trove/templates/mysql/replica_source.config.template trove/templates/mysql/validation-rules.json trove/templates/mysql/5.5/replica.config.template trove/templates/mysql/5.5/replica_source.config.template trove/templates/mysql/mysql-test/config.template trove/templates/percona/config.template trove/templates/percona/override.config.template trove/templates/percona/replica.config.template trove/templates/percona/replica_source.config.template trove/templates/percona/validation-rules.json trove/templates/percona/5.5/replica.config.template trove/templates/percona/5.5/replica_source.config.template trove/templates/postgresql/config.template trove/templates/postgresql/override.config.template trove/templates/postgresql/validation-rules.json trove/templates/pxc/cluster.config.template trove/templates/pxc/config.template trove/templates/pxc/override.config.template trove/templates/pxc/replica.config.template trove/templates/pxc/replica_source.config.template trove/templates/pxc/validation-rules.json trove/templates/pxc/5.5/replica.config.template trove/templates/pxc/5.5/replica_source.config.template trove/templates/redis/config.template trove/templates/redis/override.config.template trove/templates/redis/replica.config.template trove/templates/redis/replica_source.config.template trove/templates/redis/validation-rules.json trove/templates/vertica/config.template trove/templates/vertica/override.config.template trove/templates/vertica/validation-rules.json trove/tests/__init__.py trove/tests/config.py trove/tests/int_tests.py trove/tests/root_logger.py trove/tests/api/__init__.py trove/tests/api/backups.py trove/tests/api/configurations.py trove/tests/api/databases.py trove/tests/api/datastores.py trove/tests/api/flavors.py trove/tests/api/header.py trove/tests/api/instances.py trove/tests/api/instances_actions.py trove/tests/api/instances_delete.py trove/tests/api/instances_mysql_down.py trove/tests/api/instances_resize.py trove/tests/api/limits.py trove/tests/api/replication.py trove/tests/api/root.py trove/tests/api/root_on_create.py trove/tests/api/user_access.py trove/tests/api/users.py trove/tests/api/versions.py trove/tests/api/mgmt/__init__.py trove/tests/api/mgmt/accounts.py trove/tests/api/mgmt/admin_required.py trove/tests/api/mgmt/configurations.py trove/tests/api/mgmt/datastore_versions.py trove/tests/api/mgmt/hosts.py trove/tests/api/mgmt/instances.py trove/tests/api/mgmt/instances_actions.py trove/tests/api/mgmt/malformed_json.py trove/tests/api/mgmt/quotas.py trove/tests/api/mgmt/storage.py trove/tests/db/__init__.py trove/tests/db/migrations.py trove/tests/examples/__init__.py trove/tests/examples/client.py 
trove/tests/examples/snippets.py trove/tests/fakes/__init__.py trove/tests/fakes/common.py trove/tests/fakes/conf.py trove/tests/fakes/dns.py trove/tests/fakes/guestagent.py trove/tests/fakes/keystone.py trove/tests/fakes/limits.py trove/tests/fakes/nova.py trove/tests/fakes/swift.py trove/tests/fakes/taskmanager.py trove/tests/scenario/__init__.py trove/tests/scenario/groups/__init__.py trove/tests/scenario/groups/backup_group.py trove/tests/scenario/groups/cluster_actions_group.py trove/tests/scenario/groups/configuration_group.py trove/tests/scenario/groups/database_actions_group.py trove/tests/scenario/groups/guest_log_group.py trove/tests/scenario/groups/instance_actions_group.py trove/tests/scenario/groups/instance_create_group.py trove/tests/scenario/groups/instance_delete_group.py trove/tests/scenario/groups/module_group.py trove/tests/scenario/groups/negative_cluster_actions_group.py trove/tests/scenario/groups/replication_group.py trove/tests/scenario/groups/root_actions_group.py trove/tests/scenario/groups/test_group.py trove/tests/scenario/groups/user_actions_group.py trove/tests/scenario/helpers/__init__.py trove/tests/scenario/helpers/cassandra_helper.py trove/tests/scenario/helpers/couchdb_helper.py trove/tests/scenario/helpers/mariadb_helper.py trove/tests/scenario/helpers/mongodb_helper.py trove/tests/scenario/helpers/mysql_helper.py trove/tests/scenario/helpers/percona_helper.py trove/tests/scenario/helpers/postgresql_helper.py trove/tests/scenario/helpers/pxc_helper.py trove/tests/scenario/helpers/redis_helper.py trove/tests/scenario/helpers/sql_helper.py trove/tests/scenario/helpers/test_helper.py trove/tests/scenario/helpers/vertica_helper.py trove/tests/scenario/runners/__init__.py trove/tests/scenario/runners/backup_runners.py trove/tests/scenario/runners/cluster_actions_runners.py trove/tests/scenario/runners/configuration_runners.py trove/tests/scenario/runners/database_actions_runners.py trove/tests/scenario/runners/guest_log_runners.py trove/tests/scenario/runners/instance_actions_runners.py trove/tests/scenario/runners/instance_create_runners.py trove/tests/scenario/runners/instance_delete_runners.py trove/tests/scenario/runners/module_runners.py trove/tests/scenario/runners/negative_cluster_actions_runners.py trove/tests/scenario/runners/replication_runners.py trove/tests/scenario/runners/root_actions_runners.py trove/tests/scenario/runners/test_runners.py trove/tests/scenario/runners/user_actions_runners.py trove/tests/unittests/__init__.py trove/tests/unittests/trove_testtools.py trove/tests/unittests/api/__init__.py trove/tests/unittests/api/test_versions.py trove/tests/unittests/api/common/__init__.py trove/tests/unittests/api/common/test_extensions.py trove/tests/unittests/api/common/test_limits.py trove/tests/unittests/backup/__init__.py trove/tests/unittests/backup/test_backup_controller.py trove/tests/unittests/backup/test_backup_models.py trove/tests/unittests/backup/test_backupagent.py trove/tests/unittests/backup/test_storage.py trove/tests/unittests/cluster/__init__.py trove/tests/unittests/cluster/test_cassandra_cluster.py trove/tests/unittests/cluster/test_cluster.py trove/tests/unittests/cluster/test_cluster_controller.py trove/tests/unittests/cluster/test_cluster_models.py trove/tests/unittests/cluster/test_cluster_pxc_controller.py trove/tests/unittests/cluster/test_cluster_redis_controller.py trove/tests/unittests/cluster/test_cluster_vertica_controller.py trove/tests/unittests/cluster/test_cluster_views.py 
trove/tests/unittests/cluster/test_galera_cluster.py trove/tests/unittests/cluster/test_mongodb_cluster.py trove/tests/unittests/cluster/test_redis_cluster.py trove/tests/unittests/cluster/test_vertica_cluster.py trove/tests/unittests/common/__init__.py trove/tests/unittests/common/test_common_extensions.py trove/tests/unittests/common/test_context.py trove/tests/unittests/common/test_crypto_utils.py trove/tests/unittests/common/test_exception.py trove/tests/unittests/common/test_notification.py trove/tests/unittests/common/test_pagination.py trove/tests/unittests/common/test_remote.py trove/tests/unittests/common/test_stream_codecs.py trove/tests/unittests/common/test_template.py trove/tests/unittests/common/test_utils.py trove/tests/unittests/common/test_wsgi.py trove/tests/unittests/conductor/__init__.py trove/tests/unittests/conductor/test_conf.py trove/tests/unittests/conductor/test_methods.py trove/tests/unittests/configuration/__init__.py trove/tests/unittests/configuration/test_configuration_controller.py trove/tests/unittests/datastore/__init__.py trove/tests/unittests/datastore/base.py trove/tests/unittests/datastore/test_capability.py trove/tests/unittests/datastore/test_datastore.py trove/tests/unittests/datastore/test_datastore_version_metadata.py trove/tests/unittests/datastore/test_datastore_versions.py trove/tests/unittests/db/__init__.py trove/tests/unittests/db/test_migration_utils.py trove/tests/unittests/dns/__init__.py trove/tests/unittests/dns/test_designate_driver.py trove/tests/unittests/guestagent/__init__.py trove/tests/unittests/guestagent/test_agent_heartbeats_models.py trove/tests/unittests/guestagent/test_api.py trove/tests/unittests/guestagent/test_backups.py trove/tests/unittests/guestagent/test_cassandra_manager.py trove/tests/unittests/guestagent/test_configuration.py trove/tests/unittests/guestagent/test_couchbase_manager.py trove/tests/unittests/guestagent/test_couchdb_manager.py trove/tests/unittests/guestagent/test_db2_manager.py trove/tests/unittests/guestagent/test_dbaas.py trove/tests/unittests/guestagent/test_dbmodels.py trove/tests/unittests/guestagent/test_galera_cluster_api.py trove/tests/unittests/guestagent/test_galera_manager.py trove/tests/unittests/guestagent/test_guestagent_utils.py trove/tests/unittests/guestagent/test_manager.py trove/tests/unittests/guestagent/test_mariadb_manager.py trove/tests/unittests/guestagent/test_models.py trove/tests/unittests/guestagent/test_mongodb_cluster_manager.py trove/tests/unittests/guestagent/test_mongodb_manager.py trove/tests/unittests/guestagent/test_mysql_manager.py trove/tests/unittests/guestagent/test_operating_system.py trove/tests/unittests/guestagent/test_pkg.py trove/tests/unittests/guestagent/test_query.py trove/tests/unittests/guestagent/test_redis_manager.py trove/tests/unittests/guestagent/test_service.py trove/tests/unittests/guestagent/test_vertica_api.py trove/tests/unittests/guestagent/test_vertica_manager.py trove/tests/unittests/guestagent/test_volume.py trove/tests/unittests/instance/__init__.py trove/tests/unittests/instance/test_instance_controller.py trove/tests/unittests/instance/test_instance_models.py trove/tests/unittests/instance/test_instance_status.py trove/tests/unittests/instance/test_instance_views.py trove/tests/unittests/mgmt/__init__.py trove/tests/unittests/mgmt/test_clusters.py trove/tests/unittests/mgmt/test_datastore_controller.py trove/tests/unittests/mgmt/test_datastores.py trove/tests/unittests/mgmt/test_models.py trove/tests/unittests/module/__init__.py 
trove/tests/unittests/module/test_module_controller.py trove/tests/unittests/module/test_module_models.py trove/tests/unittests/module/test_module_views.py trove/tests/unittests/mysql/__init__.py trove/tests/unittests/mysql/test_common.py trove/tests/unittests/mysql/test_user_controller.py trove/tests/unittests/network/__init__.py trove/tests/unittests/network/test_neutron_driver.py trove/tests/unittests/quota/__init__.py trove/tests/unittests/quota/test_quota.py trove/tests/unittests/router/__init__.py trove/tests/unittests/router/test_router.py trove/tests/unittests/secgroups/__init__.py trove/tests/unittests/secgroups/test_security_group.py trove/tests/unittests/taskmanager/__init__.py trove/tests/unittests/taskmanager/test_api.py trove/tests/unittests/taskmanager/test_clusters.py trove/tests/unittests/taskmanager/test_galera_clusters.py trove/tests/unittests/taskmanager/test_manager.py trove/tests/unittests/taskmanager/test_models.py trove/tests/unittests/taskmanager/test_vertica_clusters.py trove/tests/unittests/upgrade/__init__.py trove/tests/unittests/upgrade/test_controller.py trove/tests/unittests/upgrade/test_models.py trove/tests/unittests/util/__init__.py trove/tests/unittests/util/matchers.py trove/tests/unittests/util/util.py trove/tests/util/__init__.py trove/tests/util/check.py trove/tests/util/client.py trove/tests/util/event_simulator.py trove/tests/util/mysql.py trove/tests/util/server_connection.py trove/tests/util/usage.py trove/tests/util/users.py trove-5.0.0/trove.egg-info/PKG-INFO0000664000567000056710000000400212701410517017744 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: trove Version: 5.0.0 Summary: OpenStack DBaaS Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Trove -------- Trove is Database as a Service for OpenStack. ============================= Usage for integration testing ============================= If you'd like to start up a fake Trove API daemon for integration testing with your own tool, run: .. code-block:: bash $ ./tools/start-fake-mode.sh Stop the server with: .. code-block:: bash $ ./tools/stop-fake-mode.sh ====== Tests ====== To run all tests and PEP8, run tox, like so: .. code-block:: bash $ tox To run just the tests for Python 2.7, run: .. code-block:: bash $ tox -epy27 To run just PEP8, run: .. code-block:: bash $ tox -epep8 To generate a coverage report, run: .. code-block:: bash $ tox -ecover (note: on some boxes, the results may not be accurate unless you run it twice) If you want to run only the tests in one file you can use testtools, e.g. ..
code-block:: bash $ python -m testtools.run trove.tests.unittests.python.module.path Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 trove-5.0.0/trove.egg-info/top_level.txt0000664000567000056710000000000612701410517021371 0ustar jenkinsjenkins00000000000000trove trove-5.0.0/trove.egg-info/pbr.json0000664000567000056710000000005612701410517020322 0ustar jenkinsjenkins00000000000000{"git_version": "48a4004", "is_release": true}trove-5.0.0/trove.egg-info/entry_points.txt0000664000567000056710000000216712701410517022147 0ustar jenkinsjenkins00000000000000[console_scripts] trove-api = trove.cmd.api:main trove-conductor = trove.cmd.conductor:main trove-fake-mode = trove.cmd.fakemode:main trove-guestagent = trove.cmd.guest:main trove-manage = trove.cmd.manage:main trove-mgmt-taskmanager = trove.cmd.taskmanager:mgmt_main trove-taskmanager = trove.cmd.taskmanager:main [oslo.messaging.notify.drivers] trove.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver trove.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver trove.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver trove.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver trove.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver [trove.api.extensions] account = trove.extensions.routes.account:Account mgmt = trove.extensions.routes.mgmt:Mgmt mysql = trove.extensions.routes.mysql:Mysql security_group = trove.extensions.routes.security_group:Security_group [trove.guestagent.module.drivers] ping = trove.guestagent.module.drivers.ping_driver:PingDriver trove-5.0.0/doc/0000775000567000056710000000000012701410521014552 5ustar jenkinsjenkins00000000000000trove-5.0.0/doc/source/0000775000567000056710000000000012701410521016052 5ustar jenkinsjenkins00000000000000trove-5.0.0/doc/source/index.rst0000664000567000056710000000475512701410316017730 0ustar jenkinsjenkins00000000000000=========================================== Welcome to Trove's developer documentation! =========================================== Introduction ============ Trove is Database as a Service for OpenStack. It's designed to run entirely on OpenStack, with the goal of allowing users to quickly and easily utilize the features of a relational database without the burden of handling complex administrative tasks. Cloud users and database administrators can provision and manage multiple database instances as needed. Initially, the service will focus on providing resource isolation at high performance while automating complex administrative tasks including deployment, configuration, patching, backups, restores, and monitoring. For an in-depth look at the project's design and structure, see the :doc:`dev/design` page. Installation And Deployment =========================== Trove is constantly under development. The easiest way to install Trove is using the Trove integration scripts that can be found in git in the `Trove Integration`_ Repository. 
For further details on how to install Trove using the integration scripts, please refer to the :doc:`dev/install` page. For further details on how to install Trove to work with an existing OpenStack environment, please refer to the :doc:`dev/manual_install` page. Developer Resources =================== For those wishing to develop Trove itself, or to extend Trove's functionality, the following resources are provided. .. toctree:: :maxdepth: 1 dev/design dev/testing dev/install dev/manual_install.rst dev/building_guest_images.rst dev/notifier.rst dev/trove_api_extensions.rst * Source Code Repositories - `Trove`_ - `Trove Integration`_ - `Trove Client`_ * `Trove Wiki`_ on OpenStack * `Trove API Documentation`_ on docs.openstack.org Guest Images ============ In order to use Trove, you need to have Guest Images for each datastore and version. These images are loaded into Glance and registered with Trove. For those wishing to develop guest images, please refer to the :doc:`dev/building_guest_images` page. Search Trove Documentation ========================== * :ref:`search` .. _Trove Wiki: https://wiki.openstack.org/wiki/Trove .. _Trove: https://git.openstack.org/cgit/openstack/trove .. _Trove Integration: https://git.openstack.org/cgit/openstack/trove-integration .. _Trove Client: https://git.openstack.org/cgit/openstack/python-troveclient .. _Trove API Documentation: http://developer.openstack.org/api-ref-databases-v1.html trove-5.0.0/doc/source/dev/0000775000567000056710000000000012701410521016630 5ustar jenkinsjenkins00000000000000trove-5.0.0/doc/source/dev/notifier.rst0000664000567000056710000000022012701410316021175 0ustar jenkinsjenkins00000000000000========================== Available Notifier Drivers ========================== .. list-plugins:: oslo.messaging.notify.drivers :detailed: trove-5.0.0/doc/source/dev/design.rst0000664000567000056710000001177312701410316020646 0ustar jenkinsjenkins00000000000000.. _design: ============ Trove Design ============ High Level description ====================== Trove is designed to support a single-tenant database within a Nova instance. There will be no restrictions on how Nova is configured, since Trove interacts with other OpenStack components purely through the API. Trove-api ========= The trove-api service provides a RESTful API that supports JSON and XML to provision and manage Trove instances. * A RESTful component * Entry point - Trove/bin/trove-api * Uses a WSGI launcher configured by Trove/etc/trove/api-paste.ini * Defines the pipeline of filters: authtoken, ratelimit, etc. * Defines the app_factory for the troveapp as trove.common.api:app_factory * The API class (a wsgi Router) wires the REST paths to the appropriate Controllers * Implementations of the Controllers are under the relevant module (versions/instance/flavor/limits), in the service.py module * Controllers usually redirect implementation to a class in the models.py module * At this point, an api module of another component (TaskManager, GuestAgent, etc.) is used to send the request onwards through RabbitMQ (a short sketch of this hand-off follows below)
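To make the hand-off described above concrete, the sketch below shows roughly what such a message dispatch looks like with the oslo.messaging API. It is illustrative only and is not the actual Trove code: the topic name, method name, and arguments here are assumptions chosen for the example.

.. code-block:: python

    import oslo_messaging as messaging
    from oslo_config import cfg

    # Load the RPC transport (RabbitMQ in a typical deployment).
    transport = messaging.get_transport(cfg.CONF)

    # Address the receiving component by its topic. The topic and
    # version values here are assumed, for illustration only.
    target = messaging.Target(topic='taskmanager', version='1.0')
    client = messaging.RPCClient(transport, target)

    # cast() is fire-and-forget (asynchronous); call() would block for a
    # reply. The method's name travels as part of the message, as
    # described in the bullet list above. The method and argument names
    # are hypothetical.
    client.cast({}, 'create_instance', instance_id='an-instance-uuid')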
* A service that listens on a RabbitMQ topic * Entry point - Trove/bin/trove-taskmanager * Runs as a RpcService configured by Trove/etc/trove/trove-taskmanager.conf.sample which defines trove.taskmanager.manager.Manager as the manager - basically this is the entry point for requests arriving through the queue * As described above, requests for this component are pushed to MQ from another component using the TaskManager's api module using _cast() or _call() (sync/a-sync) and putting the method's name as a parameter * Trove/openstack/common/rpc/dispatcher.py- RpcDispatcher.dispatch() invokes the proper method in the Manager by some equivalent to reflection * The Manager then redirect the handling to an object from the models.py module. It loads an object from the relevant class with the context and instance_id * Actual handling is usually done in the models.py module Trove-guestagent ================ The guestagent is a service that runs within the guest instance, responsible for managing and performing operations on the Database itself. The Guest Agent listens for RPC messages through the message bus and performs the requested operation. * Similar to TaskManager in the sense of running as a service that listens on a RabbitMQ topic * GuestAgent runs on every DB instance, and a dedicated MQ topic is used (identified as the instance's id) * Entry point - Trove/bin/trove-guestagent * Runs as a RpcService configured by Trove/etc/trove/trove-guestagent.conf.sample which defines trove.guestagent.manager.Manager as the manager - basically this is the entry point for requests arriving through the queue * As described above, requests for this component are pushed to MQ from another component using the GuestAgent's api module using _cast() or _call() (sync/a-sync) and putting the method's name as a parameter * Trove/openstack/common/rpc/dispatcher.py- RpcDispatcher.dispatch() invokes the proper method in the Manager by some equivalent to reflection * The Manager then redirect the handling to an object (usually) from the dbaas.py module. * Actual handling is usually done in the dbaas.py module Trove-conductor =============== Conductor is a service that runs on the host, responsible for receiving messages from guest instances to update information on the host. For example, instance statuses and the current status of a backup. With conductor, guest instances do not need a direct connection to the host's database. Conductor listens for RPC messages through the message bus and performs the relevant operation. * Similar to guest-agent in that it is a service that listens to a RabbitMQ topic. The difference is conductor lives on the host, not the guest. * Guest agents communicate to conductor by putting messages on the topic defined in cfg as conductor_queue. By default this is "trove-conductor". * Entry point - Trove/bin/trove-conductor * Runs as RpcService configured by Trove/etc/trove/trove-conductor.conf.sample which defines trove.conductor.manager.Manager as the manager. This is the entry point for requests arriving on the queue. * As guestagent above, requests are pushed to MQ from another component using _cast() (synchronous), generally of the form {"method": "", "args": {}} * Actual database update work is done by trove/conductor/manager.py * The "heartbeat" method updates the status of an instance. This is used to report that instance has changed from NEW to BUILDING to ACTIVE and so on. 
* The "update_backup" method changes the details of a backup, including its current status, size of the backup, type, and checksum. .. Trove - Database as a Service: https://wiki.openstack.org/wiki/Trove trove-5.0.0/doc/source/dev/install.rst0000664000567000056710000000525412701410316021040 0ustar jenkinsjenkins00000000000000.. _install: ================== Trove Installation ================== Trove is constantly under development. The easiest way to install Trove is using the Trove integration scripts that can be found in git in the `Trove Integration Repository`_. Steps to set up a Trove Developer Environment ============================================= ---------------------------- Installing trove-integration ---------------------------- * Install a fresh Ubuntu 14.04 (Trusty Tahr) image (preferably a virtual machine) * Make sure we have git installed:: # apt-get update # apt-get install git -y * Add a user named ubuntu if you do not already have one:: # adduser ubuntu * Set the ubuntu user up with sudo access:: # visudo Add *ubuntu ALL=(ALL) NOPASSWD: ALL* to the sudoers file. * Login with ubuntu:: # su ubuntu # cd ~ * Clone this repo:: # git clone https://git.openstack.org/openstack/trove-integration.git * cd into the scripts directory:: # cd trove-integration/scripts/ --------------------------------- Running redstack to install Trove --------------------------------- Redstack is the core script that allows you to install and interact with your developer installation of Trove. Redstack has the following options that you can run. * Get the command list with a short description of each command and what it does:: # ./redstack * Install all the dependencies and then install Trove. This brings up trove (tr-api tr-tmgr tr-cond) and initializes the trove database:: # ./redstack install * Kick start the build/test-init/build-image commands. Add mysql as a parameter to set build and add the mysql guest image:: # ./redstack kick-start mysql * You may need to add this iptables rule, so be sure to save it!:: # sudo iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -o eth0 -j MASQUERADE ------------------------ Running the trove client ------------------------ * The trove client is run using the trove command. You can show the complete documentation on the shell by running trove help:: # trove help ----------------------- Running the nova client ----------------------- * The nova client is run using the nova command. You can show the complete documentation on the shell by running nova help::: # nova help More information ================ For more information and help on how to use redstack and other trove-integration scripts, please look at the `README documentation`_ in the `Trove Integration Repository`_. .. _Trove Integration Repository: https://git.openstack.org/cgit/openstack/trove-integration .. _README documentation: https://git.openstack.org/cgit/openstack/trove-integration/plain/README.md trove-5.0.0/doc/source/dev/testing.rst0000664000567000056710000001011612701410316021040 0ustar jenkinsjenkins00000000000000.. _testing: ========================= Notes on Trove Unit Tests ========================= Mock Object Library ------------------- Trove unit tests make a frequent use of the Python Mock library. This library lets the caller replace (*"mock"*) parts of the system under test with mock objects and make assertions about how they have been used. [1]_ The Problem of Dangling Mocks ----------------------------- Often one needs to mock global functions in shared system modules. 
The caller must restore the original state of the module after it is no longer required. Dangling mock objects in global modules (mocked members of imported modules that never get restored) have been causing various transient failures in the unit test suite. The main issues posed by dangling mock objects include: - Such object references propagate across the entire test suite. Any caller may be hit by a non-functional - or worse - crippled module member because some other (potentially totally unrelated) test case failed to restore it. - Dangling mock references shared across different test modules may lead to unexpected results/behavior in multi-threaded environments. One example could be a test case failing because a mock got called multiple times from unrelated modules. Such issues are likely to exhibit transient random behavior depending on the runtime environment, making them difficult to debug. There are several possible strategies available for dealing with dangling mock objects (see the section on recommended patterns). Further information is available in [1]_. Dangling Mock Detector ---------------------- All Trove unit tests should extend 'trove_testtools.TestCase'. It is a subclass of 'testtools.TestCase', which automatically checks for dangling mock objects after each test. It does that by recording mock instances in loaded modules before and after a test case. It marks the test as failed and reports the leaked reference if it finds any. Recommended Mocking Patterns ---------------------------- - Mocking a class or object shared across multiple test cases. Use the patcher pattern in conjunction with the setUp() and tearDown() methods [see section 26.4.3.5 of [1]_]. .. code-block:: python def setUp(self): super(CouchbaseBackupTests, self).setUp() self.exe_timeout_patch = patch.object(utils, 'execute_with_timeout') def test_case(self): # This line can be moved to the setUp() method if the mock object # is not needed. mock_object = self.exe_timeout_patch.start() def tearDown(self): super(CouchbaseBackupTests, self).tearDown() self.exe_timeout_patch.stop() Note also patch.stopall(), which stops all active patches that were started with start(). - Mocking a class or object for the duration of a single test case. Use the decorator pattern. .. code-block:: python @patch.object(utils, 'execute_with_timeout') @patch.object(os, 'popen') def test_case(self, popen_mock, execute_with_timeout_mock): pass @patch.multiple(utils, execute_with_timeout=DEFAULT, generate_random_password=MagicMock(return_value=1)) def test_case(self, generate_random_password, execute_with_timeout): pass - Mocking a class or object for a smaller scope within one test case. Use the context manager pattern. .. code-block:: python def test_case(self): # Some code using the real implementation of 'generate_random_password'. with patch.object(utils, 'generate_random_password') as pwd_mock: # Using the mocked implementation of 'generate_random_password'. # Again code using the actual implementation of the method. def test_case(self): with patch.multiple(utils, execute_with_timeout_mock=DEFAULT, generate_random_password=MagicMock( return_value=1)) as mocks: password_mock = mocks['generate_random_password'] execute_mock = mocks['execute_with_timeout_mock'] References ---------- .. [1] Mock Guide: https://docs.python.org/3/library/unittest.mock.html trove-5.0.0/doc/source/dev/building_guest_images.rst0000664000567000056710000006115612701410316023726 0ustar jenkinsjenkins00000000000000.. _build_guest_images: ..
role:: bash(code)
   :language: bash

=========================================
Building Guest Images for OpenStack Trove
=========================================

.. If section numbers are desired, unindent this
   .. sectnum::

.. If a TOC is desired, unindent this
   .. contents::

Overview
========

When Trove receives a command to create a guest instance, it does so by launching a Nova instance based on the appropriate guest image that is stored in Glance. To operate Trove it is vital to have a properly constructed guest image, and while tools are provided that help you build them, the Trove project itself does not distribute guest images. This document shows you how to build guest images for use with Trove.

It is assumed that you have a working OpenStack deployment with the key services like Keystone, Glance, Swift, Cinder, Nova and networking through either Nova Networks or Neutron, where you will deploy the guest images. It is also assumed that you have Trove functioning and all the Trove services operating normally. If you don't have these prerequisites, this document won't help you get them. Consult the appropriate documentation for installing and configuring OpenStack first.

High Level Overview of a Trove Guest Instance
=============================================

At the most basic level, a Trove Guest Instance is a Nova instance launched by Trove in response to a create command. For most of this document, we will confine ourselves to single instance databases; in other words, without the additional complexity of replication or mirroring. Guest instances and guest images for replicated and mirrored database instances will be addressed specifically in later sections of this document.

This section describes the various components of a Trove Guest Instance.

-----------------------------
Operating System and Database
-----------------------------

A Trove Guest Instance contains at least a functioning Operating System and the database software that the instance is intended to provide (as a service). For example, if your chosen operating system is Ubuntu and you wish to deliver MySQL version 5.5, then your guest instance is a Nova instance running the Ubuntu operating system with MySQL version 5.5 installed on it.

-----------------
Trove Guest Agent
-----------------

Trove supports multiple databases, some of which are relational (RDBMS) and some non-relational (NoSQL). In order to provide a common management interface to all of these, the Trove Guest Instance runs a 'Guest Agent'. The Trove Guest Agent is a component of the Trove system that is specific to the database running on that Guest Instance.

The purpose of the Trove Guest Agent is to implement the Trove Guest Agent API for the specific database. This includes such things as the implementation of the database 'start' and 'stop' commands. The Trove Guest Agent API is the common API used by Trove to communicate with any guest database, and the Guest Agent is the implementation of that API for the specific database.

The Trove Guest Agent runs on the Trove Guest Instance.

------------------------------
Persistent Storage, Networking
------------------------------

The database stores data on persistent storage on Cinder (if configured, see trove.conf and the volume_support parameter) or on ephemeral storage on the Nova instance. The database is accessible over the network, and the Guest Instance is configured for network access by client applications.
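For example, whether a guest instance stores its data on a Cinder volume is controlled by the volume_support option in trove.conf (a minimal illustration only; the option's default and any per-datastore overrides depend on your deployment):

.. code-block:: ini

    [DEFAULT]
    # When True, Trove provisions a Cinder volume for each guest instance
    # and the datastore keeps its data there; when False, data lives on
    # the Nova instance's ephemeral storage.
    volume_support = True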
Building Guest Images using DIB =============================== A Trove Guest Image can be built with any tool that produces an image accepted by Nova. In this document we describe how to build guest images using the 'Disk Image Builder' (DIB) tool, and we focus on building qemu images [1]_. DIB is an OpenStack tool and is available for download at https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ or https://pypi.python.org/pypi/diskimage-builder/0.1.38. DIB uses a chroot'ed environment to construct the image. The goal is to build a bare machine that has all the components required for launch by Nova. ---------- Invocation ---------- You can download the DIB tool from OpenStack's public git repository. Note that DIB works with Ubuntu and Fedora (RedHat). Other operating systems are not yet fully supported. .. code-block:: bash user@machine:/opt/stack$ git clone https://git.openstack.org/openstack/diskimage-builder Cloning into 'diskimage-builder'... remote: Counting objects: 8881, done. remote: Total 8881 (delta 0), reused 0 (delta 0) Receiving objects: 100% (8881/8881), 1.92 MiB | 0 bytes/s, done. Resolving deltas: 100% (4668/4668), done. Checking connectivity... done. user@machine:/opt/stack$ Ensure that you have qemu-img [2]_ and kpartx installed. The disk-image-create command is the main command in the DIB tool that is used to build guest images for Trove. The disk-image-create command takes the following options: .. code-block:: bash user@machine:/opt/stack/diskimage-builder$ ./bin/disk-image-create -h Usage: disk-image-create [OPTION]... [ELEMENT]... Options: -a i386|amd64|armhf -- set the architecture of the image(default amd64) -o imagename -- set the imagename of the output image file(default image) -t qcow2,tar -- set the image types of the output image files (default qcow2) File types should be comma separated -x -- turn on tracing -u -- uncompressed; do not compress the image - larger but faster -c -- clear environment before starting work --image-size size -- image size in GB for the created image --image-cache directory -- location for cached images(default ~/.cache/image-create) --max-online-resize size -- max number of filesystem blocks to support when resizing. Useful if you want a really large root partition when the image is deployed. Using a very large value may run into a known bug in resize2fs. Setting the value to 274877906944 will get you a 1PB root file system. Making this value unnecessarily large will consume extra disk space on the root partition with extra file system inodes. --min-tmpfs size -- minimum size in GB needed in tmpfs to build the image --no-tmpfs -- do not use tmpfs to speed image build --offline -- do not update cached resources --qemu-img-options -- option flags to be passed directly to qemu-img. Options need to be comma separated, and follow the key=value pattern. --root-label label -- label for the root filesystem. Defaults to 'cloudimg-rootfs'. --ramdisk-element -- specify the main element to be used for building ramdisks. Defaults to 'ramdisk'. Should be set to 'dracut-ramdisk' for platforms such as RHEL and CentOS that do not package busybox. --install-type -- specify the default installation type. Defaults to 'source'. Set to 'package' to use package based installations by default. -n skip the default inclusion of the 'base' element -p package[,package,package] -- list of packages to install in the image -h|--help -- display this help and exit ELEMENTS_PATH will allow you to specify multiple locations for the elements. 
NOTE: At least one distribution root element must be specified. Examples: disk-image-create -a amd64 -o ubuntu-amd64 vm ubuntu export ELEMENTS_PATH=~/source/tripleo-image-elements/elements disk-image-create -a amd64 -o fedora-amd64-heat-cfntools vm fedora heat-cfntools user@machine:/opt/stack/diskimage-builder$ The example command provided above would build a perfectly functional Nova image with the 64 bit Fedora operating system. In addition to the -a argument which specifies to build an amd64 (64 bit) image, and the -o which specifies the output file, the command line lists the various elements that should be used in building the image. The next section of this document talks about image elements. Building a Trove guest image is a little more involved and the standard elements (more about this later) are highly configurable through the use of environment variables. This command will create a guest image usable by Trove: .. code-block:: bash export HOST_USERNAME export HOST_SCP_USERNAME export GUEST_USERNAME export NETWORK_GATEWAY export REDSTACK_SCRIPTS export SERVICE_TYPE export PATH_TROVE export ESCAPED_PATH_TROVE export SSH_DIR export GUEST_LOGDIR export ESCAPED_GUEST_LOGDIR export ELEMENTS_PATH=$REDSTACK_SCRIPTS/files/elements:$PATH_TRIPLEO_ELEMENTS/elements export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive" local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && \ echo "--qemu-img-options compat=0.10") ${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${IMAGE_NAME}" \ -x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} \ vm heat-cfntools cloud-init-datasources ${DISTRO}-guest \ ${DISTRO}-${SERVICE_TYPE} ----------------------------- Disk Image Builder 'Elements' ----------------------------- DIB Elements are 'executed' by the disk-image-create command to produce the guest image. An element consists of a number of bash scripts that are executed by DIB in a specific order to generate the image. You provide the names of the elements that you would like executed, in order, on the command line to disk-image-create. Elements are executed within the chroot'ed environment while DIB is run. Elements are executed in phases and the various phases are (in order) root.d, extra-data.d, pre-install.d, install.d, post-install.d, block-device.d, finalise.d [3]_, and cleanup.d [4]_. The latter reference provides a very good outline on writing elements and is a 'must read'. Some elements use environment.d to setup environment variables. Element dependencies can be established using the element-deps and element-provides files which are plain text files. ----------------- Existing Elements ----------------- DIB comes with some tools that are located in the elements directory. .. 
code-block:: bash

    user@machine:/opt/stack/diskimage-builder/elements$ ls
    apt-conf                         dpkg                      ramdisk
    apt-preferences                  dracut-network            ramdisk-base
    apt-sources                      dracut-ramdisk            rax-nova-agent
    architecture-emulation-binaries  element-manifest          redhat-common
    baremetal                        enable-serial-console     rhel
    base                             epel                      rhel7
    cache-url                        fedora                    rhel-common
    centos7                          hwburnin                  rpm-distro
    cleanup-kernel-initrd            hwdiscovery               select-boot-kernel-initrd
    cloud-init-datasources           ilo                       selinux-permissive
    cloud-init-nocloud               ironic-agent              serial-console
    debian                           ironic-discoverd-ramdisk  source-repositories
    debian-systemd                   iso                       stable-interface-names
    debian-upstart                   local-config              svc-map
    deploy                           manifests                 uboot
    deploy-baremetal                 mellanox                  ubuntu
    deploy-ironic                    modprobe-blacklist        ubuntu-core
    deploy-kexec                     opensuse                  vm
    dhcp-all-interfaces              package-installs          yum
    dib-run-parts                    pip-cache                 zypper
    disable-selinux                  pkg-map
    dkms                             pypi

In addition, projects like TripleO [5]_ provide elements as well. Trove provides a set of elements as part of the trove-integration [6]_ project, which is described in the next section.

Trove Reference Elements
========================

Reference elements provided by Trove are part of the trove-integration project. In keeping with the philosophy of making elements 'layered', Trove provides two sets of elements. The first implements the guest agent for various operating systems and the second implements the database for these operating systems.

---------------------------
Provided Reference Elements
---------------------------

The Trove reference elements are located in the trove-integration/scripts/files/elements directory. The [operating-system]-guest elements provide the Trove Guest capabilities and the [operating-system]-[database] elements provide support for each database on the specified operating system.

.. code-block:: bash

    user@machine:/opt/stack/trove-integration/scripts/files/elements$ ls -l
    total 56
    drwxrwxr-x 5 user group 4096 Jan 7 12:47 fedora-guest
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 fedora-mongodb
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 fedora-mysql
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 fedora-percona
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 fedora-postgresql
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 fedora-redis
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-cassandra
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-couchbase
    drwxrwxr-x 6 user group 4096 Jan 7 12:47 ubuntu-guest
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-mongodb
    drwxrwxr-x 4 user group 4096 Jan 7 12:47 ubuntu-mysql
    drwxrwxr-x 4 user group 4096 Jan 7 12:47 ubuntu-percona
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-postgresql
    drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-redis
    user@machine:/opt/stack/trove-integration/scripts/files/elements$

With this infrastructure in place, and the elements from DIB and TripleO accessible to the DIB command, one can generate the (for example) Ubuntu guest image for Percona Server with the command line:

.. code-block:: bash

    ${DIB} -a amd64 -o ${output-file} ubuntu vm heat-cfntools \
        cloud-init-datasources ubuntu-guest ubuntu-percona

Where ${DIB} is the fully qualified path to the disk-image-create command and ${output-file} is the name of the output file to be created.
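To make this concrete, assuming (for illustration only) that diskimage-builder, tripleo-image-elements and trove-integration are all checked out under /opt/stack, the invocation might look like this; note that the Trove guest elements also expect the environment variables shown in the redstack-based example earlier (GUEST_USERNAME, ESCAPED_GUEST_LOGDIR, and so on) to be set:

.. code-block:: bash

    # Make the Trove and TripleO elements visible to disk-image-create.
    export ELEMENTS_PATH=/opt/stack/trove-integration/scripts/files/elements:/opt/stack/tripleo-image-elements/elements

    # Build a 64 bit Ubuntu guest image with Percona Server support.
    /opt/stack/diskimage-builder/bin/disk-image-create -a amd64 \
        -o ubuntu-percona-guest \
        ubuntu vm heat-cfntools cloud-init-datasources \
        ubuntu-guest ubuntu-percona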
-------------------------------------------------------------------
Contributing Reference Elements When Implementing a New 'Datastore'
-------------------------------------------------------------------

When contributing a new datastore, you should contribute elements that will allow any user of Trove to build a guest image for that datastore. This is typically accomplished by submitting files into the trove-integration project, as above.

Getting the Guest Agent Code onto a Trove Guest Instance
========================================================

The guest agent code typically runs on the guest instance alongside the database. There are two ways in which the guest agent code can be placed on the guest instance, and we describe both of these here.

----------------------------------------
Guest Agent Code Installed at Build Time
----------------------------------------

In this option, the guest agent code is built into the guest image, thereby ensuring that all database instances launched with the image will have the exact same version of the guest agent. This can be accomplished by placing suitable code in the elements for the image; these elements will ensure that the guest agent code is installed on the image.

--------------------------------------
Guest Agent Code Installed at Run Time
--------------------------------------

In this option, the guest agent code is not part of the guest image; instead, the guest agent code is obtained at runtime, potentially from some well-known location. In devstack, this is implemented in trove-guest.upstart.conf and trove-guest.systemd.conf. Shown below is the code from trove-guest.upstart.conf (this code may change in the future and is shown here as an example only). See the lines highlighted with '->' below:

.. code-block:: bash

    description "Trove Guest"
    author "Auto-Gen"

    start on (filesystem and net-device-up IFACE!=lo)
    stop on runlevel [016]
    chdir /var/run

    pre-start script
        mkdir -p /var/run/trove
        chown GUEST_USERNAME:root /var/run/trove/

        mkdir -p /var/lock/trove
        chown GUEST_USERNAME:root /var/lock/trove/

        mkdir -p GUEST_LOGDIR
        chown GUEST_USERNAME:root GUEST_LOGDIR

        chmod +r /etc/guest_info

        # If /etc/trove does not exist, copy the trove source and the
        # guest agent config from the user's development environment
        if [ ! -d /etc/trove ]; then
->          sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@NETWORK_GATEWAY:PATH_TROVE/ /home/GUEST_USERNAME/trove
            mkdir -p /etc/trove
->          sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@NETWORK_GATEWAY:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/
            mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/trove-guestagent.conf
        fi
    end script

    exec su -c "/home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-file=/etc/guest_info --config-file=/etc/trove/trove-guestagent.conf" GUEST_USERNAME

In building an image for a production Trove deployment, it is a very bad idea to use this mechanism. It makes sense only in a development environment where the code you are developing is Trove itself, including the Guest Agent: you can merely boot a new Trove instance and the freshly modified code is run on the guest. In any other circumstance, it is much better to have the guest image include the guest agent code.
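To make the build-time alternative concrete, a minimal install.d step might look like the following. This is a sketch only: it assumes that an earlier extra-data.d step has copied a trove source tree and a guest agent configuration file into TMP_HOOKS_PATH (visible as /tmp/in_target.d inside the chroot), and the script name is illustrative:

.. code-block:: bash

    #!/bin/bash
    # 99-install-trove-guestagent (illustrative name): bake the guest
    # agent into the image at build time.
    set -e
    set -o xtrace

    # Install the guest agent code copied in by an extra-data.d step.
    pip install /tmp/in_target.d/trove

    # Ship a guest agent configuration file with the image.
    mkdir -p /etc/trove
    cp /tmp/in_target.d/trove-guestagent.conf /etc/trove/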
Considerations in Building a Guest Image
========================================

In building a guest image, there are several considerations that one must take into account. Some of the ones that we have encountered are described below.

---------------------------------------
Speed of Launch and Start-up Activities
---------------------------------------

The actions performed on first boot can be very expensive and may impact the time taken to launch a new guest instance. For example, guest images that don't have the database software pre-installed, and instead download and install it during launch, could take longer to launch. In building a guest image, therefore, care should be taken to balance the activities performed on first boot against the demands on start-up time.

---------------------------------------------------------
Database Licensing and Database Software Download Issues
---------------------------------------------------------

Some database software downloads are licensed, and manual steps are required in order to obtain the installable software. In other instances, no repositories may be set up to serve images of a particular database. In these cases, it is suggested that an extra step be used to build the guest image.

User Manually Downloads Database Software
-----------------------------------------

The user manually downloads the database software in a suitable format and places it in a specified location on the machine that will be used to build the guest image. An environment variable 'DATASTORE_PKG_LOCATION' is set to point to this location. It can be a single file (for example new_db.deb) or a folder (for example new_db_files) depending on what the elements expect. In the latter case, the folder would need to contain all the files that the elements need in order to install the database software (a folder would typically be used only if more than one file was required).

Use an extra-data.d Folder
--------------------------

Use an extra-data.d folder for the element and copy the file into the image. Steps in extra-data.d are run first, and outside the DIB chroot'ed environment. The step here can copy the installable from DATASTORE_PKG_LOCATION into the image (typically into TMP_HOOKS_PATH). For example, if DATASTORE_PKG_LOCATION contains the full path to an installation package, an element in this folder could contain the following line:

.. code-block:: bash

    dd if=${DATASTORE_PKG_LOCATION} of=${TMP_HOOKS_PATH}/new_db.deb

Use an install.d Step to Install the Software
---------------------------------------------

A standard install.d step can now install the software from TMP_HOOKS_PATH. For example, an element in this folder could contain:

.. code-block:: bash

    dpkg -i ${TMP_HOOKS_PATH}/new_db.deb

Once elements have been set up that expect a package to be available, the guest image can be created by executing the following:

.. code-block:: bash

    DATASTORE_PKG_LOCATION=/path/to/new_db.deb ./script_to_call_dib.sh

Assuming the elements for new_db are available in redstack, this would equate to:

.. code-block:: bash

    DATASTORE_PKG_LOCATION=/path/to/new_db.deb ./redstack kick-start new_db

Building Guest Images Using Standard Elements
=============================================

A very good reference for how one builds guest images can be found by reviewing the redstack script (trove-integration/scripts). Lower level routines that actually invoke Disk Image Builder can be found in trove-integration/scripts/functions_qemu.
The following block of code illustrates the most basic invocation of DIB to create a guest image. This code is in trove-integration/scripts/functions_qemu, as part of the function build_vm(). We look at this section of code in detail below.

.. code-block:: bash

    export HOST_USERNAME
    export HOST_SCP_USERNAME
    export GUEST_USERNAME
    export NETWORK_GATEWAY
    export REDSTACK_SCRIPTS
    export SERVICE_TYPE
    export PATH_TROVE
    export ESCAPED_PATH_TROVE
    export SSH_DIR
    export GUEST_LOGDIR
    export ESCAPED_GUEST_LOGDIR
    export ELEMENTS_PATH=$REDSTACK_SCRIPTS/files/elements:$PATH_TRIPLEO_ELEMENTS/elements
    export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive"
    local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && \
        echo "--qemu-img-options compat=0.10")
    ${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${IMAGE_NAME}" \
        -x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} \
        vm heat-cfntools cloud-init-datasources ${DISTRO}-guest \
        ${DISTRO}-${SERVICE_TYPE}

Several of the environment variables exported above are used in the course of the Disk Image Building process. For example, let's look at GUEST_LOGDIR. Looking at the element elements/fedora-guest/extra-data.d/20-guest-upstart, we find:

.. code-block:: bash

    #!/bin/bash

    set -e
    set -o xtrace

    [...]

    [ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir"

    sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/NETWORK_GATEWAY/${NETWORK_GATEWAY}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" \
        ${REDSTACK_SCRIPTS}/files/trove-guest.systemd.conf > \
        ${TMP_HOOKS_PATH}/trove-guest.service

As you can see, the value of GUEST_LOGDIR is used in the extra-data.d script to appropriately configure the trove-guest.systemd.conf file. This pattern is one that you can expect in your own building of guest images.

The invocation of disk-image-create provides a list of elements that are to be invoked 'in order'. That list of elements is:

.. code-block:: bash

    ${DISTRO} ${EXTRA_ELEMENTS} vm heat-cfntools cloud-init-datasources \
    ${DISTRO}-guest ${DISTRO}-${SERVICE_TYPE}

When invoked to (for example) create a MySQL guest image on Ubuntu, we can expect that DISTRO would be 'ubuntu' and SERVICE_TYPE would be 'mysql'. These would therefore end up being the elements:

.. code-block:: bash

    ubuntu                  From diskimage-builder/elements/ubuntu
    vm                      From diskimage-builder/elements/vm
    heat-cfntools           From tripleo-image-elements/elements/heat-cfntools
    cloud-init-datasources  From diskimage-builder/elements/cloud-init-datasources
    ubuntu-guest            From trove-integration/scripts/files/elements/ubuntu-guest
    ubuntu-mysql            From trove-integration/scripts/files/elements/ubuntu-mysql

References
==========

.. [1] For more information about QEMU, refer to http://wiki.qemu.org/Main_Page
.. [2] On Ubuntu, qemu-img is part of the package qemu-utils; on Fedora and RedHat it is part of the qemu package.
.. [3] Users (especially in the USA) are cautioned about this spelling, which once resulted in several sleepless nights.
.. [4] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/README.rst#writing-an-element
.. [5] https://git.openstack.org/cgit/openstack/tripleo-image-elements/tree/elements
.. [6] https://git.openstack.org/cgit/openstack/trove-integration/tree/scripts/files/elements

trove-5.0.0/doc/source/dev/trove_api_extensions.rst0000664000567000056710000000016512701410316023635 0ustar jenkinsjenkins00000000000000====================
Trove API Extensions
====================

..
list-plugins:: trove.api.extensions :detailed: trove-5.0.0/doc/source/dev/manual_install.rst0000664000567000056710000003462212701410316022376 0ustar jenkinsjenkins00000000000000.. _manual_install: ========================= Manual Trove Installation ========================= Objectives ========== This document provides a step-by-step guide for manual installation of Trove with an existing OpenStack environment for development purposes. This document will not cover: - OpenStack setup - Trove service configuration Requirements ============ A running OpenStack environment is required, including the following components: - Compute (Nova) - Image Service (Glance) - Identity (Keystone) - A networking component (either Neutron or Nova-Network) - If you want to provision datastores on block-storage volumes, you also will need Block Storage (Cinder) - If you want to do backup/restore and replication, you will also need Object Storage (Swift) - An environment with a freshly installed Ubuntu 14.04 LTS to run Trove services. This will be referred to as "local environment" - AMQP service (RabbitMQ or QPID) - MySQL (SQLite, PostgreSQL) database for Trove's internal needs, accessible from the local environment - Certain OpenStack services must be accessible from VMs: - Swift - VMs must be accessible from local environment for development/debugging purposes - OpenStack services must be accessible directly from the local environment, such as: - Nova - Cinder - Swift - Heat Installation ============ ----------- Gather info ----------- The following information about the existing environment is required: - Keystone host and port(s) - OpenStack administrator's username, tenant name and password - Nova URL - Cinder URL - Swift URL - Heat URL - AMQP connection credentials (server URL, user, password) - Trove's controller backend connection string (MySQL, SQLite, PostgreSQL) -------------------- Install dependencies -------------------- Required packages for Trove --------------------------- List of packages to be installed: .. code-block:: bash $ sudo apt-get install build-essential libxslt1-dev qemu-utils mysql-client \ git python-dev python-pexpect python-mysqldb libmysqlclient-dev Python settings --------------- To find out which setuptools version is latest please check out the `setuptools repo`_. .. _setuptools repo: https://pypi.python.org/pypi/setuptools/ To find out which pip version is latest please visit the `pip repo`_. .. _pip repo: https://pypi.python.org/pypi/pip/ Some packages in Ubuntu repositories are outdated. Please make sure to update to the latest versions from the appropriate sources. Use latest setuptools: .. code-block:: bash $ cd ~ $ wget https://pypi.python.org/packages/source/s/setuptools/setuptools-{{latest}}.tar.gz $ tar xfvz setuptools-{{latest}}.tar.gz $ cd setuptools-{{latest}} $ python setup.py install --user Use latest pip: .. code-block:: bash $ wget https://pypi.python.org/packages/source/p/pip/pip-{{latest}}.tar.gz $ tar xfvz pip-{{latest}}.tar.gz $ cd pip-{{latest}} $ python setup.py install --user Note '--user' above -- we installed packages in user's home dir, in $HOME/.local/bin, so we need to add it to path: .. code-block:: bash $ echo PATH="$HOME/.local/bin:$PATH" >> ~/.profile $ . ~/.profile Install virtualenv, create environment and activate it: .. code-block:: bash $ pip install virtualenv --user $ virtualenv --system-site-packages env $ source env/bin/activate Get Trove --------- Obtain the Trove source components from OpenStack repositories: .. 
code-block:: bash

    $ cd ~
    $ git clone https://git.openstack.org/openstack/trove.git
    $ git clone https://git.openstack.org/openstack/python-troveclient.git

Install Trove
=============

First, install the requirements:

.. code-block:: bash

    $ cd ~/trove
    $ pip install -r requirements.txt -r test-requirements.txt

Then, install Trove:

.. code-block:: bash

    $ sudo python setup.py develop

Finally, install the Trove client:

.. code-block:: bash

    $ cd ~/python-troveclient
    $ sudo python setup.py develop
    $ cd ~

Other required OpenStack clients (python-novaclient, python-keystoneclient, etc.) should already be installed as part of the Trove requirements.

---------------------------
Prepare Trove for OpenStack
---------------------------

You will first need to create a tenant called 'trove_for_trove_usage'. Next, create users called 'regular_trove_user' and 'admin_trove_user' (using 'trove' as the password). These are the accounts used by the Trove service. Additionally, you will need to register Trove as an OpenStack service and register its endpoints:

.. code-block:: bash

    $ keystone --os-username <OS_ADMIN_USER> --os-password <OS_ADMIN_PASSWORD> --os-tenant-name <OS_ADMIN_TENANT> --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 tenant-create --name trove_for_trove_usage

    $ keystone --os-username <OS_ADMIN_USER> --os-password <OS_ADMIN_PASSWORD> --os-tenant-name <OS_ADMIN_TENANT> --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 user-create --name regular_trove_user --pass trove --tenant trove_for_trove_usage

    $ keystone --os-username <OS_ADMIN_USER> --os-password <OS_ADMIN_PASSWORD> --os-tenant-name <OS_ADMIN_TENANT> --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 user-create --name admin_trove_user --pass trove --tenant trove_for_trove_usage

    $ keystone --os-username <OS_ADMIN_USER> --os-password <OS_ADMIN_PASSWORD> --os-tenant-name <OS_ADMIN_TENANT> --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 user-role-add --user admin_trove_user --tenant trove_for_trove_usage --role admin

    $ keystone --os-username <OS_ADMIN_USER> --os-password <OS_ADMIN_PASSWORD> --os-tenant-name <OS_ADMIN_TENANT> --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 service-create --name trove --type database

    $ keystone --os-username <OS_ADMIN_USER> --os-password <OS_ADMIN_PASSWORD> --os-tenant-name <OS_ADMIN_TENANT> --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 endpoint-create --service trove --region RegionOne --publicurl 'http://<TROVE_IP>:<TROVE_PORT>/v1.0/$(tenant_id)s' --adminurl 'http://<TROVE_IP>:<TROVE_PORT>/v1.0/$(tenant_id)s' --internalurl 'http://<TROVE_IP>:<TROVE_PORT>/v1.0/$(tenant_id)s'

Where <TROVE_IP> and <TROVE_PORT> are the IP address and port of the server where Trove was installed. This IP should be reachable from any hosts that will be used to communicate with Trove.

Prepare Trove configuration files
=================================

There are several configuration files for Trove:

- api-paste.ini and trove.conf - for the trove-api service
- trove-taskmanager.conf - for the trove-taskmanager service
- trove-guestagent.conf - for the trove-guestagent service
- trove-conductor.conf - for the trove-conductor service
- <datastore_type>.cloudinit - userdata for VMs during provisioning

Cloud-init scripts are userdata used for the different datastore types (mysql/percona, cassandra, mongodb, redis, couchbase) while provisioning new compute instances. Samples of the above are available in ~/trove/etc/trove/ as \*.conf.sample files.

If a clean Ubuntu image is used as the source image for Trove instances, the cloud-init script must install and run the guest agent in the instance. As an alternative, one may consider creating a custom image with Trove pre-installed and pre-configured.

Source images
=============

As the source image for Trove instances, we will use a Trove-compatible Ubuntu image:

..
code-block:: bash

    $ export DATASTORE_TYPE="mysql"
    $ wget http://tarballs.openstack.org/trove/images/ubuntu/${DATASTORE_TYPE}.qcow2
    $ glance --os-username admin_trove_user --os-password trove --os-tenant-name trove_for_trove_usage --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 image-create --name trove-image --is-public True --container-format ovf --disk-format qcow2 --owner trove < ${DATASTORE_TYPE}.qcow2

Note: http://tarballs.openstack.org/trove/images includes mysql, percona and mongodb Trove-compatible images.

At this step please remember the image ID, or store it in an environment variable (IMAGEID).

.. code-block:: bash

    $ glance --os-username trove --os-password trove --os-tenant-name trove --os-auth-url http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0 image-create --name trove-image --is-public true --container-format ovf --disk-format qcow2 --owner trove < precise.qcow2

    $ export IMAGEID=<glance_image_id>

Cloud-init scripts
==================

-------------------
Cloud-init location
-------------------

By default, trove-taskmanager will look in /etc/trove/cloudinit for <datastore_type>.cloudinit.

------------------
Cloud-init content
------------------

Each cloud-init script for Trove-compatible images should contain:

- Trove installation

Custom images with Trove code inside
====================================

*To be added*

Prepare the database
====================

Create the Trove database schema:

- Connect to the storage backend (MySQL, PostgreSQL)
- Create a database called `trove` (this database will be used for storing Trove ORM)
- Compose the connection string. Example: mysql://<user>:<password>@<host>:<port>/<database_name>

Initialize the database
=======================

Once the database for Trove is created, its structure needs to be populated.

.. code-block:: bash

    $ trove-manage db_sync

Setup Trove Datastores
======================

---------
Datastore
---------

A Datastore is a data structure that describes a set of Datastore Versions, which consists of::

    - ID -- simple auto-generated UUID
    - Name -- user-defined attribute, actual name of a datastore
    - Datastore Versions

Example::

    - mysql, cassandra, redis, etc.

-----------------
Datastore Version
-----------------

A Datastore Version is a data structure that describes a version of a specific database pinned to a datastore, which consists of::

    - ID -- simple auto-generated UUID
    - Datastore ID -- reference to a Datastore
    - Name -- user-defined attribute, actual name of a database version
    - Datastore manager -- trove-guestagent manager that is used for datastore management
    - Image ID -- reference to a specific Glance image ID
    - Packages -- operating system specific packages that would be deployed onto the datastore VM
    - Active -- boolean flag that defines if the version can be used for instance deployment or not

Example::

    - ID - edb1d22a-b66d-4e86-be60-756240439272
    - Datastore ID - 9c3d890b-a2f2-4ba5-91b2-2997d0791502
    - Name - mysql-5.6
    - Datastore manager - mysql
    - Image ID - d73a402-3953-4721-8c99-86fc72e1cb51
    - Packages - mysql-server=5.5, percona-xtrabackup=2.1
    - Active - True

--------------------------------------------
Datastore and Datastore Version registration
--------------------------------------------

To register a datastore, you must execute:

.. code-block:: bash

    $ export DATASTORE_TYPE="mysql" # available options: mysql, mongodb, postgresql, redis, cassandra, couchbase, couchdb, db2, vertica, etc.

    $ export DATASTORE_VERSION="5.6" # available options: for cassandra 2.0.x, for mysql: 5.x, for mongodb: 2.x.x, etc.
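    $ # Note: the export values in this block are examples only; they must
    $ # match the datastore guest image that was registered in Glance above.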
    $ export PACKAGES="mysql-server-5.6" # available options: cassandra=2.0.9, mongodb=2.0.4, etc.

    $ export IMAGEID="9910350b-77e3-4790-86be-b971d0cf9175" # Glance image ID of the relevant Datastore version (see the Source images section)

    $ trove-manage datastore_update ${DATASTORE_TYPE} ""
    $ trove-manage datastore_version_update ${DATASTORE_TYPE} ${DATASTORE_VERSION} ${DATASTORE_TYPE} ${IMAGEID} ${PACKAGES} 1
    $ trove-manage datastore_update ${DATASTORE_TYPE} ${DATASTORE_VERSION}

=========
Run Trove
=========

Trove services configuration and tuning
=======================================

*To be added*

Starting Trove services
=======================

Run trove-api:

.. code-block:: bash

    $ trove-api --config-file=${TROVE_CONF_DIR}/trove-api.conf &

Run trove-taskmanager:

.. code-block:: bash

    $ trove-taskmanager --config-file=${TROVE_CONF_DIR}/trove-taskmanager.conf &

Run trove-conductor:

.. code-block:: bash

    $ trove-conductor --config-file=${TROVE_CONF_DIR}/trove-conductor.conf &

=================
Trove interaction
=================

Keystonerc
==========

You need to build a `keystonerc` file that contains credentials to simplify authentication while using the Trove client:

.. code-block:: bash

    export OS_TENANT_NAME=trove_for_trove_usage
    export OS_USERNAME=regular_trove_user
    export OS_PASSWORD=trove
    export OS_AUTH_URL="http://<KEYSTONE_IP>:<KEYSTONE_PORT>/v2.0/"
    export OS_AUTH_STRATEGY=keystone

Trove deployment verification
=============================

First you need to execute:

.. code-block:: bash

    $ . keystonerc

To see `help` for a specific command:

.. code-block:: bash

    $ trove help <command>

To create an instance:

.. code-block:: bash

    $ trove create <name> <flavor_id>
                   [--size <size>]
                   [--databases <databases> [<databases> ...]]
                   [--users <users> [<users> ...]]
                   [--backup <backup>]
                   [--availability_zone <availability_zone>]
                   [--datastore <datastore>]
                   [--datastore_version <datastore_version>]
                   [--nic <nic>]
                   [--configuration <configuration>]
                   [--replica_of <source_instance>]

===============
Troubleshooting
===============

No instance IPs in the output of 'trove show <instance_id>'
===========================================================

If the Trove instance was successfully created, is showing ACTIVE state and working, yet there is no IP address for the instance shown in the output of 'trove show <instance_id>', then confirm the following line is added to trove.conf::

    network_label_regex = ^NETWORK_NAME$

where NETWORK_NAME should be replaced with the real name of the network to which the instance is connected.

To decide which network you would like to attach a Trove instance to, run the following command:

.. code-block:: bash

    $ nova net-list

or

.. code-block:: bash

    $ neutron net-list

One possible way to find the network name is to execute the 'nova list' command. The output will list all OpenStack instances for the tenant, including network information. Look for::

    NETWORK_NAME=IP_ADDRESS

trove-5.0.0/doc/source/conf.py0000664000567000056710000002277612701410316017367 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*-
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'oslosphinx', 'stevedore.sphinxext'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Trove' copyright = u'2013, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from trove.version import version_info as trove_version # The full version, including alpha/beta/rc tags. release = trove_version.version_string_with_vcs() # The short X.Y version. version = trove_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['trove.'] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['_static'] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
# html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]) latex_documents = [ ( 'index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual' ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). # man_pages = [ # ( # 'index', # '%s' % project, # u'%s Documentation' % project, # u'OpenStack Foundation', # 1 # ), # ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', '%s' % project, u'%s Documentation' % project, u'OpenStack Foundation', '%s' % project, 'Database as a service.', 'Miscellaneous' 'manual' ), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. 
#texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'%s' % project epub_author = u'OpenStack Foundation' epub_publisher = u'OpenStack Foundation' epub_copyright = u'2013, OpenStack Foundation' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Fix unsupported image types using the PIL. #epub_fix_images = False # Scale large images. #epub_max_image_width = 0 # If 'no', URL addresses will not be shown. #epub_show_urls = 'inline' # If false, no index is generated. #epub_use_index = True trove-5.0.0/doc/votes/0000775000567000056710000000000012701410521015712 5ustar jenkinsjenkins00000000000000trove-5.0.0/doc/votes/channel_logging0000664000567000056710000000055412701410316020761 0ustar jenkinsjenkins00000000000000i propose to log the irc channel #openstack-trove. This will help us go back in history and find answers for things we have already answered and help when people go on a dreaded vacation. Or when people decide to travel 4 wednesdays in a row for work... ####################### https://review.openstack.org/#/c/47900/ Vote results: 13 votes. 13 for, 0 against.trove-5.0.0/LICENSE0000664000567000056710000002363712701410316015027 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. trove-5.0.0/run_tests.sh0000775000567000056710000001124312701410316016375 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Trove's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -x, --stop Stop running tests after the first error or failure." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -p, --pep8 Just run pep8" echo " -P, --no-pep8 Don't run pep8" echo " -c, --coverage Generate coverage report" echo " -h, --help Print this usage message" echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." exit } function process_option { case "$1" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -p|--pep8) just_pep8=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -*) noseopts="$noseopts $1";; *) noseargs="$noseargs $1" esac } venv=.venv with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 noseargs= noseopts= wrapper="" just_pep8=0 no_pep8=0 coverage=0 recreate_db=1 for arg in "$@"; do process_option $arg done # If enabled, tell nose to collect coverage data if [ $coverage -eq 1 ]; then noseopts="$noseopts --with-coverage --cover-package=trove" fi function run_tests { # Just run the test suites in current environment ${wrapper} $NOSETESTS 2> run_tests.log # If we get some short import error right away, print the error log directly RESULT=$? 
    if [ "$RESULT" -ne "0" ]; then
        ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
        if [ "$ERRSIZE" -lt "40" ]; then
            cat run_tests.log
        fi
    fi
    return $RESULT
}

function run_pep8 {
    echo "Running pep8 ..."
    # Just run PEP8 in current environment
    #
    # NOTE(sirp): W602 (deprecated 3-arg raise) is being ignored for the
    # following reasons:
    #
    #  1. It's needed to preserve traceback information when re-raising
    #     exceptions; this is needed b/c Eventlet will clear exceptions when
    #     switching contexts.
    #
    #  2. There doesn't appear to be an alternative, pep8-tool-compatible way
    #     of doing this in Python 2 (in Python 3 `with_traceback` could be
    #     used).
    #
    #  3. Can find no corroborating evidence that this is deprecated in
    #     Python 2 other than what the PEP8 tool claims. It is deprecated in
    #     Python 3, so perhaps the mistake was thinking that the deprecation
    #     applied to Python 2 as well.
    ${wrapper} flake8
}

NOSETESTS="python run_tests.py $noseopts $noseargs"

if [ $never_venv -eq 0 ]
then
    # Remove the virtual environment if --force used
    if [ $force -eq 1 ]; then
        echo "Cleaning virtualenv..."
        rm -rf ${venv}
    fi
    if [ -e ${venv} ]; then
        wrapper="${with_venv}"
    else
        if [ $always_venv -eq 1 ]; then
            # Automatically install the virtualenv
            python tools/install_venv.py
            wrapper="${with_venv}"
        else
            echo -e "No virtual environment found...create one? (Y/n) \c"
            read use_ve
            if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
                # Install the virtualenv and run the test suite in it
                python tools/install_venv.py
                wrapper=${with_venv}
            fi
        fi
    fi
fi

# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
    ${wrapper} coverage erase
fi

if [ $just_pep8 -eq 1 ]; then
    run_pep8
    exit
fi

if [ $recreate_db -eq 1 ]; then
    rm -f tests.sqlite
fi

run_tests

# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
    if [ $no_pep8 -eq 0 ]; then
        run_pep8
    fi
fi

if [ $coverage -eq 1 ]; then
    echo "Generating coverage report in covhtml/"
    ${wrapper} coverage html -d covhtml -i
fi
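For reference, a few illustrative invocations of run_tests.sh, derived from the option parsing above; the dotted test path in the last example is a hypothetical placeholder for whichever module you want to run:

    # Run the full suite in a virtualenv, installing one automatically if absent
    ./run_tests.sh -V

    # Run only the flake8/pep8 checks and exit
    ./run_tests.sh -p

    # Run with coverage enabled, keeping the existing test database
    ./run_tests.sh -c -n

    # Forward a positional argument (hypothetical module path) to nose;
    # note that pep8 is skipped when individual tests are named
    ./run_tests.sh trove.tests.unittests.common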
trove-5.0.0/ChangeLog0000664000567000056710000021323312701410517015570 0ustar  jenkinsjenkins00000000000000CHANGES
=======

5.0.0
-----

* Trove's tox tests should respect upper-constraints.txt
* Address change in Route v2.3
* Update .gitreview for stable/mitaka

5.0.0.0rc1
----------

* updating the release notes from mitaka commits
* Fix incorrect innobackupex args (fixed gate)
* Implementation of couchdb user and database functions
* Secure mongodb instances and clusters by default
* Fix pagination function
* Add better validation to cluster shrink
* Vertica configuration groups
* Implement Backup and Restore for CouchDB
* Server support for instance module feature
* Addition of DB2 backup & restore functionality
* Moved CORS middleware configuration into set_defaults
* Updated from global requirements
* Update db2 settings to reflect correct hostname
* Revert "Time to get rid of most vestiges of slave_of"
* Block pymongo version 3.1
* Updated from global requirements
* Updated from global requirements

5.0.0.0b3
---------

* Implement Cassandra clustering
* Vertica Cluster Grow and Shrink
* Implement MariaDB Clustering
* Fixed wrongly used assertEqual
* Use assertEqual instead of assertTrue
* Add support for root functions to Cassandra
* Implement DBaaS Ceilometer Notifications
* Updated from global requirements
* Server side of module maintenance commands
* Excessive messages logged during unit tests
* Fix test order and region in guest_log scenario
* Pass datastore details when required (again)
* Update setup.cfg entry points for oslo namespace-less import paths
* Updated from global requirements
* root enable for pxc clustered datastore
* Enable cluster tests for Redis
* Updated from global requirements
* Add backup & restore for Cassandra
* Implement Cassandra Configuration Groups
* Disable unsupported root-disable tests
* Fix MySQL user list pagination
* Implement user functions for Cassandra datastore
* Fixed test_prepare_mysql_with_snapshot failure on rhel 7.2
* Malformed user access sql for postgres guest agent
* Code cleanup - commented code
* Move 'enable root on prepare' to the base
* Handle bad Swift endpoint in guest_log
* pxc grow/shrink cluster implementation
* Fix Percona XtraDB Cluster guest to work with v5.6
* Use uppercase 'S' in word "OpenStack"
* Add root-actions int-tests
* Use OSprofiler options consolidated in lib itself
* Use correct depends_on decorator for log tests
* Add support for root-disable
* Updated from global requirements
* Unittests use trove_testtools
* Redis should perform backup using BGSAVE not SAVE
* Remove slave_of reference from scenario tests
* Fix issue of mismatched test-requirements.txt file
* Add support for configuration groups in int-tests
* Updated from global requirements
* Enable Vertica load via curl
* Fix apply configuration on prepare
* Fix leaked mocks in 'test_dbaas'
* Implement Guest Log File Retrieval
* Fixed a typo in log message
* Make scenario-tests work with all datastores
* Updated from global requirements
* Add missing sudo for systemctl command
* MariaDB GTID Replication
* py3: Replaces xrange() with range()
* Imported Translations from Zanata
* Make Trove exists events notifs be emitted in current audit period

5.0.0.0b2
---------

* Updated from global requirements
* Update requirements.txt
* Drop python 2.6 support
* Updated from global requirements
* Revert Skiptest from change set 245845
* Use built-in function setattr() directly
* Time to get rid of most vestiges of slave_of
* Cleanup trove debug and info logging messages
* Fix indexing of dict.keys() in python3
* Replaces itertools.izip with six.moves.zip
* Updated from global requirements
* Sometimes prepare messages are delayed
* Add debug testenv in tox
* Test: make enforce_type=True in CONF.set_override
* Code refactoring for couchdb
* Added Keystone and RequestID headers to CORS middleware
* MongoDB cluster grow failing in get_admin_password
* Marking downgrade as deprecated
* Mongo cluster grow - use az and nic values
* Updated from global requirements
* Register additional datastores for int-tests
* PostgreSQL configuration groups
* Fixed 'client connection lost' bug in test_helper
* Use assertTrue/False instead of assertEqual(T/F)
* Fix to_gb & to_mb conversion function rounding issue
* Trivial: Remove meaningless default value in __get__()
* Trivial: replace deprecated _impl_messaging
* Change assertTrue(isinstance()) by optimal assert
* Fix missing value types for log message
* Fix 'cannot access' error with Redis restore
* Updated from global requirements
* Using LOG.warning replace LOG.warn
* Trivial: Remove vim header from source files
* Keep py3.X compatibility for urllib
* Define 'device_path' for Redis
* Experimental datastores use new service commands
* Finish cluster int-tests
* Add MySQL int-test helper client
* Move rabbit conf settings to separate section
* Remove updateuser test
* Replace assertEqual(None, *) with assertIsNone in tests
* Modifying Vertica clusters to have a master node
* mock out the guestagent write_file call
* Use a specific network for the test
* Move to oslo_db
* Deprecated tox -downloadcache option removed
* Move storage strategy code from guestagent to common
* Catch all errors in Mock detector
* Updated from global requirements
* Use configured datastore on instance creation
* Mongodb's "security.authorization" wrong type
* Remove version per M-1 release instructions

5.0.0.0b1
---------

* updating with other reno changes
* Change reporting of unhandled logging
* Updated from global requirements
* Fix race condition in replication backup delete
* Update Trove Installation guide
* Add better input checking for MongoDB
* fix mongo create database
* Fix PostgreSQL root functions
* Port run_tests.py to Python 3
* Fix configuration lookup failure
* Do not use api-paste.ini osprofiler options
* Delete python bytecode before every test run
* Add 'volume_type' parameter to instance create
* Refactor the datastore manager classes (more)
* Updated from global requirements
* Update internal status when waiting for a change
* Move MongoDB mongos upstart script to elements
* Remove kombu as a dependency for Trove
* Correct the computation of elapsed time while waiting for state change
* Fix tox py27 error
* Guestagent configuration tests mock chown/chmod
* Correct errors resulting in "No handlers ..." error message
* Added CORS middleware to Trove
* Updated from global requirements
* Move ignore_dbs and ignore_users out of DEFAULT
* Redis 'hz' conf parameter using wrong MIN value
* Add reno for release notes management
* fix the version info of trove to use pbr
* Defer revision dir initialization in GA
* Imported Translations from Zanata
* this should fix the failing gate
* Address issues with Trove eventlet monkey-patching
* Refactor the datastore manager classes
* Replace assertEqual(None, *) with assertIsNone in tests
* Incorrect usage of python-novaclient
* Use oslo_config new type PortOpt for port options
* Updated from global requirements
* Changes names of some quota values
* The verbose option in section [DEFAULT] has been deprecated
* root_on_create for Couchbase should be false
* Pagination limit code refactor
* Fix example value for notification_topics
* oslo.utils 2.6.0 causing tox test to hang
* Add .eggs/ to .gitignore
* Adding more doc strings to event simulator
* Fix the bug of "Error spelling of 'AMPQ'"
* Corrected error message for unsupported datastore flavors
* Add unspecified options to backup unittests
* Fix redis cluster unit test for assertRaisesRegexp
* Unused variable backup_cmd removed
* Add instance create int-tests

4.0.0
-----

* Fix promote for Redis datastore
* Fix publish_exists_event authentication exception
* Fix publish_exists_event authentication exception
* Imported Translations from Zanata
* Fix the bug of "Fix spelling typo in trove"
* Use IPOpt to validate IPAddress
* Use stevedore directive to document plugins
* Cleanup of Translations

4.0.0.0rc1
----------

* Allow more instances than cluster_member_count for pxc
* Open Mitaka Development
* Updated from global requirements
* Fix promote for Redis datastore
* Fix Mongo report_root call to have correct args
* Enable deploying Trove instances into single tenant
* Add support for extended_properties for clusters
* Fix Postgres services management
* Couchbase cluster-init command needs credentials
* Properly patch _init_overrides_dir in MongoDB
* Fix get_flavors test by sorting json output
* use the legacy_compute v2 api for nova instead of v2.1 for now
* Updated from global requirements
* Fix typos (from "UPD" to "UDP")
* Root enablement for Vertica clusters/instances
* Disable MongoDB cluster security
* Add user and database actions int-tests
* Mongodb Cluster Scaling
* Fixes the mgmt.test_datastores errors
* Percona Xtradb Cluster implementation
* Add support for Redis replication
* Redis Cluster Initial Implementation
* Expect ValidationError on an empty user update
* Revert change set 217881
* Add support for MariaDB datastore in Trove
* Test instance name can not have special charactrers in it now
* Add Redis backup/restore functionality
* Fixed redeclared CONF = cfg.CONF

4.0.0.0b3
---------

* Fix instance from alternating status on create
* MongoDB backup uses "nogroup" which is OS specific
* MySQL Manager Refactor
* Removing unused dependency: discover
* Increase test timeout for instance creation
* Enable all trove services by default
* Add generic int-test classes
* Initialize directory for Mongo's runtime files
* Fix not to output confusing message in tr-tmgr.log
* Word spellings have been corrected
* Update ignore_dbs for MySQL 5.6
* Implements Datastore Registration API
* Fix description for "Inapt spelling of a word"
* Fix race conditions in config overrides tasks
* MongoDB create_admin_user not authorized
* Mongodb storing config overrides in /var/run
* Updated from global requirements
* Updated from global requirements
* Associate flavor types with datastore versions
* Fix a few typos in log messages and comments
* provide default port for pydev_debug_port
* Updated from global requirements
* Configuration Groups for MongoDB
* Adds the PATCH method to extensions
* Imported Translations from Transifex
* Use oslo.log library instead of system logging module
* Updated from global requirements
* add a missing i18n import for backup strategy
* MongoDB databases and users not created on create
* MongoDB user management - access grant/revoke/show
* Imported Translations from Transifex
* User name_string schema limited to 16 chars
* Have devstack plugin install Trove client
* Notifications for exists events need nova remote admin url set
* MongoDB database management features
* Improve the guestagent configuration manager
* Updated from global requirements
* Adds lower_case_table_names support for MySQL
* MongoDB cluster instances missing 'key'
* Cluster instances could falsely report 'ready'
* MongoDB user management - root enable/show

4.0.0.0b2
---------

* Imported Translations from Transifex
* Remove openstack.common package
* Switch to the oslo_log library
* Updated from global requirements
* Switch to the oslo.serialization library
* Switch to the oslo.context library
* MongoDB cluster taskmanager's add_shard not called
* MongoDB prepare needs to wait for Mongo to start
* MongoDB cluster strategy missing create_admin_user
* Configuration Groups for Redis
* Switch to oslo.service
* Which interfaces trove starts up on should be logged
* Updated from global requirements
* MongoDB user management - create/list/show/delete
* correct some grammar
* Remove H305,H307,H402,H407,H904
* Updated from global requirements
* Support authentication in the MongoDB guest agent
* Imported Translations from Transifex
* Allow int tests to run in SSL environment
* Fix unit test mocks for new mock release
* default for percona in notification_service_id
* Updated from global requirements
* Updated from global requirements
* Added replica_of attribute to test_index_list
* Fake mode service does not start after the changes to the oslo service package changes
* Support nics and AZ for MongoDB clusters
* Removed the non-existent method call
* Fixes the tests in test_configuration.py
* Updated from global requirements
* Fix ssl.PROTOCOL_SSLv3 not supported by Python 2.7.9
* Provide option to read SSH credentials from test env
* Fixed the tests in test_models.py
* Make test_ensure_mysql_is_running more robust
* Fixes the failing unit-tests
* Implement guestagent Configuration Manager
* Move mysql datadir to a sub-directory on mounted volume
* Fixes the method update_datastore

4.0.0.0b1
---------

* Update version for Liberty

4.0.0a0
-------

* correct the annotation of param
* Fixed API string references to MySql
* Fixes the failing tests in mgmt/test_models.py
* Remove nova_proxy_admin_user from trove guest
* PostgreSQL guest agent can't remove temp file
* Notification serialization of context
* Implements integration tests for Vertica
* Updated from global requirements
* MongoDB single instance backup and restore
* Sent in the topic when taskmanager setup
* Decrease replication slave retry wait time
* Adds the missing import to manage.py
* Fixes db_upgrade and db_downgrade methods
* Updated from global requirements
* Added dangling mock detection to 'guestagent'
* Remove rsdns directory
* Fix leaked mocks in the 'MockMgmtInstanceTest'
* Update devstack to handle Trove/Neutron setups
* Fix leaked mocks in the 'LimitsControllerTest'
* correct api schema for instance patch
* Fixes a broken debug message in schema matching
* Updated from global requirements
* Fix create Vertica cluster or instance to show ERROR on failure
* Fixes hacking rules
* Move guestagent settings to default section
* Fixes the unsafe global mocks
* Fix leaked mocks in the 'guestagent/test_api' module
* Improving manual install docs
* Error message missing tenant id
* Fix gate failure on gate-trove-pep8
* Added more unit-tests to Vertica-Cluster-Strategy
* accepting network and availability zone for instances in cluster
* Fixed the unmocked entry in taskmanager unit-tests
* Update modules to turn on dangling mock detection
* Updated glance API for creating public image
* Updated from global requirements
* Added more unit-tests to taskmanager
* Added unit-tests for mgmt-cluster-service
* Updated from global requirements
* Added dangling mock detection to 'mgmt' module
* Added dangling mock detection to 'conductor'
* Added dangling mock detection to 'taskmanager'
* Added dangling mock detection to 'secgroups'
* Fix leaked mocks in the 'upgrade' module if any
* Added dangling mock detection to 'backup' module
* Added dangling mock detection to 'dns' module
* Added dangling mock detection to 'cluster' module
* Drop use of 'oslo' namespace package
* Fix leaked mocks in the 'common' module if any
* Implement dangling mock detector for unittests
* Imported Translations from Transifex
* Fixes the unsafe mocking in test_dbaas
* Abstract 'mkdir' shell commands in guestagent
* Added more unit-tests to guestagent
* Corrects order of parameters to assertEqual
* Fix process attribute check in BackupRunner
* Fix Mongo status check to work for Mongo 3.0
* Updated from global requirements

2015.1.0
--------

* Remove [Service] section from mongo config tmpl
* Remove [Service] section from mongo config tmpl
* Add unix_socket_directories setting for pgsql
* Support testing on 32 bit systems
* Fixes unit-tests in test_dbaas.py
* Release Import of Translations from Transifex
* Fixes mocking of operating_system.service_discovery
* Added more unit-tests to Vertica
* update .gitreview for stable/kilo
* Abstract 'mv'/'cp' shell commands in guestagent
* Abstract rm/chmod shell commands in guestagent
* Updated from global requirements
* Fixes the rollback of flavor resize for couchdb, db2 & vertica

2015.1.0rc1
-----------

* Update openstack-common reference in openstack/common/README
* Update redis system.py paths for current RHEL/CentOS/Fedora
* Open Liberty development
* add devstack plugin
* Fixes config templates for mysql & percona
* Remove ordereddict from requirements.txt
* Make integration-tests run quicker
* Adds rpc_ping method to new datastores
* Use UTC to compute heartbeat age in eject-replica-source
* Avoid unnecessary restart of replication master
* Fixed NotificationTransformer to handle missing InstanceServiceStatus
* Moves taskmanager-common-code for clusters
* Fixes the resize APIs for Vertica-guest
* Use oslo util to determine network interface
* Update Trove to use novaclient v2
* Fix DB2 unit test to properly mock restart
* Rewrites the ClusterView.build_instances
* Eject-replica-source chooses most recent slave
* Add short document on building guest images
* Fix replica source state validation
* Add support for DB2 datastore in Trove
* Reject negative volume size in API
* Updating Flavor Resize Restrictions
* Fix CouchDB unit test to properly mock restart
* Fix common misspellings
* Implement clustering for Vertica datastore
* Corrects list_database query for MySQL
* Includes snapshot parameter to vertica & couchdb
* Move sql_xx params to [database] conf section
* Corrects my.cnf location in RHEL based distros
* Fix the mocking in test_dbaas.py
* Updated from global requirements
* Remove flaky assert from TestMgmtInstanceDeleted test

2015.1.0b3
----------

* Add support for HP Vertica datastore in Trove
* Replication V2
* Add Mgmt API For Testing RPC Connectivity
* Enhance Mgmt-Show To Support Deleted Instances
* Add support for CouchDB datastore in Trove
* Update config-detach to not remove default config
* bypass_url required in nova admin client
* Change nova_proxy_admin_tenant_name to id
* Imported Translations from Transifex
* Cleaned up redundancy between instance update() and edit()
* Inject guest conf files to configurable location
* Updated from global requirements
* Updated from global requirements
* Introduce a classification of datastores and strategies
* Remove now obsolete tox targets
* Updated from global requirements
* convert the max and min values to int instead of string
* Fixes package configuration method in pkg.py
* Delete mysql error log file as root on restore
* Changed error message for datastore parameter for configuration-create
* Correct a test and order of parameters to assertEqual
* Do not use '/tmp' as default guestagent log location
* Updated from global requirements
* Fix PostgreSQL volume definitions
* Create docs test target
* Integrate OSprofiler and Trove
* Provide more readable error message when swift is not installed
* Updated from global requirements
* Use canonical MySQL root pwd reset procedure
* Deletes volume on instance delete after resize
* Imported Translations from Transifex
* Update cassandra.yaml ownership after write_config operation
* Updated from global requirements
* Update CONTRIBUTING.RST file
* resync oslo-incubator code
* Updated validation_rules.json due to MySQL doc

2015.1.0b2
----------

* Added 'redis' test group to int-tests
* Changed hardcoded Mongodb username to variable
* Fix backup state check while restoring an instance
* Updated from global requirements
* Add limit stanzas for mongos
* Update DatastoreNotFound status code to 404
* Move cluster strategies to strategies/cluster
* MySQL restore wait for shutdown before killing
* Updated from global requirements
* Correct calls to mask_password() which no longer work
* Spelling errors fixed
* Eliminate redundant modules from oslo-incubator
* Address predictable temp file vulnerability
* Imported Translations from Transifex
* Fix trove-tox-doc-publish-checkbuild failures
* Quote Postgres names to avoid implicit conversion
* Trove create with --backup fails for postgresql
* Obsolete oslo-incubator modules - processutils
* Use dict comprehensions instead of dict constructor
* Fix MongoDB guest strategy implementation
* Using consistent control_exchange option for rpc
* Obsolete oslo-incubator modules - wsgi
* Obsolete oslo-incubator modules - exception
* Integration with oslo.messaging library
* Obsolete oslo-incubator modules - gettextutils (now oslo.i18n)
* Support string flavor IDs
* Adds negative unittests to test_backup_controller.py
* Use unit file to enable systemd service
* Obsolete oslo-incubator modules - jsonutils (now oslo.serialization)
* Obsolete oslo-incubator modules - timeutils

2015.1.0b1
----------

* Updated from global requirements
* Adds negative test to test_instance_controller.py
* Enable volume resize tests
* Assign os_region_name a default value
* Fix trove resize-volume resize2fs error
* Obsolete oslo-incubator modules - unused modules
* Updated from global requirements
* Clean up github references from docs
* Fix timeout in test_slave_user_removed int-test
* Workflow documentation is now in infra-manual
* Obsolete oslo-incubator modules - importutils
* Eliminate duplicated LoopingCall and LoopingCallDone code
* Forbid replica provisioning from replica site
* Legacy MySQL datastore is shown on datastore-list
* Add missing api example for incremental backups
* Config Group Load Fails If DS Version Inactive
* Remove Python 2.6 classifier
* Rename attrs_exist() to contains_allowed_attrs()
* Add few audit log messages to guestagent module
* Ensure Replication Tests do not use a stale token
* Fix broken instance provisioning with disabled volume support
* Updated from global requirements
* Poll for replica read_only status in test
* Updated from global requirements
* Create example generator
* Rename generic variable named with mysql specific name
* Deleting failed replication backup can hide error
* Increase instances.task_description column size
* Fix exception handling in get_replication_snapshot
* Update and correct documentation snippets
* Updated from global requirements
* remove keystonemiddleware settings from api-paste.ini
* configuration parameters payload changed
* Eliminate use of sudo for two things that don't need it
* making service catalog for endpoints more configurable
* Added regression test for config with long value
* Imported Translations from Transifex
* Couchbase backup failing
* Added Replication templates for Percona
* Updated from global requirements
* Updated from global requirements
* Check for server attributes before using them
* Validate backup size during restore
* Couchbase Root Password Can Go Out Of Sync
* Document that H301 and H306 are ignored on purpose
* Instance-Delete Should Stop the Database First
* Configuration group checking 0 validation rules
* Update some log calls for translation and lazyness
* Yum install should get a list of packages as a string
* Imported Translations from Transifex
* Allow users the ability to update an instance name
* Updated from global requirements
* Logging audit for guestagent/redis
* Miscellaneous Cluster Fixes
* Update config.template for Cassandra 2.1.0
* Increase test rate limit to avoid rate limit error
* add back the deleted parameter in the configuration group tests

2014.2
------

* Logging audit for guestagent/mongodb
* Cluster Error On Missing Volume Sizes Unoptimal
* cluster_config argument missing in prepare()
* restart_required cfg-param is bool and not string
* Update contributing.rst to include guidelines on Code Review
* cluster_config argument missing in prepare()
* restart_required cfg-param is bool and not string

2014.2.rc2
----------

* convert restart_required to 'true' or 'false' string
* fixing the flags for guest on resize volume
* convert restart_required to 'true' or 'false' string
* Refreshed translations
* Mark strings for translation
* Imported Translations from Transifex
* Removing dependency on trove models in the guest agent
* Mgmt Reboot allowed if datastore in crashed state
* Docs: Fix Sphinx warnings

2014.2.rc1
----------

* Use unique passwords for replication user
* Add templates for replica and replica source
* Open Kilo development
* Mandate detach replicas before deleting replica source
* Event simulator II
* Complete mocking for test_extensions
* Make the replication snapshot timeout configurable
* Use different timeouts for create and restore
* Partially address concerns in Couchbase restore strategy
* Updated from global requirements
* Discover config file for mongodb
* Isolate unit tests from integration tests data
* Deprecate unused entries in cfg.py
* Sync latest process and str utils from oslo
* Mark trove as being a universal wheel
* Document Trove configuration options
* Add postgresql to notification_service_id option
* loading configuration parameters from trove-manage better
* Fixed database migration script issues
* Updated from global requirements
* Stop using intersphinx
* Fix NoSuchOptError on Couchbase create
* Strengthens the regex for mongodb json functions
* Add SUSE support in mysql datastore
* Add support to detect SUSE
* Register postgres_group in trove config
* Marks mysql slave read-only
* Fix issue with intermittent test failures in test_pkg.py
* Updated from global requirements

2014.2.b3
---------

* Datastore Configuration Parameters stored in db
* In some cases, guest agents may leave temporary config files
* Add detach-replica support
* make backup_incremental_strategy a datastore specific option
* Use 'replica' instead of 'slave'
* Fix unit tests to work with random PYTHONHASHSEED
* Updated from global requirements
* Clusters Guest Implementation
* Clusters TaskManager Implementation
* Clusters API Implementation
* Avoid leaking mocks across unit tests
* Fixed restore to work correctly with pexpect
* Add PostgreSQL support
* Cleaned up sample trove-guestagent.conf
* Imported Translations from Transifex
* Removing the XML info from the docs
* Add replication slave info to instance show
* Look up trove instance by ID instead of name
* Snapshot component for replication
* handle repeating mysqld options containing equals
* Set the python hash seed that tox uses to 0
* Use netifaces to lookup IP address on guest agent
* Added the bind_host configuration option when launching the API
* Move usage_timeout out of guest options
* Add new checklinks tox environment
* show stdout/err from failed command execution
* Mysql guest agent functionality for replication
* Adjusted audit logging for trove.instance module
* Unit Tests for Mysql replication functionality
* Load trove API extensions using stevedore
* allow both ipv4 and ipv6 hostnames by default
* Update 'list_users' call to use AGENT_HIGH_TIMEOUT
* Adjusted audit logging for taskmanager module
* Correct monkey patching in GuestAgentBackupTest
* guestagent/mysql: Remove unused function
* recent audit log change broke this LOG.debug message
* Mocks utils.execute_with_timeout for mongodb tests
* Imported Translations from Transifex
* guestagent/test_volume.py leaves a file in /tmp
* Imported Translations from Transifex
* Per datastore volume support
* Logging audit for trove/mysql module
* Sync service.py from oslo-incubator with deps
* Updated from global requirements
* Remove accounts use of non-existent Nova extension
* Logging audit for trove/guestagent/datastore module
* Logging audit for guestagent/couchbase
* Logging audit for trove/guestagent module
* fix pexpect.spawn.match AttributeError
* Logging audit for guestagent/cassandra
* Make configuration tests configurable by datastore
* Handle error from execute() when deleting non-existant file
* document running a small set of tests
* guestagent/volume: Remove not necessary sudo call

2014.2.b2
---------

* Logging audit for guestagent/strategies module
* Add neutron support
* Imported Translations from Transifex
* Logging audit for trove/guestagent/backup module
* Use auth_token from keystonemiddleware
* guestagent contract for packages should be a list
* Make default extension path relative to pybasedir
* Refactored datastores to use common chown function
* Handle exception from pexpect child.close
* Moved core int-test groups from trove-int to trove
* Add Backup/Restore support for Couchbase
* Manual install page needed swift info
* Restrict backup-list on instance to tenant
* Enhance trove-manage help
* Imported Translations from Transifex
* Fixes redundant get call in a few places
* Remove setuptools_git requirement
* Fix backup execution workflow
* Add datatore/version name into configurations response
* Imported Translations from Transifex
* Stop cassandra during configuration and volume migration
* Enable usage of config-drive for file injection
* Enable trove to specify cinder volume_type when creating a volume
* Change default for update_status_on_fail
* Add instance IP to /instances
* Fix updated timestamp for SecurityGroup model
* Updated from global requirements
* Use (# of CPUs) api/conductor workers by default
* Imported Translations from Transifex
* Fixed '--version' for trove processes/utilities
* Logging audit for trove/common module
* Imported Translations from Transifex
* Fix tracking of SG's provisioned by Heat
* Add CONTRIBUTING.rst
* Imported Translations from Transifex
* Updated from global requirements
* Logging audit for trove/db module
* Remove redundant heat templates from codebase
* Imported Translations from Transifex
* Imported Translations from Transifex
* Sync processutils from oslo with deps
* Sync jsonutils (and dependencies) from oslo-incubator
* Add timestamps and instance count to config groups
* Expose trove conductor manager class as conf property
* Updated from global requirements
* Deleting incremental backup metadata after restore
* Fix data too long for column 'task_description'
* Logging audit for trove/backup module
* Corrects typo in instance models and service
* Logging audit for trove/configuration module
* Add guestagent API calls for replication
* Imported Translations from Transifex
* Fix typos in trove/instance/models.py
* Imported Translations from Transifex
* Added route for Admin API to support guest upgrade
* Add sample admin_{user,tenant_name,password}
* Delete undeclared variable in guest-agent API class
* Fix enable on boot when working with systemd and symlinked units
* Add slave_of_id to instance model
* Updates developer install doc to use trove cli
* Imported Translations from Transifex
* Reverting deleted tests
* Add datastore version to backups
* Fix inheritance for ConfigurationParameterDeleted
* Add warnings when a path for api_extensions_path does not exist

2014.2.b1
---------

* Add a new column and indexes to agent_heartbeats
* Ensure routing key is specified in the address for a direct producer
* Added an int-test for user-update-attributes
* Adds exception handling around heat stack creation
* Make default extension path relative to pybasedir
* Increase time_out to 30 sec for failing int-tests
* Use tcp/udp ports from config for heat templates
* Fix calls for proboscis methods
* Apply six for metaclass
* Increased stop timeout for cassandra
* Wait for Couchbase to be ready before node-init
* Enabled H402 flake8 rule
* Fix log reporting for DNS creation process
* pep8: Ignore locale files
* switch from mox to mox3
* Include datastore version details in datastore calls
* Adds the foundation for datastore capabilities
* Remove unused xml config file
* Reports enabled-root for restored instance
* Clean up openstack-common.conf
* Corrects the class name "ExtensionManager"
* Corrects mgmt-taskmanager startup method
* Remove admin_token from configs
* Cleans up ServiceUser.__str__ method
* Imported Translations from Transifex
* Update database-api to follow OpenStack conventions
* Remove all mostly untranslated PO files
* Updated from global requirements
* Consider datastore version when generating configs
* Fix heat template for cassandra
* debug level logs should not be translated
* Imported Translations from Transifex
* Specify correct constraint name for postgresql
* Get service endpoints from catalog
* Added support of resize-flavor to Redis datastore
* Fix prepare call for redis guest agent
* Migrate v20 not handling exception
* Datastore_versions.name shouldn't be unique
* Pretty print JSON sample files
* Added fix to support Couchbase resize-flavor
* Imported Translations from Transifex
* Fix datastore tests so that they pass in live mode
* Add support for 'trove root-enable' in Couchbase
* Fix missing use of test config datastore version in tests
* Use six.StringIO/BytesIO instead of StringIO.StringIO
* Resolves volume resize issue
* Fail a test if time.sleep called for no reason
* Improve help for backup_incremental_strategy
* Updated from global requirements
* Imported Translations from Transifex
* Unmounting ephemeral if it has already been mounted
* Corrected the assert message in test_dbaas.py
* Improve help strings
* Imported Translations from Transifex
* Remove usages of deprecated name LoopingCall in rpc
* Check that all po/pot files are valid
* Correct inconsistent state issues with config
* Mocks out file.open to not rely on OS dependent files
* Make sure eventlet starts correctly
* Moved the apidocs from openstack/database-api
* Bind to all interfaces for MongoDB
* Added separate rate limit setting for mgmt POST
* Fix Couchbase Kill Command
* Updated from global requirements
* changing conductor logging levels
* Trove doesn't use extras
* Correct the command to stop cassandra server
* Remove dependencies on pep8, pyflakes and flake8
* Fix internal error generated from config-detach
* Collapse mysql OptGroup Sections
* Changed assert statement of test_volume_found

2014.1
------

* Open Juno development
* Fixed unit test to not actually run backup command
* Fix create call for security group rules
* Updated from global requirements
* Remove mockito, and replace with mock
* Add heat.template for Redis datastore
* Don't specify admin_token in paste config
* Changes Volume Prefix From mysql To datastore
* Tox tests should not require errors in rigid order
* Start using oslosphinx theme for docs
* Imported Translations from Transifex
* Pop instead of get for timeout kwarg
* Change Cassandra to Service Start vs Bin
* Setup trove for translation
* Fixed unit test to not actually run backup command
* Added Backup/Restore validations
* Update sqlalchemy migrate scripts for postgres
* Remove IDENTIFIED BY clause for MySQL REVOKE
* fix default rabbitmq configuration values in sample cfgs
* Fix Timestamp diff in Instance and Notification
* Improve Datastore Not Implemented exceptions
* Hide Datastores In List With No Active Versions
* Removes volumes redefinition in fake nova_client
* Add timeout on prepare call per datastore
* Parses default configuration of an instance
* Make hostnames in designate driver all lower case
* Fixed Instance Status usage
* Fixes insecure update of /etc/fstab file
* Don't run main() on import
* Fixed backup unittests to use mocked cmd
* Removes extra initialization from config
* Test restore full and restore incremental
* rename and fix the db_wipe command
* Remove Min/Max for Configuration Group Booleans
* Root_on_create per datastore
* Removes XML api from trove
* Increases timeout for guest agent resize_fs
* Update Oslo wiki link in README
* Adding missing indexes for trove db
* Improve readme.rst

2014.1.b3
---------

* Initial support for single instance MongoDB support
* Fix inconsistent usage of mount_point
* Adding percona secgroup config
* the check for status.status should happen before trying to log it
* simplify dns_ip_address code
* Add security group rules for Couchbase
* Remove unused variables
* Fixes restore from incremental backups for Percona
* Initial support for single instance Couchbase
* Make backup/restore pluggable
* Security groups workflow update
* Remove extraneous vim configuration comments
* Fixes get configuration defaults
* Adding "version" to "datastore" field during list instances API call
* Fixes a race condition in resize flavor for Percona
* Raise max header size to accommodate large tokens
* Call debug_utils.setup() in all entry points
* Use six.moves.urllib.parse instead of urlparse
* Use consistent timeouts for integration tests
* Rename Openstack to OpenStack
* Fix Redis After Configuration Group Merge
* Initial support for single instance Cassandra Database
* fix traceback when default_version is not in the database
* Fixes a race condition in resize flavor
* Raises BadRequest for grant_access on ignore_dbs
* Corrects service URLs from "%s/%s/" to "%s/%s"
* Remove unused admin_context from conductor manager
* Improve help strings
* Modifying tests to run with different configurations
* Adding additional datastore tests
* Adding Incremental Backups
* Ignore outdated messages sent to conductor
* Adding override.config.template for percona
* adding configuration group support
* Remove copyright and comments from empty files
* Fixes resizes for volumes attached to active Nova servers
* Add clearer help kwarg for max_volumes_per_user
* Make test mysql connection code less confusing
* Corrects matching of heat resources status
* Migrating trove to entry points
* Changing DNS to pass string to driver
* Fix default_datastore migration script
* Add Neutron support
* Simplify swift storage load logic
* Makes the backup tests less onerous
* Corrects help messages in cfg.py
* Fix Occasional test_one_network_label_exact Fail
* Replaces local generate_uuid with common.utils
* Disable redundant DB initialization on guesagent's start
* RootReport through Taskmanager
* Changes encoding from 'ascii' to 'utf-8'
* Adds exception handling to heat code
* make datastore_version_id required

2014.1.b2
---------

* Mask database user's password in trove logs
* Datastores improvements
* changing the routes for the api to be clear
* Edits on help strings
* Validate databases for user grants
* Spelling correction in taskmanager/models.py
* Adds Missing _() for Exception Message Translation
* Add Volume Resize filesystem size assertion
* move datastore manager to the datastore version
* Remove copyright from empty files
* Replace oslo.uuidutils module with trove.utils
* Updates oslo.context
* Relocates generate_random_password to common.utils
* Fixup indentation errors found by Pep8 1.4.6+
* Fixed misspelled help message in cfg.py
* Log service_status.status instead of service_status
* Added Redis Crud Operations
* Removes deprecated usage of BaseException.message
* Enabled F403 flake8 rule
* Update for datastore tests to support multiple datastore types
* Make use of IP filtering when creating DNS records
* Instance details view shows hostname (if it has it) or IP
* Using base32 encoding to generate DNS records
* Removes unused "server" from taskmanager
* Add volume total to Mgmt Instance Get
* Adds LOG in event_simulator.py
* Removes BuiltInstanceTasks.get_volume_mountpoint
* Adds tenant_id to guest_info in heat code
* make the bin scripts called with main()
* Removes directory creation prior to _tmp_mount
* Cleans and improves heat code path
* server identifier corrected
* Corrects the help message for trove_volume_support
* Adds non-volume-support to create server with heat
* Adds missing dependency to the documentation
* Removes privilege modification for MYSQL_BASE_DIR
* Changes command to create the directory
* Cleans the method init_engine
* don't try to concatenate a string with a variable that can be None
* Removes dead code from trove/tests/api/root.py
* Acknowledge Nova VERIFY_RESIZE as RESIZE state
* Properly mark a backup as FAILED if the process failed
* Moving storage logic out of backup runner class
* Paginate backup list api
* Updates trove-guestagent.conf.sample
* Enabling H403, H702 rules
* Fix deleted_at timestamp in usage tests
* Fix mysqldump backup and restore
* check for invalid hostnames on update_attributes
* Add -U to pip install command in tox.ini
* use_stderr should be False for tox tests
* Fixed typos in files in trove/guestagent
* Updated from global requirements
* Fixed PEP8/flake8 issues
* Replace "tokenauth" by "authtoken" in configuration files
* Update tox.ini to use new features
* Removing IDE specific ignores
* Extract suffix from req URL to avoid escaping dots
* Unittest Case for get_user
* Added ability to pass custom command-line options to backup runner
* Updates tox.ini to remove suppressed H401
* Conductor Does Not Default to Verbose/Debug Logs
* Fixed misspellings of common words

2014.1.b1
---------

* Removes unused import statements
* db: make compatible with SQLAlchemy 0.8
* Removes security_groups from _create_server_volume_heat signature
* setting fake mode to print startup errors
* Conductor proxies host db access for guests
* Clean up zombie processes on backup failure
* Corrects heat template for mysql
* Adding designate dns support to trove
* need to mock out of the clear_expired_passwords for tox
* Remove radmin credentials from create_heat_client
* Allow query logging
* Fixing typos in _create_server_volume
* Add default case for mysqld_bin
* Add support of datastore types
* User-Create Host Does Not Allow Wildcarded Octet
* Externalization of heat template
* Update openstack/common/lockutils
* Added logging in taskmanager models && _() strings
* Fix action_result check
* Convert to a more modern form of openstack-common.conf
* Update tests to work with the latest testtools
* host response attribute should prefer 'host' instead of 'hostId'
* Add a hook for backup processes to check if successful
* Replace GUID with generated password
* Add optional ip address filter for hiding ips
* Fix checksum verification exception
* Fix bug in Security Group association to Instance
* Fixes trove-api daemon fake mode functionality
* Add Backup/Restore Checksum Validation
* Fix white_box mode for integration tests
* Add tests for hosts mgmt commands
* Security groups workflow update
* Fix User ID in Context and Notifications
* Fix service_type in instance creation using heat services
* Simulates events so run_tests.py executes 10x faster
* Moved create_instance DNS attempt to post-prepare
* Provide service type management code structure
* Fixes pagination with non-string markers types
* Replace deprecated method aliases in tests
* Quote and escape pagination markers
* Update statuses on GA timeout
* Fixing restart tests
* Update trove-guestagent.conf.sample
* Add GA related parameters to trove-taskmanager.conf.sample
* Modifying tests to use the compat client
* Task manager refactoring done
* Fix Timestamps for Resize Usage Events
* Service Registration using conf file
* PEP8. E125, F811, F401, H703
* Allow service_id per service_type for Usage Events
* Fix quota issue where usages can drop to negative value
* Fix the fake nova server implementation
* Add tenant id to guest_info file
* Remove Duplicate trove_auth_url Property
* Adding location attribute to Fake Backup object
* Correct the fake implementation of UsageVerifier
* Extract generic part of GA code from MySQL specific modules
* Allow early host % on validate
* fixing symlink attack bug present in the application
* Volume timeout is set too low
* Update from global requirements
* Added server_id to my.cnf template logic
* Fixed method signature _create_server_volume_heat
* PEP8. F841
* Require oslo.config 1.2.0 final
* Pydev remote debugging support added
* Vote for channel logging
* Duplicate Import Statement
* User and Database List in Create Not Validated
* Support Optional Super User in Instance Create
* oslo-incubator rpc update
* Replace OpenStack LLC with OpenStack Foundation
* Allow optional availability_zone to be passed
* Rename webtest to WebTest
* Set sane defaults for required conf params in trove/common/cfg.py
* PEP8 rules. H102,103,201
* Adds instructions for manual Trove installation
* Fix and enable gating on H702
* Fixed Admin Auth Token in Notification
* Fixed backups so that they no longer fail when backup size is > 2GB
* Use LOG.exception in place of sys.exc_info
* Fixed directory create exec
* Move ServiceStatuses from trove.instance to trove.common
* Open Icehouse development
* Mark sensitive cfg options with secure flag
* Modify User Attributes API - Fix
* Made apischema a bit less restrictive
* Enclose command args in with_venv.sh
* Fix and enable gating on H703 - string localisation
* Do not use locals() for string formatting

2013.2.b3
---------

* service_statuses updated_at Field Not Set
* Increased unit test coverage
* Support Security Group Name Prefix Customization
* Implement resize volume method
* Adds includedir back to templates
* Implementing heat as an optional provisioning system
* Add and associate security group rule with group
* jsonschema upgrade to v1.3.0(as minimal)
* Trove - service_statuses status set to delete when instance deleted
* Modify unit test to allow cli to merge
* pep8 checking was added for bin folder
* Update nova_volume_url to cinder_url in CONF files
* Fixed use of word separators in Notification
* Changed system commands depends on OS version
* Fix admin extension gives 500 when listing instances
* Adds init file for routes directory
* Ensure safe format strings for TroveError
* Reindenting a function call in taskman models
* Guest config file injection
* Update oslo.cfg to >= 1.2.0
* Add instance cloudinit support
* Fix resize volume stuck in "RESIZE" status
* Add RPM class implementation
* Use same Nova Client throughout Fresh Instance Task
* Secure root password on restore from backup after root-enable
* Fixing bug in Interrogator tests mocks
* Fix bug with insecure instance on Prepare loss
* Moves extension files to routes
* Removing mycnf static files from contrib
* allows a mgmt user to migrate an instance to a specific host
* Configurable network labels used for showing IPs in instance details
* Replace nova client with cinder client to use volume
* Fix spelling of python
* Adding volume size to the backup views/models
* Modify User Attributes - name, host and password
* Renamed secgroup description
* Added docs and made template filename variable
* Turns pkg.py into a system aware packager
* Clear new or building backups on reset-task-status
* Added param name to validation error messages
* Fix drift in deleted timestamp for Notification and Database
* Added developer documentation for Trove
* Makes two tests wait for the instance to go ACTIVE

2013.2.b2
---------

* Change the swift file deletion to use the manifest
* Create templated config files
* Add service_type to the instances table
* Migrating the create/resize code to use flavor obj
* Bump pbr to the version 0.5.16
* Make Volume conditionally required attribute in Instance Create Schema
* Wildcards in User Host
* Update to latest Oslo rpc modules
* Fixing the broken API contract for flavor_id
* Restore should use pgrep instead of mysqladmin to check for mysql down
* Python 3.x compatibility fixes
* Adding instance id to the security group extension API
* Enable disk usage view in instance detail view
* API Validation for Trove API
* Requirements: Remove factory_boy, allow SQLAlchemy 0.7.10
* Fix Notifications
* Update kombu library version requirement
* GuestTimeout needed to be imported
* Fixes my.cnf not reverting with a resize revert
* Fix few missed issues with the reddwarf -> trove rename
* Rename README to README.rst
* Start using Hacking
* Fix up trivial License Header mismatches
* Package AUTHORS and ChangeLog file
* Fixed backups GET and DELETE to restrict access to the owner of backup
* Rename from reddwarf to trove
* Add trove.egg* entry in .gitignore
* Fix faulty 404 errors when requesting bad versions
* Change server exceptions to show less details
* Changed instances of the name reddwarf with trove in the README
* Adding support for encrypted backups
* Remove explicit depend on distribute
* Renamed repos to trove
* Fixed restore to wait for full mysqld shutdown before attempting restart
* Fix quota bug where it raises exception when resources are deleted
* Adding Exists Event Publishing
* Allow remote implementations to be overridden
* Fixed race condition in the Restore workflow
* Adding delete restore instance tests
* Integer Flavor Ids
* chmod 755 bin/reddwarf-mgmt-taskmanager
* Renaming security group URL API
* Updated to use normal requirements names
* Migrate to pbr
* Don't require an admin user if there isn't a need
* Fixing delete backup
* Adding missing config value
* Use database name validation only on listing and loading of databases
* setting up the right percona pkg to be used - bug1185205
* Adding a SERVICE_REGISTRY for percona - bug 1185138
* Ephemeral volume support
* Changes the conf files so redstack doesnt clobber them anymore
* Fixed format string vulnerability in reddwarf call to OpenstackException handler
* Added optional arg to launch child processes
* Backup and Restore for reddwarf instances
* Fixes some user and user-access call errors
* Re-introduced support for rpc delete_queue
* Refresh Oslo code - add support for ssl
* Migrate to flake8
* Add flavor API support for ephemeral
* Stop granting users GRANT OPTION by default
* Adding the start of notifications
* Controller and API changes for backups
* Refreshed Oslo Code
* added malformed json tests
* Makes the guest work for more than just mysql
* Quota tests
* Adding ability to run XmlLint for each API call
* s/OpenStack LLC/OpenStack Foundation/ in Copyright
* Remove unused authutils
* Add Apache 2.0 LICENSE file
* fixing taskmanager exception after migration is complete
* Fixes test in response to optional hostname fix
* Updating tests to use altered quotas show method
* fixing the flavor tests
* Addresses xml issues for quota unit tests
* Adding a running method to the Backup Model
* Stop the deprecated use of Exception.message to please Python
* Adding checks when updating a quota
* Added support for Security Groups via a new extension
* Add snapshot ORM to reddwarf
* Adds optional hostname to calls involving users
* Addresses failing quota int-tests
* Fixing the signing dir and cleaning up the paste config
* fix coverage for jenkins - temp fix
* Fix for missing quota property for int tests part1
* Adding instance ID to DNS error message
* Adds absolute limits to limit API call
* update MANIFEST.in to include specific scripts in etc/reddwarf
* Ensure to_dict() returns auth_tok
* Pinning proboscis to a specific version
* Test no accept headers
* Update test-requires to point to rd cli tarball
* Restoring the ability to run fake mode locally
* Added tests for Modify User Call
* Add python-keystoneclient to deployment dependency for reddwarf
* Tests the API in XML mode
* Refresh setup.py from Oslo
* Rate limits implementation
* percona image for reddwarf
* Quota feature
* Store the context in the local.store
* Use tarball for python-reddwarfclient dependency
* Fixes data type bug in get-user call
* Joins oslo dict values with reddwarf values
* Fixing run_tests.py so it emits an error upon failure
* remove the namespace_packages from setup.py
* Implement alternate root privileges
* Change default service_type in tests to 'database'
* Modify-user features
* Added the cover directory to .gitignore
* Specify Python 2.7 for cover environment in Tox
* Ignore .testrepository directory
* Prevent Upstart from restarting guest in Restart tests
* Adds reset-task-status mgmt api instance action
* Add missing Import
* Fixing the coverage reports for the unittests and adding support for TestR
* Adding a config option for running XML client
* Add more unittests to guest agent
* Negative Taskmanager Resize/Migration fixes
* Add unit tests for guestagent.db.models.py
* Fixing race condition during instance deletion
* testr unit tests for versions api
* Add unit tests for guestagent.api.py
* Add unit tests for guestagent.volume.py
* Checks guest status during migration
* create a test adapter for entering an instance and executing a cmd
* Adding flags to ssh cmd to bypass host checking
* More changes to facilitate oslo
* Fix for bug where returned instance status in UnprocessableEntity exception is an empty dictionary
* Consolidating multiple volume flags into a single flag
* Adding guest agent pkg unit tests and running them with testr
* Add unit tests for guestagent.models.py Delete guestagent.utils.py since it is no longer invoked anywhere
* fix int-tests running with out volume
* Fixing property for pid_file from "mysqladmin --print-defaults"
* Add unit test for guestagent.service.py and query.py
* ADD unit tests for guestagent/manager.py
* add back the mysql_base_dir variable to the guest
* Check for 'reddwarf_can_have_volume'
* - switch to testtools - remove pep8 warnings - more unit tests for dbaas.py, covers create_user, enable_root - refactoring
* Update oslo codebase within reddwarf
* Avoid null pointer. Fix pep8 mystery problems
* Fixed bug 1091927: Pep8 tests are broken for reddwarf
* Adding some dbaas.py unittests
* Avoids using guest mgmt API functions
* Part 1: Create group for simple unit tests
* Correcting a log message in resize code
* Better defaults that fix volume support
* After migration, don't null the instance flavor
* Fixing DNS issues related ot changing db_api
* Updates tests to run in other configurations
* Fixing the constant for mysql server validation in apt
* Change validation to require non-blank name for instances
* Fixes another fake mode eventlet bug
* Always call 'spawn_after' for eventlet in fake mode
* specify rpc version for guestagent manager fix bug # 1078976
* Update tools/install_venv.py to work w/ latest pip
* No longer import db_api while parsing db __init__
* Updated the README
* Adding test for volume used attribute for management api
* Use reddwarf instead of reddwarf_lite
* Add volume usage information to the management api detailed view
* Adding tests to Reddwarf
* For bug 1079863, fake commit
* dummy changes
* This is to fix bug 1079827. Please work
* remove double colons
* Removed TODO:
* Removed unused import. Fixes bug #1078522
* Uses the view to circumvent an error related to the assumption that instance.server is present
* Add vim formatting line
* Do not update flavor if timeout occurs in resize
* added a TODO:
* Remove tabs in sample my.cnf
* checks before deleting kwargs from the context
* Removes the vestigial tests
* Fix type in test-requires
* Aligned tox file with project
* Added .gitreview file
* Get-host-by-name now uses RD instance status, not vm status, in instance list
* Fix some PEP8 violations
* Adding the ability to rescan server volumes to fake mode
* Mgmt migrate instance call for reddwarf
* Get rid of one-character-per-entry traceback logging
* Require admin_context on host and volume calls
* Fixes reference to host in instance views
* Any() and All() don't short-circuit
* Moved the agent heart beat check to inside the guest api class
* Indentaion fixes all over the place
* Removing the "fake-mode" tox env in favor of specifically using 2.6
* Added PEP8 to tox and repaired a hasty syntax error
* PEP8 fixes, mostly whitespace and line-length changes
* Adding the mgmt taskmanager
* Improved ability to run fake mode in CI environments
* Since the comparison inside is <=, the message ought to reflect that
* Revamped the mgmt instance call
* Added code to declare a topic consumer
* mgmt call to get hardware info (cpu and memory) from an instance
* Adds filtering for deleted instances in the mgmt instance lists. Fixes deleted and deleted_at fields
* Fixed fake mode, which didn't work with our new changes
* mgmt status call should allow for deleted instances and show them as shutdown
* add exception handling for trying to update bad instances
* Fixing bad import to reflect openstack common RPC
* fake mode working with new novaclient create_server_volume method
* Removed fault guest queue delete code since it's already in delete_async
* Fixed small bug in fake mode
* Updated metadata to support XML in the mgmt api
* Removing unnnecessary line of code, which was causing mgmt/instances to not load correctly
* Host list detail now includes the tenant id of each instance
* Adding task_description to mgmt instance model. Wrapped a 404-happy server load to fix mgmt instance list for busted instances
* Adding task_description to mgmt instance model
* management api call to list all accounts with non-deleted instances
* Allowed us to grab a mgmt instance that lacks a backing server
* adding mgmt action to reboot an instance
* Fixing xml serialization for accounts
* Pruning one more tree() from the codebase
* Removes defaultdict references from mgmt account and instance views
* Added an admin task-manager
* adding management host update action
* Allowing resizes to be performed when MySQL is down
* Moved mgmt instances into its own directory
* Adding the deleted filter for account instances
* Mgmt storage device details
* fixing diagnostics tests for fake mode
* Added attributes for management instance get
* Mgmt instances
* Adding MGMT hosts call
* Adding a accounts management api extension Adding fake tests for mgmt accounts
* Updated RPC code
* admin users should use their correct tenant id as well
* add updated attribute to list all versions, add ability to list individual versions
* Adding support to use the nova create server with volume
* Adds ignore_dbs to configs, and makes the models respect it
* Add reset-password action and return Not Implemented
* Adds root to the ignore_users list and forces user list to obey it
* Fixed XML serializer to work with pagination
* Trimming spaces when reading from the config as a list of comma separated values
* Changing the Config get to always use a default value
* Adding the ability to get types other than strings to the Config class
* Fixed a syntax error
* adding logging to the service conf files
* Delete can now delete errored instances
* Adding tox support to Reddwarf
* password check shouldn't look in ignore_users
* added an ignore users option, e.g. prevents such users from being deleted, also only get is root enabled info by checking the db
* Changing the max_instances_per_user to 55 in the test config file
* change usage table to usage_events
* Adding fake mode support to the delete_queue function
* adding usage table
* Task manager will now detect some creation errors and mark the instances
* Delete the guest queue when deleting an instance
* don't raise an exception if there is an error, allow it to poll until timeout
* Allowing resizes to be performed when MySQL is down
* Moved functionality into wsgi.Controller
* Give the nova instance the real hostname if we've got it
* Adding a fault wrapper to catch and wrap exceptions that are thrown as plain text
* Fixing delete polling in the taskmanager
* Simple per-user instance quota checking in the create call. PEP8 fixes
* Forcing HTTPS in pagination next links
* Getting rid of our hack to simulate 404's for instance GETs
* Polling until the server is deleted
* Adding the create users and create databases to the fake mode prepare
* Adds deleted and deleted_at fields to the instance table
* Fixing DNS hostname code to save info to DB in taskmanager
* Adding default for users
* modify prepare call to create the users given on a create instance
* Refactoring the instance load method Adding back the volume used parameter. Added timeouts for all guest sync calls. Refactored the instance models
* Adding custom taskmanager q
* Disabling local-infile in my.cnf Adding max_user_connections and updating max_connections to +10 of user_connections
* Reversing the order of the dns save
* Removing hostname from list instances
* Farvel /flavors/detail. Also consolidating the API() into a common class
* Checks for an empty body in action requests and bottoms out appropriately
* Fixing no newline spaces in xml
* Actually fixing the regex this time
* Au revoir /instances/detail
* Fixing the whitespace&newlines in xml
* Preserves our contracted error response names by mapping webob exceptions to our named errors. Also repairs references to rd_exceptions that were missed in a refactor
* hacks to get the example generator up and running again
* Fixing xml serialization/deserialization
* Adding custom metadata to properly serialize xml for our needs
* Fixing the deserialization of xml objects as plurals
* Adding XMLNS
* Fixing the resize funk
* Changes defaults to None in the Query constructor
* Adding a admin context check used for the management calls
* Query class now makes use of GROUP BY. This fixes a bug with list_users
* Adding the user to the context
* Fixed resize flavor code to correctly fetch new info
* Fixing the way the common exceptions work with our webob objects
* Took vcpus out of flavors view
* Changed link generation code
* Changing volume_size to an integer
* Fixes an error in update_db
* Reverting migration 007 to as it was and creating 008
* No longer call to Nova to grab server or volume info for majority of instance calls
* Adding taskmanager to setup
* Making db instance restart async
* Async instance create operation
* Fixing the 200vs202
* Async operation for instance resize/delete
* Raise exceptions on user or database if a resource with the same name is found
* Fixing validation to not force public RDL users to have a volume
* volume list should return a list of dict and not objects
* Resize live volumes attached to an instance
* No longer make N calls to the volume API when listing instances + list bug fix
* Minor fix for poll_until
* Fixed infinite loop polling for ip address
* Serialize XML correctly if instance isn't found
* Resurrecting poll_until
* Changing version in reddwarf.conf.sample
* Adds pagination to instances, databases, and users. PEP8 fixes
* Fixing validation for volume size
* First beginnings of pagination
* Reinstantiating the Task Manager aka Reddwarf Manager
* Adding config for device mapping
* Added some additional assertions to the resize flavor code
* Dns bug fixes related to usernet ip
* Optional hostname in view
* Fixing the version
* Fixing the returns for no-body calls
* Fixed typo
* Issue 500 if the volume fails to prov
* Default hostname and minor comment
* Changing rsdns_records table name to be rax independent
* DNS Support for Instance IP
* Updating the port in reddwarf-api
* Updating reddwarf-server to use the config for ports, instead of defaulting at the command line args
* Adding image_update call to reddwarf-manage
* Adding fields to get better feedback from the agents
* updating the volume service endpoint to version 1 instead of v2
* Added another flavor to what gets returned in fake mode
* Bypassing auth/svccatalog in novaclient
* Fixes a problem when an instance exists, but the root history doesn't. Properly returns Never/Nobody now
* Moved root history out of instance details and into standalone mgmt call.
PEP8 fixes * No longer require a volume * Fixed things suggested in the pull request * Made resize work in fake mode * Made resize almost work * Continued work on resize_flavor * Adding manifest * pep8 fixes and resolve a few other rebase issues * clean up and change the volume desription to have the instance id * Allowing a user to add a volume to the instance on create api call * Fixing the setup.py to have the new reddwarf api * Adding a reddwarf-api binscript * Fixed a fake guestagent call so the tests pass in fake mode, too * Moved X-User call to WSGI middleware * PEP8 on a comment and removed a log debug line * Fixed a comment and added InstanceStatus.ERROR since it seemed to be missing * Extending mgmt api from core instancce api * Added root history to instance details. Still need to move the root history over to mysql/extensions, but the int tests work right now * Started the mgmt framework * Fixed root history schema; skeleton of root history model * Making the guest more extensible for services * More work on the restart action * Fixed a few simple issues with the restart code * Continued to work on restart API * Added API code for actions * Making instance creation a bit more extensible * Added fakery to the various database operations * Renaming database to mysql * Syntax errors * Adding multiple addresses instead of just one * PEP8 * Adding initial dbs to the prepare call * Removed some overzealous wrapping * Removed some more wrappers around some exceptions * Fixed some RPC casts and context problems that happened during a rebase * Fixed some of the overzealous _() wrapping to exclude the parameters being handed to the format strings * Every time I see a LOG.something(, I've wrapped _() around its contents * Show ips based on a flag value * Fixing the mysql list calls * Created test fakes for Nova and the Guest. With these you can run the server with no external dependencies! 
* Forcing https * Removed some vestigial CONFIG and LOG lines * Moved build_links and req into the view, cut out some redundant exceptions * Used new style of context and moved the req to the constructor in the views * Fixed the discrepancy between detail and plain view; fixed links in models * Fixed the Flavors API so the URLs returned are correct and all tests are passed * Adding Flavors API: models, views, and service * Removing the link from webob in exceptions * Made the constant proper cased * Making the exception_map work for extensions * Fixing up the internal_message * Updated the context object in all services * Fixed the instance list operations * Changing 201 to 200 * Added validation to the users/databases/root calls * Adding validation to user/schema * Adding root enabled * Fixed the instance status returned during instance deletion * Fixing the queues to use uuid * Fixing the extensions to behave like existing reddwarf * Added schema calls for list/create/delete * Added the delete user call * Built out create_user call * Fixed a small bug, added error msg as suggested by cp16net * Adding list_users for mysql users * Fixed bug in status property, added message to UnprocessableEntity error * Added status code, return 422 when trying to delete a busy instance * Adding the guest conf.d writing * Now returning the correct status strings * Adding validation of the api body * Simple change to delete instance call * Getting the guest status update working * Fixing the server load call and amqp cast_with_consumer * Minor tweaks while trying to get Instance.load to work * Fixed our keystone hack * Changing instance from a subclass of Compute instance into its own object * Adding dbaas-mycnf to the contrib folder * Furthering the guest agent prepare call * Adding the guestagent * Adding getattr and setattr back to fix the bug I just added * Fixes pep8 and circular import issues * Changed the proxy_tenant_id to "reddwarf" since this is what KeyStone expects * Fixing up the instance creation issues * Fixed auth to work with keystone * Adding more tests * Changed the version name attribute to id * Adding database instance details for show/create/delete/list * Creating a model unit test to test the data() functionality * Adding basic skeleton for testing controllers. (stole from melange) * Fixing the assignment issue * Added the use of context instead of passing just the token * removing the reddwarf_tenant_id * fix the config * fixing pep8 things * updates to get create working * Added gitignore and re-fixed pep8 violations * Removed API folder * Fixed Pep8 errors * adding some buggy bugs and updates from keystone/novaclient * Added in the guest manager code * Added the bin script for the guest agent * Added a sample config file for guest * Migrated guest-agent to guestagent * Added a manager and service for the guest * Got the unit test venv environment working * keystone made some changes * Adding some basic service code from nova. * Adding the proper taskmanager bin script * Adding a taskmanager impl (needs to be a proper baseclass) * Adding novas LoopingCall to utils * Updating dummy rpc cast in the database service so it sends to the task manager * Added the taskmanager class with some testing rpc code * Fixed a bug in rpc kombu w/ a bad durable declaration * Fixed the name of the queue exchange * Added a bit of rpc code to the taskmanager service for consuming * * This is mostly experimental at this point!!! * * This should be refactored into something common!!! 
* Initial checkin of rpc code * Stole most of the code from nova rpc * Updated the rpc and kombu code so it works with reddwarf * Import of openstack-common, added the context from common * Extended the common context * Fleshed out the basics for the guest-agent scp firstboot. * Copying ssh keys from the host to the image * Added a bootstrap_init.sh which does the scp work to the instance * Finished the bootstrap.sh so it will upload to glance * Added a service images table for identifing the current image on create * Added some dummy guest-agent code for testing purposes * Added a delete method, which is not finished * First pass attempt at a service_image registry in the db * Added some finder logic to the base models * Added service_image to models * No longer passing in the image name in the database create call * Misc mapper stuff w/ adding the table * Adding next_steps.txt so everyone can see my thoughts * Moving the old bootstrap in anticipation of nuking it * Figured out how to create/add qcow ubuntu images * new bootstrap/bootstrap.sh shows the proper way to create a image * modified the funkybugs cuz i squashed one in nova proper * added apt-cacher-ng and such to the old bootstrap (dont use it yet) * Adding the beginnings of ubuntu bootstrap * Adding the venv/unit test framework stuff * run_tests stuff * gerrit stuff * test-requires for pip * Adding the missing reddwarf-manage binscript * Getting hooked up w/ the db. * connection db and all that jazz * migrations are working * local DB models are working (only save) * Making a data/view model based on remote objects * removing the novaclient from the actual service * Adding extensions to the app, and shell extensions for users & schema * Adding a bit of code to get novaclient working via proxy token * A bit more cleanup to remove melange code, and making the auth code work again * Making the API look more like melange. this made the api a TON cleaner than modeling it after the existing nova code. * now uses no nova imports and still has things like extensions, versions, etc. * created a new server binscript * made a new common folder with all the non openstack-common wsgi, exception, extensions etc... * using openstack-common extensively * changing the version to use v0.1 * stole some code from melange to make all this possible <3 melange team * Experimental changes to create instance. this is untested * Adding idea project folder * Getting the delete call and refactoring the req/proxy_token a bit * Got the basics of the viewbuilder working for list instances * Got a nice API shell working * uses devstacks install for nova/keystone/et al * talks to nova via novaclient. * adds a few extensions to show how its done * has a single call to list instances * found a few minor bugs to discuss w/ nova crew ** Note in order to run this you have to mod the code downloaded by devstack or have local symlinks to nova & novaclient in your src tree running trunk This will get dealt with soon (it is a weekend!) * Adding a gitignore * Adding a script for getting the environment up and running * Folder structure layout stuff * Initial commit trove-5.0.0/PKG-INFO0000664000567000056710000000400212701410521015076 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: trove Version: 5.0.0 Summary: OpenStack DBaaS Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Trove -------- Trove is Database as a Service for OpenStack. 
============================= Usage for integration testing ============================= If you'd like to start up a fake Trove API daemon for integration testing with your own tool, run: .. code-block:: bash $ ./tools/start-fake-mode.sh Stop the server with: .. code-block:: bash $ ./tools/stop-fake-mode.sh ====== Tests ====== To run all tests and PEP8, run tox, like so: .. code-block:: bash $ tox To run just the tests for Python 2.7, run: .. code-block:: bash $ tox -epy27 To run just PEP8, run: .. code-block:: bash $ tox -epep8 To generate a coverage report, run: .. code-block:: bash $ tox -ecover (note: on some boxes, the results may not be accurate unless you run it twice) If you want to run only the tests in one file, you can use testtools, e.g. .. code-block:: bash $ python -m testtools.run trove.tests.unittests.python.module.path Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 trove-5.0.0/run_tests.py0000664000567000056710000002133612701410316016414 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import gettext import os import sys import traceback import eventlet from oslo_log import log as logging import proboscis import six from six.moves import urllib import wsgi_intercept from wsgi_intercept.httplib2_intercept import install as wsgi_install from trove.common import cfg from trove.common.rpc import service as rpc_service from trove.common.rpc import version as rpc_version from trove.common import utils from trove import rpc from trove.tests.config import CONFIG from trove.tests import root_logger eventlet.monkey_patch(thread=False) CONF = cfg.CONF original_excepthook = sys.excepthook def add_support_for_localization(): """Adds support for localization in the logging. If ../nova/__init__.py exists, add ../ to Python search path, so that it will override what happens to be installed in /usr/(local/)lib/python...
""" path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir) possible_topdir = os.path.normpath(path) if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) if six.PY2: gettext.install('nova', unicode=1) else: gettext.install('nova') def initialize_trove(config_file): from trove.common import pastedeploy root_logger.DefaultRootLogger() cfg.CONF(args=[], project='trove', default_config_files=[config_file]) logging.setup(CONF, None) topic = CONF.taskmanager_queue rpc.init(CONF) taskman_service = rpc_service.RpcService( None, topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION, manager='trove.taskmanager.manager.Manager') taskman_service.start() return pastedeploy.paste_deploy_app(config_file, 'trove', {}) def datastore_init(): # Adds the datastore for mysql (needed to make most calls work). from trove.configuration.models import DatastoreConfigurationParameters from trove.datastore import models models.DBDatastore.create( id=CONFIG.dbaas_datastore_id, name=CONFIG.dbaas_datastore, default_version_id=CONFIG.dbaas_datastore_version_id) models.DBDatastore.create(id=utils.generate_uuid(), name=CONFIG.dbaas_datastore_name_no_versions, default_version_id=None) main_dsv = models.DBDatastoreVersion.create( id=CONFIG.dbaas_datastore_version_id, datastore_id=CONFIG.dbaas_datastore_id, name=CONFIG.dbaas_datastore_version, manager="mysql", image_id='c00000c0-00c0-0c00-00c0-000c000000cc', packages='test packages', active=1) models.DBDatastoreVersion.create( id="d00000d0-00d0-0d00-00d0-000d000000dd", datastore_id=CONFIG.dbaas_datastore_id, name='mysql_inactive_version', manager="mysql", image_id='c00000c0-00c0-0c00-00c0-000c000000cc', packages=None, active=0) def add_parm(name, data_type, max_size, min_size=0, restart_required=0): DatastoreConfigurationParameters.create( datastore_version_id=main_dsv.id, name=name, restart_required=restart_required, max_size=max_size, min_size=0, data_type=data_type, deleted=0, deleted_at=None) add_parm('key_buffer_size', 'integer', 4294967296) add_parm('connect_timeout', 'integer', 65535) add_parm('join_buffer_size', 'integer', 4294967296) add_parm('local_infile', 'integer', 1) add_parm('collation_server', 'string', None, None) add_parm('innodb_buffer_pool_size', 'integer', 57671680, restart_required=1) def initialize_database(): from trove.db import get_db_api from trove.db.sqlalchemy import session db_api = get_db_api() db_api.drop_db(CONF) # Destroys the database, if it exists. db_api.db_sync(CONF) session.configure_db(CONF) datastore_init() db_api.configure_db(CONF) def initialize_fakes(app): # Set up WSGI interceptor. This sets up a fake host that responds each # time httplib tries to communicate to localhost, port 8779. 
def wsgi_interceptor(*args, **kwargs): def call_back(env, start_response): path_info = env.get('PATH_INFO') if path_info: env['PATH_INFO'] = urllib.parse.unquote(path_info) return app.__call__(env, start_response) return call_back wsgi_intercept.add_wsgi_intercept('localhost', CONF.bind_port, wsgi_interceptor) from trove.tests.util import event_simulator event_simulator.monkey_patch() from trove.tests.fakes import taskmanager taskmanager.monkey_patch() def parse_args_for_test_config(): test_conf = 'etc/tests/localhost.test.conf' repl = False new_argv = [] for index in range(len(sys.argv)): arg = sys.argv[index] print(arg) if arg[:14] == "--test-config=": test_conf = arg[14:] elif arg == "--repl": repl = True else: new_argv.append(arg) sys.argv = new_argv return test_conf, repl def run_tests(repl): """Runs all of the tests.""" if repl: # Actually show errors in the repl. sys.excepthook = original_excepthook def no_thanks(exit_code): print("Tests finished with exit code %d." % exit_code) sys.exit = no_thanks proboscis.TestProgram().run_and_exit() if repl: import code code.interact() def import_tests(): # F401 unused imports needed for tox tests from trove.tests.api import backups # noqa from trove.tests.api import configurations # noqa from trove.tests.api import databases # noqa from trove.tests.api import datastores # noqa from trove.tests.api import flavors # noqa from trove.tests.api import header # noqa from trove.tests.api import instances as rd_instances # noqa from trove.tests.api import instances_actions as rd_actions # noqa from trove.tests.api import instances_delete # noqa from trove.tests.api import instances_mysql_down # noqa from trove.tests.api import instances_resize # noqa from trove.tests.api import limits # noqa from trove.tests.api.mgmt import accounts # noqa from trove.tests.api.mgmt import admin_required # noqa from trove.tests.api.mgmt import hosts # noqa from trove.tests.api.mgmt import instances as mgmt_instances # noqa from trove.tests.api.mgmt import instances_actions as mgmt_actions # noqa from trove.tests.api.mgmt import malformed_json # noqa from trove.tests.api.mgmt import storage # noqa from trove.tests.api import replication # noqa from trove.tests.api import root # noqa from trove.tests.api import root_on_create # noqa from trove.tests.api import user_access # noqa from trove.tests.api import users # noqa from trove.tests.api import versions # noqa from trove.tests.db import migrations # noqa def main(import_func): try: wsgi_install() add_support_for_localization() # Load Trove app # Paste file needs absolute path config_file = os.path.realpath('etc/trove/trove.conf.test') # 'etc/trove/test-api-paste.ini' app = initialize_trove(config_file) # Initialize sqlite database. initialize_database() # Swap out WSGI, httplib, and other components with test doubles. initialize_fakes(app) # Initialize the test configuration. test_config_file, repl = parse_args_for_test_config() CONFIG.load_from_file(test_config_file) import_func() from trove.tests.util import event_simulator event_simulator.run_main(functools.partial(run_tests, repl)) except Exception as e: # Printing the error manually like this is necessary due to oddities # with sys.excepthook. 
print("Run tests failed: %s" % e) traceback.print_exc() raise if __name__ == "__main__": main(import_tests) trove-5.0.0/MANIFEST.in0000664000567000056710000000042312701410316015544 0ustar jenkinsjenkins00000000000000include AUTHORS include ChangeLog include trove/db/sqlalchemy/migrate_repo/migrate.cfg include trove/db/sqlalchemy/migrate_repo/README include trove/db/sqlalchemy/migrate_repo/versions/*.sql include etc/trove/*.* exclude .gitignore exclude .gitreview global-exclude *.pyc trove-5.0.0/tox.ini0000664000567000056710000000642012701410320015317 0ustar jenkinsjenkins00000000000000[tox] envlist = py34,py27,pep8,checkbuild,checklinks minversion = 1.6 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} usedevelop = True install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/mitaka} -U {opts} {packages} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = find ./trove -type f -name "*.pyc" -delete {envpython} run_tests.py python setup.py testr --slowest {envpython} generate_examples.py whitelist_externals = bash find [tox:jenkins] sitepackages = True [testenv:pep8] commands = flake8 # Check that .po and .pot files are valid: bash -c "find trove -type f -regex '.*\.pot?' -print0 | \ xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:debug] commands = oslo_debug_helper {posargs} [testenv:cover] # NOTE(amrith) The setting of the install_command in this location # is only required because currently infra does not actually # support constraints files for the cover job, and while # the environment variable UPPER_CONSTRAINTS_FILE is set, there's # no file there. It can be removed when infra changes this. install_command = pip install -U {opts} {packages} basepython = python2.7 commands = coverage erase python setup.py testr --coverage coverage run -a run_tests.py coverage html coverage xml coverage report [testenv:venv] # NOTE(amrith) The setting of the install_command in this location # is only required because currently infra does not actually # support constraints files for the venv job, and while # the environment variable UPPER_CONSTRAINTS_FILE is set, there's # no file there. It can be removed when infra changes this. install_command = pip install -U {opts} {packages} commands = {posargs} [flake8] show-source = True # H301 is ignored on purpose. # The rest of the ignores are TODOs. ignore = F821,H237,H238,H301,H404,H405,H501 builtins = _ exclude=.venv,.tox,dist,doc,openstack,*egg,tools,etc,build,*.po,*.pot filename=*.py,trove-* [testenv:checklinks] commands = openstack-doc-test --check-links {posargs} [testenv:checkbuild] commands = openstack-doc-test --check-niceness --check-syntax --check-deletions {posargs} openstack-doc-test --check-build {posargs} [testenv:publishdocs] # NOTE(amrith) The setting of the install_command in this location # is only required because currently infra does not actually # support constraints files for the publishdocs job, and while # the environment variable UPPER_CONSTRAINTS_FILE is set, there's # no file there. It can be removed when infra changes this. 
install_command = pip install -U {opts} {packages} commands = openstack-doc-test --check-build --publish --force [testenv:releasenotes] # NOTE(amrith) The setting of the install_command in this location # is only required because currently infra does not actually # support constraints files for the release notes job, and while # the environment variable UPPER_CONSTRAINTS_FILE is set, there's # no file there. It can be removed when infra changes this. install_command = pip install -U {opts} {packages} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html trove-5.0.0/contrib/0000775000567000056710000000000012701410521015445 5ustar jenkinsjenkins00000000000000trove-5.0.0/contrib/trove-guestagent0000775000567000056710000000232712701410316020704 0ustar jenkinsjenkins00000000000000#!/usr/bin/python # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This is necessary currently because the guest needs a init script. When the guest is moved out of the application, this will no longer be needed in the project.""" import os import sys possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'trove', '__init__.py')): sys.path.insert(0, possible_topdir) from trove.cmd.guest import main if __name__ == "__main__": sys.exit(main()) trove-5.0.0/.testr.conf0000664000567000056710000000024712701410316016100 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=${PYTHON:-python} -m subunit.run discover ./trove/tests/unittests $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list trove-5.0.0/AUTHORS0000664000567000056710000001775212701410517015076 0ustar jenkinsjenkins00000000000000Aaron Crickenberger Abitha Palaniappan Adam Gandelman Alex Tomic Alexander Ignatov Amrith Kumar Andreas Jaeger Andrew Bramley Andrey Shestakov Anna Philips Anna Shen Ashleigh Farnham Attila Fazekas Auston McReynolds Bertrand Lallau Bertrand Lallau Bo Wang Boden R Brian Hunter Chang Bo Guo ChangBo Guo(gcb) Chaozhe.Chen Christian Berendt Conrad Weidenkeller Corey Bryant Craig Craig Vyvial Cyril Roelandt DJ Johnstone Dan Nguyen Daniel Salinas David Fecker David Sariel Debasish Chowdhury Denis M Denis Makogon Denys Makogon Dina Belova Dirk Mueller Dmitriy Ukhlov Dong Ma Doug Hellmann Doug Shelley Dror Kagan Duk Loi Ed Cranford Edmond Kotowski Erik Redding Felipe Reyes Felipe Reyes Gauvain Pocentek George Peristerakis Greg Hill Greg Lucas Gábor Antal Haomai Wang He Yongli Iccha Sethi Ihar Hrachyshka Illia Khudoshyn Ilya Sviridov IonuÈ› ArțăriÈ™i IonuÈ› ArțăriÈ™i Ishita Mandhan J Daniel Ritchie James E. Blair James E. 
Blair James Page Jamie Lennox Jared Rohe Jeremy Stanley Jian Xu Joe Cruz Joe Cruz Joe Cruz Joe Gordon Josh Dorothy Joshua Harlow JuPing Julien Danjou Julien Vey KIYOHIRO ADACHI Kaleb Pomeroy Kamil Rykowski Kapil Saxena Kenneth Wilke Kevin Conway Khyati Sheth Laurel Michaels Li Ma LiuNanke Luigi Toscano Mariam John Mark Biciunas Mark McLoughlin Martin Kletzander Masaki Matsushita Mat Lowery Matt Riedemann Matt Van Dijk Mayuri Ganguly Michael Basnight Michael Krotscheck Michael Still Michael Yu Monty Taylor Morgan Jones Morgan Jones Nikhil Nikhil Manchanda Nilakhya Chatterjee Nirav Shah Nirmal Ranganathan OTSUKA, Yuanying OctopusZhang Oleksandr Kyrylchuk Olga Kopylova OndÅ™ej Nový Paul Lodronio Paul Marshall Paul Marshall Peter MacKinnon Peter Stachowski Petr Malik Petra Sargent Pierre RAMBAUD Pradeep Chandani Ramashri Umale Riddhi Shah Riley Bastrop Robert Myers Russell Bryant Sam Morrison Saurabh Surana Sebastien Badia Sergey Gotliv Sergey Lukjanov Sergey Vilgelm Shuichiro MAKIGAKI Shuquan Huang Simon Chang Sonali Goyal Sreedhar Chidambaram Steve Leon Steve Leon Sudarshan Acharya Sushil Kumar Sushil Kumar Sushil Kumar SushilKM Swapnil Kulkarni (coolsvap) Swapnil Kulkarni Takashi NATSUME Tanis De Luna Telles Nobrega Theron Voran Thierry Carrez Tim Simpson Timothy He Tristan Cacqueray Victor Stinner Victoria Martinez de la Cruz Victoria Martinez de la Cruz Vincent Untz Vipul Sabhaya Viswa Vutharkar XiaBing Yao Zhenguo Niu Zhenguo Niu Zhi Yan Liu ZhiQiang Fan Zhongyue Luo alex amcrn baz boden caoyue dagnello dangming daniel-a-nguyen daniel-a-nguyen debasish ekotowski ewilson-tesora guang-yee jcannava justin-hopper liuqing lvdongbing mariam john mariamj mariamj@us.ibm.com mbasnight ming dang <743759846@qq.com> pradeep rameshsahu rico.lin ridhi.j.shah@gmail.com ruiyuan-shen rumale rumale rvemula sandrely26 saradpatel sarvesh-ranjan shalini khandelwal sharika shashank-gl shayne-burgess svenkataramanaia svenkataramanaia tanlin tianqing ting.wang venkatamahesh yangyapeng trove-5.0.0/requirements.txt0000664000567000056710000000322512701410316017275 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr>=1.6 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT eventlet!=0.18.3,>=0.18.2 # MIT keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT WebOb>=1.2.3 # MIT PasteDeploy>=1.5.0 # MIT Paste # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD netifaces>=0.10.4 # MIT httplib2>=0.7.5 # MIT lxml>=2.3 # BSD passlib>=1.6 # BSD python-heatclient>=0.6.0 # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 python-cinderclient>=1.3.1 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 python-designateclient>=1.5.0 # Apache-2.0 python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0 iso8601>=0.1.9 # MIT jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT Jinja2>=2.8 # BSD License (3 clause) pexpect!=3.3,>=3.1 # ISC License oslo.config>=3.7.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 MySQL-python;python_version=='2.7' # GPL Babel>=1.3 # BSD six>=1.9.0 # MIT stevedore>=1.5.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0 osprofiler>=1.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD trove-5.0.0/CONTRIBUTING.rst0000664000567000056710000001172112701410316016452 0ustar jenkinsjenkins00000000000000============ Contributing ============ Our community welcomes all people interested in open source cloud computing, and encourages you to join the `OpenStack Foundation `_. If you would like to contribute to the development of OpenStack, you must follow the steps documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow (Pull requests submitted through GitHub will be ignored.) Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/trove We welcome all types of contributions, from blueprint designs to documentation to testing to deployment scripts. The best way to get involved with the community is to talk with others online or at a meetup and offer contributions through our processes, the `OpenStack wiki `_, blogs, or on IRC at ``#openstack-trove`` on ``irc.freenode.net``. Code Reviews ============ We value your contribution in reviewing code changes submitted by others, as this helps increase the quality of the product as well. The Trove project encourages the guidelines (below). - A rating of +1 on a code review is indicated if: * It is your opinion that the change, as proposed, should be considered for merging. 
- A rating of 0 on a code review is indicated if: * The reason why you believe that the proposed change needs improvement is merely an opinion, * You have a question, or need a clarification from the author, * The proposed change is functional but you believe that there is a different, better, or more appropriate way in which to achieve the end result being sought by the proposed change, * There is an issue of some kind with the Commit Message, including violations of the Commit Message guidelines, * There is a typographical or formatting error in the commit message or the body of the change itself, * There could be improvements in the test cases provided as part of the proposed change. - A rating of -1 on a code review is indicated if: * The reason why you believe that the proposed change needs improvement is irrefutable, or it is a widely shared opinion as indicated by a number of +0 comments, * The subject matter of the change (not the commit message) violates some well understood OpenStack procedure(s), * The change contains content that is demonstrably inappropriate, * The test cases do not exercise the change(s) being proposed. Some other reviewing guidelines: - In general, when in doubt, a rating of 0 is advised, - The code style guidelines accepted by the project are part of tox.ini; a violation of some other hacking rule(s) or of pep8 is not a reason to -1 a change. Other references: - https://wiki.openstack.org/wiki/CodeReviewGuidelines - http://docs.openstack.org/infra/manual/developers.html - https://wiki.openstack.org/wiki/ReviewChecklist - https://wiki.openstack.org/wiki/GitCommitMessages - http://docs.openstack.org/developer/hacking/ - https://review.openstack.org/#/c/116176/ Trove Documentation =================== This repository also contains the following OpenStack manual: * Database Services API Reference Prerequisites for Building the Documentation -------------------------------------------- `Apache Maven `_ must be installed to build the documentation. To install Maven 3 for Ubuntu 12.04 and later, and Debian wheezy and later:: apt-get install maven On Fedora 15 and later:: yum install maven3 Building -------- The manuals are in the ``apidocs`` directory. To build a specific guide, look for a ``pom.xml`` file within a subdirectory, then run the ``mvn`` command in that directory. For example:: cd apidocs mvn clean generate-sources The generated PDF documentation file is:: apidocs/target/docbkx/webhelp/cdb-devguide/cdb-devguide-reviewer.pdf The root of the generated HTML documentation is:: apidocs/target/docbkx/webhelp/cdb-devguide/content/index.html Testing of changes and building of the manual ---------------------------------------------- Install the python tox package and run ``tox`` from the top-level directory to use the same tests that are done as part of our Jenkins gating jobs. If you would like to run individual tests, run: * ``tox -e checkniceness`` - to run the niceness tests * ``tox -e checksyntax`` - to run syntax checks * ``tox -e checkdeletions`` - to check that no deleted files are referenced * ``tox -e checkbuild`` - to actually build the manual tox will use the `openstack-doc-tools package `_ for execution of these tests. openstack-doc-tools has a requirement on maven for the build check.
trove-5.0.0/releasenotes/0000775000567000056710000000000012701410521016476 5ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/notes/0000775000567000056710000000000012701410521017626 5ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/notes/use-osprofiler-options-58263c311617b127.yaml0000664000567000056710000000021712701410316027072 0ustar jenkinsjenkins00000000000000--- other: - Starting with the 1.0.0 osprofiler release, the config options osprofiler needs are consolidated inside osprofiler itself. trove-5.0.0/releasenotes/notes/couchdb-backup-restore-0cc3324c3088f947.yaml0000664000567000056710000000011112701410316027117 0ustar jenkinsjenkins00000000000000--- features: - Support has been added for CouchDB Backup and Restore. trove-5.0.0/releasenotes/notes/couchdb-user-db-functions-fa41ac47fce095cb.yaml0000664000567000056710000000012212701410316030114 0ustar jenkinsjenkins00000000000000--- features: - Support has been added for CouchDB database and user functions. trove-5.0.0/releasenotes/notes/mariadb-gtid-replication-1ea972bcfe909773.yaml0000664000567000056710000000052312701410316027600 0ustar jenkinsjenkins00000000000000--- features: - Implements replication based on GTIDs for MariaDB. Adds GTID replication strategy for MariaDB. Implements MariaDB specific GTID handling in guestagent. Configures MariaDB config template to support bin logging. Adds MariaDB helper overrides to eliminate configuration group tests from scenario tests. trove-5.0.0/releasenotes/notes/fix-apply-configuration-on-prepare-4cff827b7f3c4d33.yaml0000664000567000056710000000071512701410316031647 0ustar jenkinsjenkins00000000000000--- fixes: - If given, the configuration overrides are applied in prepare, just before creating initial users and/or databases. Failure to apply the given configuration flips the instance into a failed state. The default implementation saves the overrides and restarts the database service to apply the changes. Datastores that do not require a restart may override the base implementation in 'apply_overrides_on_prepare()'. trove-5.0.0/releasenotes/notes/improve-mysql-user-list-pagination-71457d934500f817.yaml0000664000567000056710000000017112701410316031337 0ustar jenkinsjenkins00000000000000--- fixes: - Filter ignored users in the original query before the result gets paginated (like in list_databases). trove-5.0.0/releasenotes/notes/implement-cassandra-clustering-9f7bc3ae6817c19e.yaml0000664000567000056710000000024212701410316031127 0ustar jenkinsjenkins00000000000000--- features: - OpenStack Trove now supports clustering for Cassandra datastores. You can access clustering capabilities through the Trove cluster API. trove-5.0.0/releasenotes/notes/fix-trove-events-8ce54233504065cf.yaml0000664000567000056710000000012612701410316026013 0ustar jenkinsjenkins00000000000000--- fixes: - Generate trove events for the current period, and not a future period. trove-5.0.0/releasenotes/notes/add-cors-support-fe3ecbecb68f7efd.yaml0000664000567000056710000000004312701410316026610 0ustar jenkinsjenkins00000000000000--- other: - Added CORS support. trove-5.0.0/releasenotes/notes/percona-2.3-support-2eab8f12167e44bc.yaml0000664000567000056710000000047412701410316026452 0ustar jenkinsjenkins00000000000000--- features: - Support has been added for Percona XtraBackup version 2.3. fixes: - Fixes bug 1558794. The 2.3 version of Percona XtraBackup performs some additional validations of the command line options passed to innobackupex. The Trove code now complies with the new validations being performed.
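The 'fix-apply-configuration-on-prepare' note above describes the guestagent behavior: the default implementation saves the overrides and restarts the database service, while datastores that can reload configuration online may override 'apply_overrides_on_prepare()'. The following is a minimal illustrative sketch, not code from the Trove tree; the hook name comes from the release note, but the manager base class path and the 'app' helper methods are assumptions standing in for datastore-specific logic.

.. code-block:: python

    from trove.guestagent.datastore import manager

    class ReloadableDatastoreManager(manager.Manager):
        """Guest manager for a datastore that can reload its config online."""

        def apply_overrides_on_prepare(self, context, overrides):
            # The base implementation persists the overrides and then
            # restarts the database service. This datastore supports an
            # online reload, so the restart is skipped entirely.
            if overrides:
                self.app.save_overrides(overrides)    # assumed helper
                self.app.reload_configuration()       # assumed helper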
trove-5.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701410316022101 0ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/notes/fix-mongo-cluster-grow-8fa4788af0ce5309.yaml0000664000567000056710000000025612701410316027304 0ustar jenkinsjenkins00000000000000--- fixes: - Fixes bug 1526024, a failure in growing a mongodb cluster because of a problem in the way in which passwords were synchronized with new query routers. trove-5.0.0/releasenotes/notes/cassandra-configuration-groups-e6bcf4014a79f14f.yaml0000664000567000056710000000026612701410316031137 0ustar jenkinsjenkins00000000000000--- features: - Implement configuration groups for Cassandra 2.1. You can now manage configuration of Cassandra datastores using the Trove configuration groups capability. trove-5.0.0/releasenotes/notes/db2-backup-restore-96ab214cddd15181.yaml0000664000567000056710000000011712701410316026313 0ustar jenkinsjenkins00000000000000--- features: - Support has been added for DB2 Express-C Backup and Restore. trove-5.0.0/releasenotes/notes/pxc-grow-shrink-0b1ee689cbc77743.yaml0000664000567000056710000000114612701410316026005 0ustar jenkinsjenkins00000000000000--- features: - This adds support for pxc to grow and shrink a cluster. * api and taskmanager support for shrinking a cluster * validate that the networks given are the same for each instance in the cluster. * make sure to add the existing networks on an instance in the cluster. * add new Error task for grow and shrink. * nova client version configuration changed to a string option rather than an int option because the nova microversions change nova api output. This was needed for the network interfaces on existing instances. * testing for grow and shrink cluster trove-5.0.0/releasenotes/notes/secure-mongodb-instances-1e6d7df3febab8f4.yaml0000664000567000056710000000035312701410316030136 0ustar jenkinsjenkins00000000000000--- security: - Fixes bug 1507841, provides a configuration setting to enable Role Based Access Control (RBAC) for MongoDB clusters. If mongodb.cluster_secure is set to False (default is True) then RBAC will be disabled. trove-5.0.0/releasenotes/notes/module-management-66d3979cc45ed440.yaml0000664000567000056710000000062312701410316026265 0ustar jenkinsjenkins00000000000000--- features: - A new feature called 'module management' has been added to Trove. Users can now create, update, list and delete modules. A module is a file that is provided to Trove, and when a database instance is launched, that file is deposited on the guest instance. This feature can be used for depositing files like, for example, license files onto guest database instances. trove-5.0.0/releasenotes/notes/pxc-cluster-root-enable-30c366e3b5bcda51.yaml0000664000567000056710000000010312701410316027445 0ustar jenkinsjenkins00000000000000--- features: - Adding the ability to root enable a pxc cluster. trove-5.0.0/releasenotes/notes/vertica-grow-shrink-cluster-e32d48f5b2e1bfab.yaml0000664000567000056710000000032012701410316030522 0ustar jenkinsjenkins00000000000000--- features: - Implemented grow and shrink for clusters of Vertica datastore. The number of nodes in the cluster must be greater than the number required to satisfy the min_ksafety configuration setting.
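Both the pxc and Vertica notes above expose cluster grow and shrink through the Trove cluster API. A rough sketch of driving those actions from Python follows; it assumes a python-troveclient recent enough to expose ClusterManager.grow/shrink, and the credentials, IDs, and payload keys ('flavorRef', 'volume', 'id') shown are placeholder values following the instance-create schema rather than output copied from a real deployment.

.. code-block:: python

    from troveclient.v1 import client

    # Placeholder credentials; substitute real Keystone values.
    trove = client.Client('demo', 'secret',
                          project_id='demo-project',
                          auth_url='http://keystone:5000/v2.0')

    cluster = trove.clusters.get('my-cluster-id')

    # Grow: each new node is described like an instance-create request.
    trove.clusters.grow(cluster.id, instances=[
        {'flavorRef': '7', 'volume': {'size': 2}},
    ])

    # Shrink: name the specific node(s) to remove by instance id.
    trove.clusters.shrink(cluster.id, instances=[
        {'id': 'instance-id-to-remove'},
    ])

On the wire these calls should map to a POST on /v1.0/{tenant_id}/clusters/{cluster_id} with a body of {"grow": [...]} or {"shrink": [...]}.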
trove-5.0.0/releasenotes/notes/cassandra-backup-and-restore-00de234de67ea5ee.yaml0000664000567000056710000000015212701410316030507 0ustar jenkinsjenkins00000000000000--- features: - Support has been added for Cassandra backup and restore using the Nodetool utility. trove-5.0.0/releasenotes/notes/cassandra-user-functions-041abfa4f4baa591.yaml0000664000567000056710000000735212701410316030001 0ustar jenkinsjenkins00000000000000--- features: - This patch set implements the following functionality for the Cassandra datastore. create/delete/get user list users change password grant/revoke/list access update attributes create/delete database list databases Notes on Cassandra users In Cassandra, only SUPERUSERS can create other users and grant permissions to database resources. Trove uses the 'os_admin' superuser to perform its administrative tasks. It proactively removes the built-in 'cassandra' superuser on prepare. The users it creates are all 'normal' (NOSUPERUSER) accounts. The permissions it can grant are also limited to non-superuser operations. This is to prevent anybody from creating a new superuser via the Trove API. Updatable attributes include username and password. The configuration template had to be updated to enable authentication and authorization support (original configuration allowed anonymous connections). Default implementations used are authenticator org.apache.cassandra.auth.PasswordAuthenticator authorizer org.apache.cassandra.auth.CassandraAuthorizer The superuser password is set to a random Trove password which is then stored in a Trove-read-only file in '~/.cassandra/cqlshrc' which is also the default location for client settings. Notes on Cassandra keyspaces Cassandra stores replicas on multiple nodes to ensure reliability and fault tolerance. All replicas are equally important; there is no primary or master. A replication strategy determines the nodes where replicas are placed. The total number of replicas across the cluster is referred to as the replication factor. The above 'create database' implementation uses 'SimpleStrategy' with just a single replica on the guest machine. This is a very simplistic configuration only good for the most basic applications and demonstration purposes. SimpleStrategy is for a single data center only. The following system keyspaces have been included in the default 'ignore_dbs' configuration list and therefore excluded from all database operations: 'system', 'system_auth', 'system_traces' Notes on user rename Cassandra does not have a native way for renaming users. The reason why Cassandra itself does not implement rename is apparently just lack of demand for that feature. We implement it by creating a new user, transferring permissions and dropping the old one (which also removes its existing permissions). I asked about the sanity of this rename approach on the Cassandra mailing list and IRC channel and there should not be anything inherently wrong with the proposed procedure. This method, however, requires the user to always provide a password. Additional notes Trove uses the official open-source Python driver for Cassandra to connect to the database and execute queries. The connection is implemented in CassandraConnection. It is now also used to obtain the current database status as opposed to the original method of parsing output of the client tool. The 'common/operating_system' module was extended with two new functions for reading/writing ini-style and YAML configuration files to/from Python dicts.
Unit tests were added to 'guestagent/test_operating_system'. The existing Manager unit tests were extended to include the added functionality. Also includes some minor improvements to comments and log messages. Used the existing operating_system interface to update file ownership. The system module was removed and its contents moved to the Application class. This is to reduce the number of files and help facilitate overriding. trove-5.0.0/releasenotes/notes/mysql-user-list-pagination-9496c401c180f605.yaml0000664000567000056710000000032012701410316027726 0ustar jenkinsjenkins00000000000000--- fixes: - Fix bug 1537986 which corrects the pagination in the mysql user list command. When internal users (ignore_users) are eliminated from the list, the pagination was not correctly handled. trove-5.0.0/releasenotes/notes/implement-mariadb-clustering-088ac2f6012689fb.yaml0000664000567000056710000000024012701410316030415 0ustar jenkinsjenkins00000000000000--- features: - OpenStack Trove now supports clustering for MariaDB datastores. You can access clustering capabilities through the Trove cluster API. trove-5.0.0/releasenotes/notes/vertica-configuration-groups-710c892c1e3d6a90.yaml0000664000567000056710000000012612701410316030465 0ustar jenkinsjenkins00000000000000--- features: - Implemented configuration groups capability for Vertica datastores. trove-5.0.0/releasenotes/notes/fix-bad-swift-endpoint-in-guestlog-05f7483509dacbbf.yaml0000664000567000056710000000043412701410316031530 0ustar jenkinsjenkins00000000000000--- fixes: - The guest log code raises a non-serializable exception if the given Swift endpoint is invalid. This causes an ambiguous "Circular reference detected" error on the guest, and a timeout on the caller. This case is now caught and the correct exception raised. trove-5.0.0/releasenotes/notes/vertica-load-via-curl-call-4d47c4e0b1b53471.yaml0000664000567000056710000000070212701410316027643 0ustar jenkinsjenkins00000000000000--- features: - Vertica comes with a User Defined Load function that takes a URL as a load source. This can be used to load files that are stored in Swift. As this is a common use case, it is valuable to enable this by default. This can be done in the post-prepare method for Vertica. A new UDL_LIBS list has been added that describes any UDLs to be loaded into the database. This change only has one entry - the curl function. trove-5.0.0/releasenotes/notes/drop-python-26-support-39dff0c5636edc74.yaml0000664000567000056710000000006612701410316027247 0ustar jenkinsjenkins00000000000000--- deprecations: - Dropping support for python 2.6 trove-5.0.0/releasenotes/notes/mongo-cluster-grow-use-az-and-nic-values-207b041113e7b4fb.yaml0000664000567000056710000000025012701410316032405 0ustar jenkinsjenkins00000000000000--- fixes: - Mongo cluster grow operations were not creating instances with the provided az and nic values. These should be used if the caller provided them. trove-5.0.0/releasenotes/notes/dbaas-ceilometer-notifications-5a623d0d6520be72.yaml0000664000567000056710000000024712701410316030711 0ustar jenkinsjenkins00000000000000--- features: - Additional Ceilometer notifications have been provided by Trove including create, end, error notifications for all state-changing API calls. 
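The two MySQL user-list pagination fixes above (bug 1537986 and the 'filter ignored users in the original query' note) share one idea: drop the ignore_users entries before applying limit/marker pagination, otherwise a page can come back short and the next-page marker can land on a hidden user. A self-contained sketch of the difference, using plain Python lists in place of the real MySQL query:

.. code-block:: python

    IGNORE_USERS = {'root', 'os_admin'}
    USERS = ['alice', 'os_admin', 'bob', 'root', 'carol', 'dave']

    def broken_page(users, limit):
        # Paginate first, filter second: the page comes up short because
        # hidden users still consumed slots within the limit.
        page = users[:limit]
        return [u for u in page if u not in IGNORE_USERS]

    def fixed_page(users, limit):
        # Filter "in the query" first, then paginate: every full page
        # holds `limit` visible users and the marker always points at a
        # visible user.
        visible = [u for u in users if u not in IGNORE_USERS]
        return visible[:limit]

    print(broken_page(USERS, 3))  # ['alice', 'bob'] -- one user short
    print(fixed_page(USERS, 3))   # ['alice', 'bob', 'carol']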
trove-5.0.0/releasenotes/notes/implement-cassandra-root-b0870d23dbf1a848.yaml0000664000567000056710000000021412701410316027632 0ustar jenkinsjenkins00000000000000--- features: - OpenStack Trove now supports superuser access for the Cassandra datastore via the root-enable and root-disable API's. trove-5.0.0/releasenotes/notes/datastore-manager-refactor-5aeac4e6bfa6e07b.yaml0000664000567000056710000000007212701410316030424 0ustar jenkinsjenkins00000000000000--- other: - Refactor the datastore guest manager code. trove-5.0.0/releasenotes/source/0000775000567000056710000000000012701410521017776 5ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/source/index.rst0000664000567000056710000000017312701410320021635 0ustar jenkinsjenkins00000000000000====================== Trove Release Notes ====================== .. toctree:: :maxdepth: 1 liberty unreleased trove-5.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701410521022133 5ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701410316024406 0ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000016012701410316022656 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: trove-5.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701410316022200 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty trove-5.0.0/releasenotes/source/conf.py0000664000567000056710000002153612701410316021306 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Trove Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. 
source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Trove Release Notes' copyright = u'2015, Trove Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from trove.version import version_info as trove_version # The full version, including alpha/beta/rc tags. release = trove_version.version_string_with_vcs() # The short X.Y version. version = trove_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'TroveReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'TroveReleaseNotes.tex', u'Trove Release Notes Documentation', u'Trove Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'trovereleasenotes', u'Trove Release Notes Documentation', [u'Trove Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'TroveReleaseNotes', u'Trove Release Notes Documentation', u'Trove Developers', 'TroveReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False trove-5.0.0/releasenotes/source/_static/0000775000567000056710000000000012701410521021424 5ustar jenkinsjenkins00000000000000trove-5.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701410316023677 0ustar jenkinsjenkins00000000000000trove-5.0.0/doc-test.conf0000664000567000056710000000037212701410316016402 0ustar jenkinsjenkins00000000000000[DEFAULT] repo_name = trove api_site = True # From apidocs file_exception = includewars.xml # These two options need to come as triplets: book = apidocs target_dir = target/docbkx/webhelp/cdb-devguide-external publish_dir = api/openstack-databases trove-5.0.0/README.rst0000664000567000056710000000170012701410316015474 0ustar jenkinsjenkins00000000000000Trove -------- Trove is Database as a Service for OpenStack. ============================= Usage for integration testing ============================= If you'd like to start up a fake Trove API daemon for integration testing with your own tool, run: .. code-block:: bash $ ./tools/start-fake-mode.sh Stop the server with: .. code-block:: bash $ ./tools/stop-fake-mode.sh ====== Tests ====== To run all tests and PEP8, run tox, like so: .. code-block:: bash $ tox To run just the tests for Python 2.7, run: .. code-block:: bash $ tox -epy27 To run just PEP8, run: .. code-block:: bash $ tox -epep8 To generate a coverage report, run: .. code-block:: bash $ tox -ecover (note: on some boxes, the results may not be accurate unless you run it twice) If you want to run only the tests in one file, you can use testtools, e.g. .. code-block:: bash $ python -m testtools.run trove.tests.unittests.python.module.path trove-5.0.0/setup.py0000664000567000056710000000200412701410316015515 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) trove-5.0.0/apidocs/0000775000567000056710000000000012701410521015427 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/pom.xml0000664000567000056710000001642112701410316016752 0ustar jenkinsjenkins00000000000000[XML element markup lost in extraction; recoverable content of the pom: Maven project com.rackspace.cloud.dbaas:dbaas-docs:1.0.0-SNAPSHOT, name "Database Public API Spec", jar packaging; dependencies net.sourceforge.saxon:saxon:9.1.0.8 and com.rackspace.cloud.api:wadl-tools:1.0.9; build plugins com.rackspace.cloud.api:clouddocs-maven-plugin 2.0.2 (cdb-devguide generate-webhelp execution at generate-sources, source document cdb-devguide.xml, Google Analytics ID UA-23102455-4, canonical URL http://docs.openstack.org/api/openstack-database/content, contact mike.asthalter@rackspace.com, openstack branding), org.apache.maven.plugins:maven-dependency-plugin (unpack-shared-resources execution: unpack-dependencies of wadl-tools **/xsl/*.xsl into ${project.build.directory}/generated-resources at generate-sources), maven-antrun-plugin 1.5 (ant run at generate-sources), maven-assembly-plugin 2.3 (descriptor includewars.xml, single assembly at package); repository and plugin repository rackspace-research "Rackspace Research Repository" at https://maven.research.rackspacecloud.com/content/groups/public/] trove-5.0.0/apidocs/replacements.config0000664000567000056710000000022712701410316021303 0ustar jenkinsjenkins00000000000000XPATH=//text() accountId->tenantId accountID->tenantID account ID->tenant ID ord.databases.api.rackspacecloud.com->openstack.example.com .*Repose.*\n->trove-5.0.0/apidocs/.gitignore0000664000567000056710000000000012701410316017407 0ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/0000775000567000056710000000000012701410521016216 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/images/0000775000567000056710000000000012701410521017463 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/images/Choose_CS_Image_CCP.png0000664000567000056710000006064412701410316023601 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
DÕ€@*°lÅ ìû@%¨‡A8N€Np\WÁM0îÇ@ ÆÀ+0 fÁAxˆ Ñ %HÒ…Œ Kˆ 9Cž?EA1BYÐ:h T •A•P-Ô ý †.Bס!è!4MBo¡Ï0S`:¬ëÁf0v…ýàPx,€Óá|¸ÞWÀuðQ¸¾ß„ïÁbø<ƒ„Œ0MÄa#îH Ä#"dR‚”#uH+Òô!w12…|Âà04 c‚qÄø`Â0-/'o-.Ÿ+_%N^Ì@z _F c'ãã>㳂š‚«BœÂ6…V…Û U9ŠqŠ%ŠmŠ÷?+1•<•’•v+u*=UÆ(*+ç(R¾¢<¥BWqTá©”¨œPy¤ «ª†¨¨VíWQSWóVKS; vImJ¡ÎQORß«~^}Rƒ¦á¬‘¨±Wã‚ÆK¦<Ó•™Â¬`^fNkªjúhfiÖjhÎi±´Â´ µÚ´žj“´ÙÚñÚ{µ{µ§u4ttÖé´è<Ò%ê²ut÷ëöé~ÐcéEèmÕëÔ›`)²|Yù¬Ö}ª¾‹~º~þ]œÛ Ùà Á !lhc˜`XexË6²5J4:h4dŒ5¶7×›PL\M²MZLFL¦þ¦…¦¦¯ÍtÌ¢Ív›õ™}3·1O1¯7l!g±Ô¢Ð¢Ûâ­¥¡%ϲÊò®ÕÊËj£U—Õk#ë8ëCÖlh66[mzm¾ÚÚÙŠl[m'ítìbìªí†Ùtv{;ûš=ÖÞÍ~£ýYûO¶™'þp4qLv<â8±„µ$nIý’Q'-'®S­“Ø™éãü£³ØEÓ…ëRçòœ£Íás8ã®®I®G]_»™»‰ÜÚÝ>¸;¸¯wïñ@<¼=J<<å<Ã<+=Ÿyiy ¼Z¼¦½m¼ ¼{|°>~>»}†}Õ|y¾Í¾ÓKí–®_zÙâ·Ü¯Òﹿ¡¿È¿;X°'àÉ2ÝeÂe Ð7pOàÓ VPzЙ`\pPpUð‹‹u!}ËiË×,?²|6Ô-tgèã0ý°¬°Þpéð•áÍá"<"Ê"Ä‘f‘ë#oF)G%FuEã£Ã£¢gVx®Ø·bl¥ÍÊâ•÷W±V宺¾ZyuÊêsk¤×pלŒÁÆDĉù äÖqgb}c«c§yî¼ý¼W|/2Î)®,n<Þ)¾,~Bà$Ø#˜LpI(O˜JtO¬L|“ä“T“ô!90¹1y>%"¥-•“zZ('L^^«¾6wíPšQZqš8Ý!}_ú´ÈOÔe¬Êèʤ£b ?K?뻬‘lçìªì9á9'ses…¹ýy†yÛòÆó½ò*Àð z×i®Û¼nd½ëúÚ Ð†Ø ½µ7mÛ佩i3isòæ_ Í Ë ßo‰ØÒ]¤V´©hô;ïïZŠ¥ŠEÅÃ[·Ö|ù>ñûmVÛlûVÂ/¹Qj^Z^úe;oû,~¨øa~Gü޶;íÂíÛewS™lY~Ù螀={™{Kö¾ß·fßõrëòšý¤ýYûÅþ]tì:ð¥2¡ò^•[U[µjõ¶êùoâj­Q«)­ùücâj½k;êôêÊãg~Q^ß÷û§æå†Ò†¯ÂFqSHÓåf»ææ#ªGv¶À-Y-“GW<æq¬«Õ¤µ¶ÑVzÏ:þò瘟ïŸð;Ñ{’}²õ”î©êvZ{IÔ‘×1Ý™Ð)îŠê:½ôto·cwûÓ3g5ÏV“?·ó<é|Ñùù ùfzÒz¦. .Žö®é}|)òÒÝËÁ—®ø]¹vÕëê¥>×¾ ל®½îpýô öΛ¶7;úmúÛ±ù¥}Àv ã–Ý­®AûÁî¡%Cço»Ü¾xÇãÎÕ»¾woÞ[voè~ØýÃ+‡Åø&¦<|ó(ûÑÜãMO°OJžÊ<-¦ú¬îWƒ_ÛĶâs##ýÏ—?<Ê}õ[Æo_ÆŠ^P_”kŒ7OXNœôš|¹â娫´WsSÅ¿Ëþ^ýZÿõ©?8ôOGN½½™»ýÒ»Æ÷Öï{g‚fžÍ¦ÎÎ}(ù¨ô±éûSßçˆÏãs9_ð_*¾|íþæ÷íÉ|êü|WÄ]Ð:Ãññ¼m€ Õ„$©E ¹°ZÔ½(K”˜Ä%ö¼¨32¶Ôn Š@ê5¨³R=¡Ê°•ÕŸŽf$–oµ(Î r'*MÊççßE€7àëðüü\çüü×Të> gvQ»Jî’9 ‡keaã½`ƒ$ðoö/†û¸–Bs¬ IDATxíœÕÙ‡Š(¨ˆˆˆ€4Vì½[ÔØ[¬1‰ú©IŒšØ{‰½·Øk4±w+bGÁŠØA¥)"ê7Ï‘s3{¹»{ïî]ØeŸ÷÷»Ü¹3gΜyf™™ÿyßóž™~Î,h€$  H@€$Ь Ìܬ[gã$  H@€$  H P¼ù‡  H@€$  H P¼µ€‹d%  H@€$  (Þü€$  H@€$Ð(ÞZÀE²‰€$  H@€oþ H@€$  H@ho-à"ÙD H@€$  H@Š7ÿ$  H@€$  ´Š·p‘l¢$  H@€$ Å›€$  H@€ZÅ[ ¸H6Q€$  H@@›Æ 2dHxõÕWc‹,²HXyå•SûJ@€$  H@@-%Þ ®¼òÊXõV[m¥x«²«%  H@€$  4–€a“%èþ€$  H@€¦ÅÛ4€ì!$  H@€$  4–@£Â&k;øÈ‘#øqãâæþýû‡Ÿþ9 6,~úôé_|ñ0묳Æí”{ùå—cùe–Y&tëÖ­dµß|óMxå•W—_~ë›gžyå;wî\²ü¤I“ÂСCÃ{ï½z÷î–Xb‰Ð¶mÛðÖ[oÅýgŸ}ö@[ò6yòäðÎ;ï„·ß~;Ì1Çq|=zô3Í4S¾XaùÃ?ŒõñűüüóÏ–]vÙЦM“`-× H@€$  H õh•qÞyç…Gy$Ò¼á†ÂQGEQÂÛ·oßp饗†^x!sÌ1áûï¿O›Â–[nŽ8âˆÂo.¿üòpÅWYÞUtPØyçó«£HüÛßþWɺtéÎ=÷ܰ×^{Åã!*i[²wß}7~øá5ÚɶUVY%{챡S§N©h?~|l÷c=VX—æwÞpæ™gF±˜Öù- H@€$  H ±šeá³Ï> »í¶[A¸á•ãƒ=óÌ3q[^8žrÊ)…c"êVXa…°À Äò£GŽbuâĉñ·ÿH@€$  H@¨&o¬o¼1ÜsÏ=ᢋ.*´1E8ã<î»ï¾°Í6Û¶=ùä“…ågŸ}¶byÁÄÜã?6ß|óB¦,HvÙe—EÏ¿—Zj©XÿwÞn¾ùæÐ®]»ðÓO?Å¢´+Ù9眒ØÚzë­c[iï[l‹|úé§á?ÿùO\&´òÑGË /¼pxðÁãyÝu×]a‡vˆaœÝ»wo¾ùfªÞo H@€$  H@&ÐäâFD¶üòËŽχz(|õÕWáàƒ÷ß Ï\n¹å ûº  H@€$  H ±šdÌ[¾QÅIAð~%/W¿~ý EçœsÎÂ2Þ­¼úÈ83ƽ½ôÒKá7Þ($D¡Ü?þ‹³!kÑEËéŸ%—\2-¾I®’¼p´áª«®*lcÄ%&Lxß0„ãlÅ$û!Öø`‹-¶XXk­µÂ¶Ûn[qƒÿH@€$  H@h$&oiìXj'â'B.Y~}ZÇ÷çŸ=tÇ/¬Æ+FÆÊš˜*®¾ÆTóÍ7_8þøã§*“VÞ‰!Üpûî»o4hPÌšùüóϼ~ˆ@©üóŸÿL»ú- H@€$  H Qš\¼5¦uxÝW†‘1’1eI¨¥ðH¶å…Âîõ×_guÌZÉ6ŒD%xÄŠä"„erÉrÌG"Œ±w·ÜrK`¼Ó ÞðÔ‘ …l–Œ×#D2×CÈpÀqß”í2þð H@€$  H@$ЬÅ[Ê É9"ÖëF(⫯¾ç‰KçN¸d22>¦yâ˜Kޱj/2T2x±J¹é¦›†;î¸#nb€ßÿþ÷¡W¯^q¾¶4žm»í¶ ‡rHṫ–‰·nÕUWS0~.Y~<_Zç·$  H@€$ †hÖâ¹Ó–Xb‰(Úr»ï¾{L‚G,?–Y2’‰0½ãÑ0Rø'Ûd“MâTüN<–É*‰7mĈáÅ_ŒÖ'Cˆ¾‰áÙC ÞtÓMq|Þ@æÃs—Ä&ž¼ýöÛ/íî·$  H@€$ Fhò©ÛBø zåg,Î6ùÚk¯…=öØ#î‚(;ÿüó‹wãç>þøã8ž?’ Ôf =zẗÉx8Æå•ÊhYÛþ®—€$  H@€$P.f6™? ÄŸúìòË/ó®ºÈø8ƽ‘Õ¡ÅdÚÉÒÄáéwú&œ²Gégß”%Á M€$  H@@Sh1ž·r!<øàƒáïÿ{¡8ó²õìÙ3à‘K‰M/wë­·ÆpÉBA$  H@€$  4c3œxƒõÅ_®½öÚh§k€—ì¯ýkXo½õÒ*¿%  H@€$  4{3¤xƒú7ß|ž{î¹8wÛØ±cch#cÒHvÒ®]»fal $  H@€$ <V¼åOÒe H@€$  H@-@³Ÿ* ¥¶ý€$  H@€ªA@ñV ŠÖ! H@€$  H ‰ (Þš°ÕK@€$  H@¨Å[5(Z‡$  H@€$ && xkbÀV/ H@€$  H oÕ h€$  H@€š˜€â­‰[½$  H@€$ jP¼Uƒ¢uH@€$  H@hbŠ·&lõ€$  H@€ªA@ñV ŠÖ! H@€$  H ‰ (Þš°ÕK@€$  H@¨Å[5(Z‡$  H@€$ && xkbÀV/ H@€$  H oÕ h€$  H@€š˜€â­‰[½$  H@€$ jP¼Uƒ¢uH@€$  H@hbŠ·&lõ€$  H@€ªA@ñV ŠÖ! 
H@€$  H ‰ (Þš°ÕK@€$  H@¨Å[5(Z‡$  H@€$ && xkbÀV/ H@€$  H oÕ h€$  H@€š˜€â­‰[½$  H@€$ jP¼Uƒ¢uH@€$  H@hbŠ·&lõ€$  H@€ªA@ñV ŠÖ! H@€$  H ‰ (Þš°ÕK@€$  H@¨Å[5(Z‡$  H@€$ && xkbÀV/ H@€$  H oÕ h€$  H@€š˜€â­‰[½$  H@€$ jP¼Uƒ¢uH@€$  H@hbŠ·&lõ€$  H@€ªA MC+Bn—€$  H@€$Ð (ÞšÁE° €$  H@€ê# x«Û%  H@€$  4Š·fpl‚$  H@€$ ú(Þê#äv H@€$  H@Í€€â­\›  H@€$  H >Š·ú¹]€$  H@@3  xkÁ&H@€$  H@¨€â­>Bn—€$  H@€$Ð (ÞšÁE° €$  H@€ê# x«Û%  H@€$  4Š·fpl‚$  H@€$ ú(Þê#äv H@€$  H@Í€@›i݆>ú(üðÃ5Û¶mÛ0÷Üs‡¹æš«ÆújüøòË/øqãBÇŽC§NªQe³®¾óÍ7_˜}öÙk´sòäÉaìØ±%üüóÏq[ûöíì³ÎZc¿Ï?ÿ<°¾)®MÕòãûï¿~øa ;w½{÷íÚµ«¥ô´_=zôè0~üøÐ¡C‡0Ï<óÔÙ€Ï>û,òçï=o?þøc¬ƒÿɾûŽk©I@€$  HÓ\¼ýßÿý_9rdIú §œrJèÚµkÉí YyÁ„»ï¾;ì¹çžaß}÷mH-fŸ¯¾ú*¼øâ‹aË-·œªÍO<ñDøæ›oÂo~ó›ÛFŒ؆X˜e–YB·nÝÂ:ë¬f›m¶Xî§Ÿ~ŠÛ7ÝtÓûM‹wÜqG¸è¢‹â&"rŸ}ö ¿úÕ¯Òªéú}Í5ׄ{ï½7l·Ýváw¿û]­m3fLxã7ºë®;U®Âzƒ 6(lCà½ôÒKaÍ5×lVbµÐ@$  H@€¦9é6Ù§OŸ°òÊ+ÇϲË.…Ã믿>à ¬¦¼ÂÏ?ÿ|X~ùåÃL3ÍT8 ^Î'Ÿ|2¼ûî»…uiAñðÃGOÝ6Ûl¶Új«€X{ä‘Gâ7åX`xmðèMK4hP8ÿüóã!ùûøõ¯½nxQÏ<óÌðæ›oNËæ4úX¯½öZXb‰%j\¼¡ƒ¥Ø"¤Yd‘Àÿ M€$  H@˜æž·„}§vŠb!ý~æ™g^9uéÒ%®ç¢B|x £Cü”jO:'ê(H;Þ{ï½þ‹Ruä Ú£dxÚð a´ƒ:óFñj­¶Új+®¸b¸í¶Û¢ èÕ«W,¾Øb‹…çž{.ôèÑ£†øÈ×UíågŸ}6V¹É&›„8 .svÛm·@(ìSO=_|ñ‡M×¥{÷î1 —$®–k$T>!ˆ% KxK…Ùò·†Øbÿüßhܹ–¾øâ‹XoÞ£L›ðxb„F_Ös-_y啨NÂ~5 H@€$ ÖM`º‰·bìˆ ‘Â+ìå—_Çw\UqEöÏ¢‹.Î>ûì8þ)•9ꨣÂ'Ÿ|‹0nh³Í6 úÓŸÒ.5¾/»ì²pÉ%—DÆ2â骫® —^zihDÇÆðŽì¿ÿþa…Vˆ/õwÝuW\Ï õ‰'žVZi¥ø{âĉáÏþs wC4`ˆCÊ 0ÃñÇüñø»M›6a™e– 'Ÿ|rA\à ;á„â˜' !$ÙgàÀqŸRÿb‡73oß~ûm³K-µTl‚3oJ°lc\"•1fI¼!Ú}ôÑ(&Zh¡|M¶œÚ„×°ÿþa•UV‰‚Œk‡pÊ ^¼S§Ÿ~zø©Aˆx˜1öï·¿ýmàÚœ{î¹!Š 2$zè¡ñüo¸á†(Ð/¾øâpçw¦*¢óC)\Dï¿ÿ~,‹¼ ,ìX´€—A™7:à˹±AZlJßСCãùo÷·$  H@@ë"P·‹ª Yàɹþúëõ×^xiþË_þ†@B@à½JÂmÇw x`|~ë­·â>ÆÓrÐAEáF¸à^{íCÔûßÿþwªÖ#ºxùçÅŸy„á„„ç!bŽ<òÈpì±ÇF«‡z¨F/¼ðB AÜe—]â¾±Ã;,~ˆq„."Æþú׿†ÕW_= °“N:©P¢ áÆK9íÅ{ʧüã± íùûßÿÏŸPF>xé…é8…ʦ, fxùO8mgü<Óøµ´>}Ï1Ç‘!Â&õàáʯCD K Œ´_µ¿·Øb‹(*a|Æg„m·Ý6ì·ß~Q\áýJ‰UðX%áÆx>ÆÁ¯â-·Ü8GÆðaùëùÀÄum´QôºÝ|óͱn¼`»ï¾{@ðr]Î9çœXŽN;í´(Ü`¿5ÊÔe\®_qâê`Œgqò’âºØ/yŠ‹·ù[€$  H u˜nž7¼K|’áiᥙpJŒ—sÆ6‘¡±q„÷±ÌK6^! / ᎄ—!œ°%—\2¾P ¼gW_}uôì!(‡¥cÅ¢//å„Wâ™!¼°9^âñ|=öØcñ¸Ôƒ×aJR„$¢éÆoŒãÍC «Ûo¿=І¸2÷¡~´¡R‰Ñ¾W_}5 RÆ•Q±ÆrÞ’Ð˯kÊåùçŸ?rÁ+JH-brذañsß}÷ÅÄ6x¹Nˆ|„%žIÚ o’Œ5*6‘d+ìû?þñ‘óÓO?·m¼ñÆñûÖ[oßxXW]uÕ°õÖ[GáL9®"†ÀçzqíH“º±@îþ¦›É›œÛTÖ"ûñ·ÉßF¬eíh! 
H@€$ ŽÀto¼ô‰ Á›F x2ö¥qF„O"€Ä <¡qé%9 RÈcùpB¼]|бCÉò =qˆ(„‚ ÃsC6Ã=öØ£† ¢=3 ƒW‹p”p‚¶#´$Œ1‰„/ð|ðAÜ—1V„õaÄÃ?<.óO*Ãù"4°WæþÁ‰¥PÃܦ:çwÞ°öÚkGÉq9'Dm-öQ7ãꦥá9Ä ‰p"á^M2‡"Þ¯¼òÊè)M'Œ‘»é¦›âßRñß ‰?[ï¼óN ‚Ï.7x+ðˆ, o“<0BrÓ˜4®W e;uyß’·´1â6 й^š$  H@@ë%0ÝÄ™÷ÈnÈØ²]wÝ5Š3Rù_wÝuqœ"ˆÐ4¼mˆ±ƒ>8Фä=ã’¥$x_’¥±fùuiõà ÃóFh%/ߌ‰ºâŠ+bb<3Œ…â˜F¶'Ex@éEo–Ž…q%ÜQH¸#F[S{ùÎ H„Dò"1BGûöí—Ów|qeîŸ$2’¨ÍmªwQCýˆŽM]¤´çòFÝé8ùõM±Ì5$¼oÓ,½ôÒQh!¶H˜GÚˆ!Žð–ámÃëÉß"mž1ã Ù±˜ð·‡ñ7À¹qŽŒMKs䥄$pI"ŒëOu×Ç<1«¯\lH‰Ò~©žE\% H@€$ÐJL·1o‰/Bу Áûà 6†‡…—ƒuxêÒ‹,ž!,‰<IHþ¶ÆkÄжXhÊ?Œ‡B´‘Ô 1¼~9ög/÷ˆŒ”ùyCT0V£%6!ñ ãæøà¡„ÃrË-GUSBcÿJ qÄùÒ6!˜à„rñ>üŒ=ºAÞ­|]ðæï­®ëÇø@¬ÜPF„>Cx]ëú›È·ƒe®Éý÷ß§: =•“¼O‹ÿ£•¶Ëò€$  H@ #Ð,Â&ëj:!„õ 7ö¯4Ób©c"zêòn¥}µ‰Ê”‡×«.áF=„Ë•kŒÉ‚"«6¯c]u¥$,Åe $œeC„[q} ùM”ȥ¶ýù;)‡{mû§õ$"©¯žrE[ª“ë‚×’9üðÖ•kxIšÒáVî1,' H@€$Ðr4›°É–ƒ¬ù¶”ùðo)aG5ZŠà@<¤„Õ¨³5ÖAÆLX–{mðãMMSS´Ffž³$  H@@MÍÞóV³¹ÓçãáO7½/ H@€$  ´Š·Ör¥=O H@€$  H EP¼µèËgã%  H@€$ ÖB@ñÖZ®´ç) H@€$  ´hŠ·}ùl¼$  H@€$ÐZ(ÞZË•ö<%  H@€$ M M‹n½o1Þ{ï½ÓÖ©¡ƒž‘NÇs‘€$  H@3–}NŠ·²QY°¡*ùƒlè1ÜO€$  H@-@¥í†M¶´+l{%  H@€$ VI@ñÖ*/»'- H@€$  ´4Š·–vÅl¯$  H@€$Ð* (ÞZåe÷¤%  H@€$ –F Õ',ùòË/ö(á IDAT?üæž{î0Çs®ßO?ý>ûì³ø»[·n…õ.H@€$вðLÿúë¯Ã¤I“B‡j<ï[™ÐþñãÇÇvÏ2Ë,S59mÏo˜uÖYÃl³ÍfžÙ~ú<—%ÐÒ ´zñvþùçG‘¶ð ‡ƒ>¸p=¿ýöÛpì±ÇÆß^xa(u³,vA€$ fG€ÎÙ^x!>ç8ØÏ?ÿºwïVZi¥&¶s¬wÞy'vwéÒ¥Á|&L˜xà°îºë†yçwªz¾û½xÃL3Ízõê–Xb‰Ð¾}ûâ͵þ®V»k=€$ hõâ-‘>|xxæ™gÂ*«¬’Vù- H@@ %0yòäðÔSO…‰'†VX!Ì?ÿüñL>ýôÓ@jî7Þx#,µÔRMzv´áå—_«®ºj“'U¾ôÒK‡X þ伿ùæ›ðÚk¯ÅïõÖ[¯l/Ü´nwj¿ß@ýô¥çÝzë­Þ­úlìØ±I§ aÈ=|lㆉ5*|üñÇ!ßÛÇïÑ£Gçw+,³ÿˆ#jô6º  H@@Ù-_}õUì”]pÁCÛ¶mãOÔâ‹/† E òÜæYMÔ Ÿ¼ñlæ¹]¼>•ÁKÅ»Ï|Êðãûûï¿ËÔÁ'o”¥Þâõ© û§PÏ´®¾ïÙgŸ=Ì5×\ñ3ß|ó¢ŠÖZk­øn2tèл7U»kÄ@Õ èy›‚”°HÄØm·ÝvÙe—’  }¸úê«Ã_|QØÞ³gϰÿþûLj_|1\uÕUaùå—„0Ы‡žAì›ÆÑ 0 ì»ï¾… z¯½öÚ¸ûtìØ1ìµ×^¡ÿþü,Ë~üñÇðæ›o†¾}û†víÚ•ÝÃVVå’€$  ´ ŒiG¨ñ<-6ž­º4Öýî»ï‹,²Hxë­·bÑuÖY'†'¾þúëq]›6mÞ(á–´zyvslÞ sÄ>üðÃ0dÈ((ÙÎs½¡F˜%Þ8<Ž„ObMÕÑý$ ò èy›Âjà 7ŒKO>ùdôª#¤g, 7B¶Þzëйsçè){ðÁkç&ÍÍv»í¶‹¢oÛ©§žºví6ÞxãØóÇᥗ^ŠûqC¿ì²Ëbï=d|Æ.¸à‚²Þ6Þ#úõë6ß|óx\žï1’²D››ªÝiŸûJ@åÐó6…Õb‹-ø©^ýõáÀ¬A‘µýöÛ/Þ” ·@,Žñè£Æm¾0a ”%DƒxsÄ‚ßÿþ÷±'ŽÞ/bàéuÃyä‘x3å¾ÓN;ÅuxñƯ¿ÑFÅuµý“ÚòØcÅ0 jÆãGV­Ô“WÛþ®—€$  ÌHˆ~ÁȶX®á¥Ëg—&¬’ßDØ`tØòü4hPì`%È2Ë,=m5ŒquˆÂtü¸2÷ÂŒg?Þ»yæ™'néÓ§OøüóÏ£€cù£> sÎ9gôÄQ€ã2–Îå†Zòð!>yŸiŠv7´mî' TF@ñ–ãµí¶Ûƽ#GŽŒ¢,·)Š.nÔ¯¼òJ âý÷ß=g”A<å „†Ã¸¡§t½)ãSŠ…O¡”ÔyÖYgÅòi\\>D3n(ú‡ O<ñD!܃}~øáУG€G‘‡€& H@h-xîñÌ%Š¥\c¬X2¢gÇÆw>º&=ï©—ò<Û‰x¡3—uxûx.§r©¾ôÚÇ¢É6ÞVxŨƒŽ×¼á9kŒq.¸ônÒí¶£¸1WÈ}%P>Å[Ž7Ë­¶Ú*Üpà S¥ÜÅvòÉ'Çô’K.¶ß~û8öÔ½Å7¬ OÕi[l¹ÃÓ6nÎ)KTúF€Õe„Vðà ä"oÜü äa@ØF‘ù2.K@€fD7ž©< Kž1"[k–C$õðlÇÞ@y„“u3žŽg3Q6Œw#Іqï·DÓplÞˆtÒ–ct2#DYJH†x£“1ƺ 6Ø &+A˜=ûì³q<|ìG9 !HfJÊÐYLˆ¸W_}5n¯ëŽ“†Qpžx ™€(ž$$›ªÝuµËm@õÌ”õÎü€]adBL½DîÚ¢‹ãå¢' áÔÆ b.‰IFš‚®uJ@@k%@h"áŠxÎvåÏd"lØ/  ü¾i;B©¶z9.Ûòãêî€w,{Ï×™–þ€—¯ÔqS™†~7e»Ú&÷“@k$P©¦ÒóVá_ 7Цn4¥¶øû ›iq H@€rв-æV×»ˆèªëÙ\ßvP*ú!W—pc?cSYS¶»©Úl½@Ž- H@€$  H@hþoÍÿÙB H@€$  H@zÞü€$  H@€$Ðèyk WÉ6J@€$  H@­ž€â­Õÿ @€$  H@h •m’Ô–š$  H@€$  4=F‰·Ö8Ï[Ó_  H@€$  ´ÿýïÃ?ü:vì&Nœç”d:æiœuÖYãt!|§e¦ Is=R¾k”x«ä@–•€$  H@€$0£@ŒuëÖ-tïÞ=0'4BîÛo¿BŽßI¸¥oÖýôÓOá»ï¾ #FŒ¨‡â­"\–€$  H@€$ð?ˆ7Y^¨µoß>Œ?>Џ6mÚÄm”K+KJ@€$  H@¨“!‘&Lˆ¼küNvL"¯ÎJjÙ¨x«Œ«%  H@€$  ÔG/lÒ¤IaÔ¨QQ¸ñoëð¾Í1ÇQ¸±ÜPS¼5”œûI@€$  H@­ž!Ø÷ßFŽÞxãðù矇1cÆÄqodìÔ©SèÓ§OXzé¥C—.]bød|•T¼UB˲€$  H@€r‰$ÉСCÃ!Cb¶É¹çž;ôíÛ7zÞÆŽ>ûì³0hР0lذ°öÚk‡…^¸Aᓊ·x%  H@€$  TB¡’o¾ùf dÚB$É89û쳇®]»†„áÇGq÷ÐCÅê{õêUÉabÙ_||ïæ€$  H@€$>ø Ž{c¢îvíÚÅ ºn„Lò›±o„L®°Â aܸqQÄñ]©éy«”˜å%  H@€$  L!ÀX·‰'Öð¶!ØHL‚xC¸1.Žïå—_>|úé§áƒLì}òÉ'qŸJ@*Þ*¡eY H@€$  H@9d“dÜÛ\sÍ]tÑèiûá‡Âwß}W˜”Ç´º%–X"¼öÚká믿V¼å8º( H@€$  H I ¬#›$ãÝÇ:’”0I7·4Y7Ë‹-¶XœJÑW©éy«”˜å%  H@€$  ä ÊæwÞè]c5ŽPID[ú¤0J¼p„Z6dª–ä »( H@€$  H ˆ4>ˆ³™fš)îš„¡’¬K!“©^Â,)S©U¾G¥G°¼$  H@€$ ˜@pœbp|'á†xË‹5Ö7ÄóÖ¬Â&؇ ‘ÑÉ“'.oq¬haƒ €$  H@€¦3ÄX^œ¥å$àŠ›W\¾x{m¿›xC¸½ñÆqæq÷‘}%YçÎÃJ+­\pÁ°È"‹Ä¸Ñ´­šß¤ë¤ ™0¯T;¾øâ‹(DX`R›ë\GvÚ3Ï<ó„N:•,‹À%Õh= ¿TÁ/¿ü2®žo¾ù¦ÚŒXþè£B¿~ý¦ÚVÛŠúö)·]µÕïz H@@c 
ð.ñí·ß–¬†±)<;˱÷ß?ðB¹Jí«¯¾Šï3Ý»wõïOØÏzzä™Ä——¹†X}Ç¡Îo¾ù&fëí"÷wÞ ={öŒ Òºô]ÎqRY¿%ÐÚ|Áÿkþ/ó%ÇwCÿçY6 ñÆËþã?|ðÁÐ¥K—пÿ(Ò˜ØC*ÍG}4¦Ý\guÂZk­•?‡ª,sœvØ!Þèo¿ýö²oêuü¼óÎ £G^xa]ŦÚvÆg„ýë_q=^Èe–Y&wÜq1{ +q±žrÊ)áˆ|æœsÎpüñLJUW]uªº˜íý·¿ýmØvÛmÞ{î9ÕvêåÁtà 7Lµ­¶µíSI»j«Ûõ€$ j8ᄳÏ>[²ª…^8Üxã%·¯ÜqÇÃa‡¶ÜrËâMµþþüóÏÃá‡^~ùå(Ìx>n¿ýöá ƒ*$32dH8òÈ#½¼äõîÝ;œxâ‰ñý§ÖŠ‹6”sœ‘#GXÐÞ)HQÎ{‰Ší‘G ‡zhøÏþ\Ú^ÎqRY¿%К ð= ´$Ú¶[*[¼¾®ßÓ}Ì õõ×_ÂmõÕWÜ$·Új«89ó$ \¶Ûn»ð›ßü&ΛððÇ·ß~;Þ€ê:±J·Q/½jxöþýïWº{ÕÊ<8Ü|óÍá¨£ŽŠ‚öòË/½vûÛß Ç¸å–[Âc=Ž8âˆðä“OFfþóŸ£®P([ ÷lÿý÷Éó–ßFo$7s`¹Vß>å¶«ÜãYN€$Ð¤ãæ™^ü9óÌ3Sm½û^uÕUaüøñg8Óˆ4ÄbŒLæ{È!‡„¾}û†»ï¾;ÜqÇ¡cÇŽq]ê­¯÷ YúŽÃË"®´t’3…cÛ=÷ÜŽ>úèâÕñw}Ç)¹“+%Ð $1VJ¨U Çtox¼ž~úéèq[n¹åÂüóÏ'«#¤Œ-|#ª …ÜÜsÏžzê©c⪃ç*«¬6ÜpÃp×]w…ây¸ O˜0!zº† Vc€!tèСqfõÚÚBÏW9¢óÚk¯!¢›nºi ±@¼î²Ë.1¤/ \pA´ë¯¿~ä³×^{ÅŽ¼èÄ{È~„aÀ,oÌèŽ ¦G’YÞ˱úö)·]åË2€$ j ‚‡¡ÅŸâá <ã‰ò!Ô²6CpÑÙœÖQª,ቼGì´ÓNñ½…6l¾ùæaÅWŒ¯ìÃ3•zvÝu×øþCX%Ñ?üq £,UoñºrŽÃsþ¥—^ t#Šqà†W^y%¼õÖ[…*ñÁ³öÚkÖ¥…rŽ“Êú-ÖNñ†‡+îˆI®±Œ¦{Ø$'ˆ;žzè*esÌ1G¼éwŽª¦¢1bD 'Ø}÷ÝãØ/ Øxã M9餓bÌ*7wn®Üd>øà(¤èIÃ5ʹ¬±Æ1ìøulìØ±áüc <‚ðPÎñ…‡¯”á +¾Ø„^rÁÛµkÃ0‘ŒLÆ6D•dìsÌ1Ç„_ýêW5΃íˆ@ÎpÊK/½4àí«ÏêÛ‡ã•Ó®úŽãv H@À´"Àsû¢‹. W_}uì0¥CyàÀ12%?NüùçŸgŸ}v|Ž#â¡$¼NæbëСC µ¸ã”ñbtPc½³I:§yþ®°Â qÏp†AÐéZŽ•sœ?ü0Ö—Ë¿ä’KÆsåxD8a¼_\sÍ5±úþûï¯qørŽScH È¿Ã‡MV Ët÷¼q’ô>qA¸ñ»¶7k$Tµ ÜyçÑs…ç›&7Ò[o½uªê¹¡m´ÑF1 ‚ØuÂnÜÀ »¼þúë£7ë¶Ûn+ìKÏÖÒK/{ÛXñœsÎ)l/^àÆÍ2½<Ö\sÍè‘d`3FïYÞ…¨dxãn¥ ¡·ß~ûU4øº¾}ÊmW©ö¸N€$Ðx_àZüIÇ"bÑBÇ)cëÿùÏÆŽÈc=6‰ßO<ñD÷öX6dï¼”2^ØèhÎ ;<` eHÏe¼q§vZ çÜ{ï½Ç÷Ýw_Ï^['vñ±Ê9ͼ[ñ;ÃÉ'Ÿó —ãw9Ç)µŸë$ÐZ ð&ïeË/WƒIi7W5j®°ndX]5NžÉ«iôºoŽ'*yËo`Ì7Ú|F÷î»ï¾…ÃS†²|0zÒðÐå/7ð?üáq;Ù2×]wÝxƒŽ+êùOÖŸþô§ØÓ—Ƽñ ÂðDæ­}ûöaܸqùUÓt¹¹¶kšBð`€$Ьбxî¹çÖhÏè”|„NW:Gé˜ÅèlELYCTY±•W^¹ ¼x_ ù‚ÐÈúìÍ7ߌ¡)¢‡wÞ1ðôÑ17q刭†Z©ãðl¦S¸ØóÎPê8Åõû[­™@^䗫Ťو7n`Œ«ë$ÙÎ ®šöÌ3ÏDOÉàˆ¥cà}#ÃT2ÄW2¼ƒÃ‡›m¶YZ¿‹3>æ÷¡!åˆ,’Œ—NØåÅ_{ñØ?…ap“Ï÷¦!ôŠÓËšk»¦+ H@ÓŸÀRK-.»ì²’ al;âŽéo ‘Œ!!‡I¼‘P-otì2VžwzÙk3ÆèóAd»dxòÎ?ÿüpÉ%—Ä0MÖãÕC4òÞÀø¸J¬¶ãðlf¨I±5ô¡¶ã×ïo ´f8¢’žIN©ô»\¦»xÃÛ…èxõÕW 7ÉÚNŒðJƒ €ª›/õ†È £nÂ8ï½÷ÞpÀz¨0zÇòÆ >]”üúür>l"¿¾®en´xø±$ÃS^”¥|wëÖ­P͘1cjü.l˜F ͵]Óèô=Œ$  ´0éù°Z»D¢ŽüØtžÇy#½?‘@u 7 {ÎT=d„Η%Q[Ÿ>} º™‰¡¤JÄ[]ÇáÙÌûAÞ83Ôuœ|ý.K 5Èë:w0þÏ¥ûM5ØÔÞ]TÚ˨ƒpI’o>ðî»ïÆ‚t’ùo&£f;JãÝ*7&¼®& Sª}Òãæ?ô”áÝâfUʸ Ó;–ÏÖD94ÓsÖPã< Ù`p1½…yáFdÈBh2x:œøMØæô²æÚ®éÅÃãJ@@ó&€ø¢”÷ „Sú°ŽädùÎWÞQòFÔâ«6Ki÷‰ á /ÜØ‡wÞ1ò†7ŒŽY™”kõ‡LÝxóÞ7:ˉ0ªä¡¾ã”Û^ËI`F'À;ypÅÿïÙV ›îâ †âFÆ ”› 1Ú„Qâ ã›Â²´º¼_”/ÒÜŒˆ;O±îù:¸áqc.•¸$•Ûyç£wîºë®‹¡ Hfðó&›l’ŠTü}ê©§Æ:u<÷Üsq5ƒ¨ù¼„‡ ÉR˜¯…¸sÂ>» ¤”öTËȾ…ç±\›Ví*·=–“€$  ÔG€¬ËŒ_#Q ºt3'SûäƒñÌ¥“”ç0s·ÑqK’‘RÆ;Ì?þñ8~ŽŽÍô ç›÷Œ!$;ýôÓ£ qÅ2ï$i\ï=L¦Íw)+ç8tŽâI{†Á>t2óÞÓ¿ÿRÕNµ®œãLµ“+$ÐJ $áV-¡V ãt›D¼q!‘ÇC={‡ð6q³¡÷ qB¨$‡n„60WI5Œ,“„&¤¿â:·Øb‹x“£—ª”ýú׿¤þEèp3¤·µÎ:ë”*^ï:¦L \ãáQl<<`ż0 jÞm·Ý¢ˆešbéÓ¸³âýòÊíJ„è´hWCÎÅ}$  H@¥lµÕVAÆør†IFɼgycLÜG;¼i¼”²x ÖÉA|òFç3™§šÁ8{:lo¾ùæREÈ$bm¡…Š»ÐaMFk¢JyÉÊ9q¢‰Ö[o½ø^ų½’¡r“?O—%ÐZ Þè„ÁÉ”Â&‹$ìÒï†~Ï”)Ãù𘛄¹PªiôtÆÈ +Ÿöž1iܼ¸éUK¸U³Ý\$âß‹'ý¬æ1JÕ…W’^ÀÚÄg©}¦ÅºæÚ®iqîC€Z^²è@åyZÛ° ^—x?éÒ¥K­eræ—½RÏr:fÉh™]CêOûàACœ¦qüi½ß@ã 0}÷<çLù•"ùÿM†xRDæÃ¢ñü“ù–¡R•hªéîyËãÂÛF¨"7ȼZ‰Mˆü>Ía™¶MkáÆyã‰,N¢Òx4×v56¶A€šÞ/ê{ŽÓkžOV­³ ƒº”.IÇp]cëJíWÛºâ1ôµ•s½$Ð8ÜO¸_$ÿXµzân|æ™gž(èþWKyKŠ·ò8YJ€$  H@@I8Æ´Í1ÇaÞyç¡“L@è$cßX‡wÑFRĹJMñV)1ËK@€$  H@È DAFâILXÇwú:É2†p#ûd¥¦x«”˜å%  H@€$  ” x'<ùø£áóÏ> cÆ|Ý‹ äë4ï|aµÕ× Ýº/º/Ø»0ЯÚ×èÇÉ“ÃÛo cÇŒ ½z÷ të^íCÔYçúÙ§Ÿ„÷ß7»OŸ…ê,?nÜØðî;ò,6“ÃB õË8u®³<9F©[XÖgãÇ ß~;!t™¿k}EÝ. 
H@˜®>ÿü³ðÝ·ß–lCÛÙÚ†nÝz”ÜV¼òÃÞϲÄusÎ5Wñ¦zýõWÙN_‡è³ÍïÀóøÓì¹ÿKŠñ®ñ½§¸L9¿ë;uŒÉæ’úé矲Lwê¬òý÷Þ Ý{ô,™Â¼œãÔY¹% Fhâmr&šžyêñðøcÅô™={õ }û¬•Ý8fÍæHø!¼=lXxøÁûÂìíÚ‡ÕÖX'¬ºúZ>ñ|’SO<&¼ðü3qN†ìî~ʼk¬¹N8üèKÞÀòûW²ÌÍó²KÎzD݆¾ùz8å„£ÃG}˜o¶¬߇y:ÍÖ]oÃðÇýÿ7¦™žV¸íß7ÇUÄÐ"Ê–¸B8êØ“³4¤SÑ©¾o¾ñÚpéEçNµþ’^î¿èTëYñÍ×_‡“Ž?2¼øÂ³Ù¦MVn‘°ûž¿+®¼jÉò®”€$  LogœvBxñùgK6c¡…—]ucÉmÅ+÷Þ}ÇpÐ_ ›n¾eñ¦ZùÅçáøc¯¿ör˜™ÎÑì½ÕÖÛ‡?ìwPjìøê+CÂÉÙ³õË/¿ˆª={õGdïý^¤Öz‹7”sœ?ÎÌX¼öêË1²iÑÅ–ÇŸ|F蔽cÛ=Ž9òÐpÍ ÿ =ìYØ\Îq …]€š”Àto·ß…ÛÒK-[l±0W‡QÀ0¸^©³‰ì>xÿý8#ùS?zõêèÞ³† i(%æ_øû!…¯¾þzØQaùWŽU=;è©pÆi'†«®¸$ìó‡ÿkhõSí÷ôS…ï¿»†x{ið á¿ìVXaåpì‰ÿˆ^¿QÙÍ|ÐSOd"íÌ8?Žd×_{e¸ã¶[Â!;:¶wöÙfƒ_|.œwÎéáˆÃþλèŠTtªïw‡ –\&ì´Ëî5¶uë^{ä1G½‘×ß|Gh— èË/½ œuúIáÚ›no2/hÆùC€$ÐýY,~Ô Sí9k–®)í†ë®Ê"UƇs/¸<ôË:FûïCá´“ óu™?l·Ã.Ù„½Ã1Ghß¹Ù3{r}trÖ{t¶îÚo+ûý¦¾ãбûSŽ 3Ï4s¸ôŠBÇŽó„£ÿk<Îyþ³‚ï¿'œ}ÆÉ5Ö¥õ'•ó[hzÓ=aÉÄlÖñ§žx,NZ׿ÿ,,¡C˜iæYÂÄï'…/¾¾Ï/ úF8ú¸SÃ:™—k®ìø|6Øh“°Ûž¿ ÿÊwñá1œr»ì¹=ß|]âP‰­·Ý!|úÉÇ1Œ²¶ºóëË9žGžá:øoñýâ—gøá×^ ǽU¨î‰¼ã³!*kÖ¥…rŽ“Êú- 4=fáycŒ[׺†öÙ„vLU7ñ»‰™'ì«L„|ºté’åêfËf,gû\YÌ9ãÂJÛj®—‡ l¸IX¨ßÂSí¾Ýö»d‚n£nò„ œ˜…8p#œ½]»(¾vØy·°çÞŒ½dÏ?7(œvÒ±aÇl¡‘3g"ôÇ,ñÊ{ÿ!ìºÛÞáá‡ï—\xNlûv[mþvıaîŽ7ñÓϾ¨d¬ûúü*ÜzóõáùL°möë­bX [xù¥ÃÇ#?ÊâÒ,´û7Ûì~½åÖÙ䀳Öåèéûhć±§c2^®ÿ¢‹‡îuxÝ%?ㆨäáFl~Ïž½³¶—¯Úe H@@‹#@‚´+/¿(ÜxýÕa¶LèÑ¡¼ô2³!'ÄÎÑtB/ ~>\|ÁٱÕg)!”üéÐ0könRlt_óíYÑÜ56}½×tÉ:f±³ç(Þ¿W^–ˆ<`ofÂŽTž¹åX9Çá™ß¥K×(^S‹/±d” ×HÃ%fŸ½]¸èòkÂ÷™@}${WÉ[9ÇÉ—wYhZÓ]¼1xöë¯G‡å–[6z¤&Oþ1Œ=:ó®=…Âbƒ 7 3g¢¬“]æ›/ïÄ1^EÃX·²ä ›l¶EɪfÉf@O·ñY’‹/¿6ôéÛ/<ôà½áü³ÿ‘ zîžÕñK,ÜvëÍ…ë÷ûýCß,û#†¨ûÏ­7Eo!¡ÅöEæÍ#,´®6tϲlòú.ël—yüð|‘`„°Œgž~2ÜrÓuá†l NŽ<æÄ°Ì²Ë&þ7vlè=löüݾaõ5×Î2z~®¹êòpÍ•—…%–X*¬°Ò*Sí7zÔ—áõló,™ñÂK³žÉÌ«wEÖKI“¾Ù€oÂQ4 H@@s$@´Hq’.’|¥×[²È†!¬»þF±ù–\:FÊuøÁaäG# I;–ÏÆ¤¯—EÂ`ëo¸qLþqo& K‰·X(÷ÏÛo½Ž=ê°Øáɾ¿÷ß'Lœ˜=׳á x¼¾ûîÛ6™Ûµ¢ÅRÇáݡ԰ˆöíÛ‡qYé†X©ã4¤÷‘€F Yˆ7Ô~ŸqC¼f@¨d÷=²°À‘™8èãÁ9=nn”«–‘I‰ ê³#>ˆE; ÜM†š0~|i]×nÝÒbè0÷/ad,eˆ)B+Ge)Cà‘Pá–¬CƉAÏià3ោ~ü§ýÂ%—_W2 ”ìù ‘d¥Ü#ËywÖóøä–o„‡"š÷?ðàlÜaïxx–ŸÌÆ>þèÃa—ÝöJMò[€$Ь °T8ûüËJ¶iÒ¤I±c•w½vÛ¡P&©çÝ e\\i•Õ ÛYèÛ·_¸ïž;b4 ž©ÚìÙgž ÇeÂm…W ?òøB±A™'ﲋÏg{IXzÙq=ãÿtØ2>®«í8¼ƒ|’e›,¶ YäQçÎõO/T¼_mÇ).ço H éLwñ†GgÞÎ]ˆ>ŠÉ0èjÓfÖ°ÑF2.vÎÂ$$<ù‡IaLæ÷égŸ…ÎYÌ8=g5¼y„?¾—…2”²Ñ™A¼$ÉÔY,²ýŽ»d"kžRÅ ëfÍÚ_® XjéX”¾xÊ aûz6°x‰ì„1ÞïÒ,ÉÊ{ý¡àõ¶ÒÊ«e·a«ÍÖϼqO”oìßüœnÌ[ƒ0$ܳ”á-$ü# 7ʰŽ}¾Îæ®Ñ$  H@-’À”çúŠYÔÉjk¬=Õ)ôˆ,$c¼}Þ¾õE!+v~}~™)ŽN9阰åVÛ†}ÿïÏ52H>÷ÌÓ1³tnì·ÚkÅñi¤JÄ[]ÇéœÍ“;6{wÊïÌgÛµëÿ:šóÛk[®ë8µíãz H újï.ªþ±JÖÈáÕ³c«ð´‘²læÍb<ÛÈÎ8nܘðq–id&ððP­·Þ% —<@=+×^wƒ,Ûå£á¹,!H±]uÅÅ1ÓüYVªž½úÄÍÉÕ²yæÒ‡‰«É>Y¶˜ÌDç”çE¬IB—̲Q]zñ¹1,²¸ „F¾3üí°æZëÆMófÙ%ŸvP¸ëÎÿÍæ¦û)ëü9†ONµ1[qI6þn‹MÖÍÆíý/T‚¤'L@Šˆ-edÁü* »•…O&#=y‹L Mëý–€$  ´mg›-K–Ö-¾[¤g:߈š7_-ä;b‹;y_ÈÆ¶÷ìýË{A©ó%íþ)'þ°ï1r¥Ø;ǘ·o‹&罇hž¶LcPßq[b@ìôe®·dŒk#éÊ‚S¢iÒúº¾ë;N]ûºM¨.é.Þ[\ 0Lðu–fŸlLxtøF¸‘zÿ›L°}•%1“Ù¢\=ËKõð«¶zÖãv±‡‡›²lS¥²Ôº V¾ïž;ÃÆY¢¼^ýY4&!>þ±,\G¨á?N=>ŠÌzSØÜ.ËèD˜$,M#pÂ)gÆd,ûì¹S¬û“OFÆtÄv8=«ÿ.ô ÂkëmwŒ(O>á¨ðt6¹9S0±æAÿ·Oœ6`ÕÕ×,/¿°V6{bvþ(›à1FÜú9gž÷Ùb«mbQnðœ{FXÍ ’’àe;ᘿ‡(Ú²¶]˜mŸ7óÆ•#—?žË€$ æL`‡~'Ò¾îêÆ÷÷³$f'Ÿpd`ZÒê'»õæâ÷ñÙ0‰k¯¾dÍdŒ_¿~ýKU;ÕºrŽ3ÕN®€šŒÀô›œe–Ð}ÁÞ™8Y'<ñèCaBÖE¯ÓÙ`Z"#ñRËn–Œ+C¸­ºúÚ¡ã¼S'þh(!vL2ŸiÞ˜ŠçÄ㎈s³ñ¼¥SµÔPöyäáâÔ:¯¾2$ðÉÓû0 7ÂÇJì@½ýß7Çç1SÒ¡OŸ_²O3ÏãWZµÆÐ…T_9Ç¡ìQÇÇÝm¹ézYÔRÛ°üŠ+Çö§zêû.÷8õÕãv H :fÊ^ü3yT¹ <8,¼ø/ƒl+ß»ô#G¼{§˜¤òë,|2Bï)ë».Ð#­n’oz¦e`.–ÚB!™\ï`¹s±”j(¡‹Ì[WJAY¼b2¡JHG}F[¾ÌÆ2¥S”c¤û'Ô“ã“ø¤\£Í“²±‡²¬–š$  H`F!Ÿ‹YçeçlhãáK¯K*Æ}—û¼-UOñ:æ)Kz±‹‰³·Ù~§‚ +Þ^ÉoÆñ“e²]öÑ$ êx}È è|š-{w'Á ß|pHáᾂCmÁ‡$Gâ3lذ0p`ùšªôªºçSvmÝzôÊnR»ÆìMùI¸8œp)¡Svåed2J>u¢ª1ºIR›•º×V–¶ä'ê®­\~=ñVjuµ¹Òº,/ H@h.âs1ë­Ëxájȳ³®:ÙFu)‘e¹•%Fé5eÌ}©2•¬›·Ù%+©ß²À´!ЬÄ7Ïi!Ц Z" H@€F€)£ô½¨aüÜK3*f%ÞfTÈž—$  H@¨„G|4 H@yÓ=Ûd¾1.K@€$  H@@iŠ·Ò\\+ H@€$  H YP¼5«Ëac$  H@€$  ”& x+Íŵ€$  H@€šÅ[³º6F€$  H@@iŠ·Ò\\+ H@€$  H YP¼5«Ëac$  H@€$  ”& x+Íŵ€$  H@€šÅ[³º6F€$  H@@iŠ·Ò\\+ H@€$  H YP¼5«Ëac$  H@€$  ”& x+Íŵ€$  H@€šÅ[³º6F€$  H@@iŠ·Ò\\+ H@€$  H YP¼5«Ëac$  H@€$  ”&ЦôêòÖspy-% H@€$  H@"Ð(ñ6pàÀFÜ%  H@€$  ´dƒ šfÍ7lrš¡ö@€$  
H@€N@ñÖpvî) H@€$  H`šP¼M3ÔH€$  H@@à (ÞÎÎ=%  H@€$  L3Š·i†ÚI@€$  H@h8ÿ›ˆsµ¡¥'…IEND®B`‚trove-5.0.0/apidocs/src/images/Create_CS.png0000664000567000056710000007623612701410316022001 0ustar jenkinsjenkins00000000000000‰PNG  IHDRoR¤:8fîiCCPICC Profilex…TÏkAþ6n©Ð"Zk²x"IY«hEÔ6ýbk Û¶Ed3IÖn6ëî&µ¥ˆäâÑ*ÞEí¡ÿ€zðd/J…ZE(Þ«(b¡-ñÍnL¶¥êÀÎ~óÞ7ï}ovß rÒ4õ€ä ÇR¢il|BjüˆŽ¢ A4%UÛìN$Aƒsù{çØz[VÃ{ûw²w­šÒ¶š„ý@àGšÙ*°ïq Yˆ<ß¡)ÇtßãØòì9NyxÁµ+=ÄY"|@5-ÎM¸SÍ%Ó@ƒH8”õqR>œ×‹”×infÆÈ½O¦»Ìî«b¡œNö½ô~N³Þ>Â! ­?F¸žõŒÕ?âaá¤æÄ†=5ôø`·©ø5Â_M'¢TqÙ. ñ˜®ýVòJ‚p8Êda€sZHO×Lnøº‡}&ׯâwVQáygÞÔÝïEÚ¯0  š HPEa˜°P@†<14²r?#«“{2u$j»tbD±A{6Ü=·Q¤Ý<þ("q”Cµ’üAþ*¯ÉOåyùË\°ØV÷”­›šºòà;Å噹×ÓÈãsM^|•Ôv“WG–¬yz¼šì?ìW—1æ‚5Äs°ûñ-_•Ì—)ŒÅãUóêK„uZ17ߟl;=â.Ï.µÖs­‰‹7V›—gýjHû“æUùO^õñügÍÄcâ)1&vŠç!‰—Å.ñ’ØK« â`mÇ•†)Òm‘ú$Õ``š¼õ/]?[x½F õQ”ÌÒT‰÷Â*d4¹oúÛÇüä÷ŠçŸ(/làÈ™ºmSqï¡e¥ns®¿Ñ}ð¶nk£~8üX<«­R5Ÿ ¼v‡zè)˜Ó––Í9R‡,Ÿ“ºéÊbRÌPÛCRR×%×eK³™UbévØ™Ón¡9B÷ħJe“ú¯ñ°ý°Rùù¬RÙ~NÖ—úoÀ¼ýEÀx‹‰ pHYsgŸÒR IDATxìÝ|g™?ðßö]µUïÝ’{ïNâ§CèR€)”ã8þƒƒÇÑŽrä ô =„$„@—$v\âØŽmÙVï½K»«mÿç]iäñzW+Dz½v~ÃGžÙwÞ÷w¾³BOÞwÞÃÛ¿´åmø7‚ (@ P€ @\  柚Uà>×-eã(@ P€ `0¶˜5‡Gïݬmž´®¯¯‡×ëEIIÉIéü@ P€ (pn¶ìüÃ÷Ÿ8vƒ:šñÜ’G¡(@ P€˜ o3¡È:(@ P€ À9`ðvŽ y P€ (0 ÞfB‘uP€ (@s$ÀàíAó0 (@ P`&¼Í„"ë (@ P€çH€ÁÛ9‚æa(@ P€ ÀL0x› EÖA P€ Αƒ·sÍÃP€ (@™˜|ÃÂLTvqÖá†ßçE_[ÆAx'O҃ъ´¼4XÌ&Ø'Ó/ö y™Üðº=èï€ ˜íV8sRC&¨ý^¸‡†àÁׇ3‰©‰¡ýü¯…‹ýûÁó£(@³-Àà-–°q7úÚ«ð…Í_ÁþQ›Ì? Î¥øü³_ÄÜâL\ªb–×Åâ“÷rlC;}øÆ[¾ƒç°E—.Çÿ{ôS!ƒLÈ~Cöüá!üóÇàgu›põ'ß·ßól£„×…O’ (pö¼Eµ“ ¤¯üå¨}µ {»Ò‘´z®Ú0³qUÛ:qt×^<ýËíhZXŽÙoY‚T©Ëµ¾‹i‡ÿF0€ƒ04[±å¹N,XšŠL…€|c£ð ¢¿#no¨Çòuß^L—šçB P€q'ÀQ¬¨—D‚7´cߣÏbÛ¯žÃaÿlänznùügñÙ{oÆWc£{þ²»þq=’ûÄjÔJ/ÂÕè=†mO5¢§×£úݸP€ (pØó×-=o5hmAS§ÖK7`vi™ ý™anÆUZƒuïtÁmN‚ÅfE†ÔózÅmkÃÑŸýÛßüI¸f•Ⲩ¦ÜA P€ À™ ¼^ãi¸$>_cc].Œy½…AnÑOûÝÔO¤jü’؃†êEŸ| Œ0Ør‘”’„Ùss¥¹u ªw¯<ï€ ý;Ì>Lc>™G‚‡ }}©p¤$£xqÉÄЬîƒ^ ¤–¢(;ÅÙꎲ^Œô…êì•Oê,B‹5¶„$Ì[‡É(ûªUÝèjèBgcOèØ`“‰8a´'¢buEÌ¡`czìÁDdyjÑ×ëB‹tAÒ'Žiå— ØÝŽ£Ç:18ä– Úb ;Ró –Ÿ<™bŽç=x°. 
šÇœÙÈrwÂä÷ Ý„ÌÑdBÚ¬rzûìîykçì,^ŒÄäTT¨È:´(·Þñsn89¯Áž?ymÔäÓxþK P€ˆ;oÓ¹$ hv¾ˆÚ«Jð‚aÞ ACô{Û$à0nÁCŸü öl«’ú'†S6˜ËnÇòuKðàOoE¥¤[%p›Ì»£ÏWV E´Î~¹«îÌžW‡ŠÊZ<ûÏ (Y¹Ÿûç½Ø(ÇÎ’ÐN+·ww7v\þ_øäM«ðïïš#é;Ñðê^|骯ài9ÆèÄù™ŠßƒüŠÅxäww¢<Ù.u¨‰ÏcëÿïÿëÑбƒc^ÕÕ¡ë‘P2÷¿âx•DXÙV¾UzñަߠùÐF$¢zÛ"dÔ’$p Öÿßþȯ°co£nˆ­À†;îÆ5wßÛ+ HõŽçýÌ ÖëDÛeï 'y  ?­õÁ#Qš%!oøîÿÀôÔßáùÓŸBÞÚ9oü÷§1Å|÷Í*0T‹r›8çÿ<9¯yÖG&¯M©äLTÙ¹P€ âP€÷¼E½(Ò]x#>þÑëð¥{WÃa߆¿Ýÿ¯øÈÜEX|õãËïD”=é>7 œzZÿˆV|¿k^ŒšK>ƒGöÄÎãcÿÞ/â¾ÇaýÛxËu÷aWÇ šõÇ–Š¼Çò«?†·>qª~€'¿û|có¥HwìFo÷nüý/AYK¡Q‚ý;öôôa·Ãu›¯AY¥®á¿à ›>ƒ/~ðOxV‚‘ïo߃gïÀ±êÿÁ]…Q±ë{xßÛïüj»ÖC¥ºç¼µ@Šs5®ýëQ<ùÊÃØùÜ—pŠSu¢©æQ‚‚‚øøG®DûÞçðì#¡jÌ#ý[ዜ ñ9?ôG|ìàyÛ[a¸ñA;v ÇŽÿ;¶~oInFïSOâGŸù!öžì^=×ÓÕ(øôƒXÿäNì{ò“øÂÍ«Qâòà™Ïü }ik°â‰#xþðßñ쟾ŽW˜ÑôÐxò«_Âß$vë¸6÷,¿ßûU^^üo'ùè¯ÍSšQ/ÍŸô ?~¦(@ œGö¼EÅWg‰p–ÌG^ЄË6ÑÑT‹¡®zÔÕ½ŠƒûìØQâG“ Yf$'bQi*‚}ëlÂ’]””ü ”— (Ù ÛÐæÈ@ió0vTW£U†;“eصP{v†V5gÀ™™‡âŠb”`5äc`¤Yæèu¢©¾žåið[}ènh Û¯”)(IDŠ¥¾ÎFT7t¢aHB®ERG ÊsÈ5bv£Í.쬯Ekûb´K÷”›X 0˜d˜6)GŽ]‚’<òÕ¸îtu ]†;+ç!ý©vô  ¾s%zG «Þ/,ŽLdU.Á’…³`ÍÍA°«cho“áÞÀ†úÑß܆~ŸÃú*ŒfNdf  0ņ<ä¦'!EÌúQ ìÉÈ·âBûᔎ<ß`?Fm=PMñɵñ˵9&ÃÄ]©óa”k£÷9éÚô¹P(ã¹…2~jÔ:íômá6(@ Pà< 0x‹oXú>ä-þüVàÕß»~óU|òéßâ/Èσ °Ëåú•óñø/ö€çð¤Hà$Á¡ÅètÂvéz,Kqb¸¡´ÉiNäIÛ„ÜŠBn¶hÁÖĵÙ!A¡]‚ÚŠµëNòÑ_›uïƒ!7(ׯyþ2 P€ @\ È_A.Ó(Ût 2æ_Š9ÿÖˆ½>†ÝýÛ÷šƒmøUí&,¬ëÄpK[¨ºÖg¾ƒÞÉxËã ù¸ø1ÒzC#j’B-M>ä$ ÅG— Ïd°aõª$X[|ØÑÔˆ6ïKH–g¨mÞ‡¤UW"£rÖ›-°È3ךZd‚…®Ž£¨ùõ¸i[¬æñhÆÕ~ ®yëÁXPž½Ö¦i’MÑ„éí’.ª`–È„ŠD‡Ýõ*ä'W¯î±ÛƒúýGñ‹{XžçÆ %ÆK¿/ÜÛ!=høá‡~ŒcÒM¦F†gzik— rm2)£÷à_q°y/nzú„ÏHóÁÉkÓÑî–ë#>åÒ o3})X(@ œ¡ƒ·ÓLÌ’à$«y¹Aìèôo7ãé¶&Œô Zæ{ü¡àIUééFpL†:}V&{ƒRÔ„J””IÑ©YS/‰0³eX4 í¾1˜vc`¸ ]2 µ­Û {^2åÑY2ãÒí—Y±¸I\ˆ äh• Ò:8†b“é›6Ë„5SVÒ&›5u#¦±w|ˆ9£ £Ýp¿T‡¡Þ^ÈȰjÎÄ"3vG›1Ø~T&rlÃAg.<™ÅX}ÕZ,][ƒþ,Ý\ÍF©K‚QõÅœÉ/§×ïŸôñ»‡àî¿ÞçĵqÚ,¡ÙÀ3ç£pM P€8s™üûxæ­¹ jn4ée*/+‡oC‘Ìz¬ƒËÕ‚Z™lpƒ3 …r‡PtÝç1kÁ*ü胳dS]o'/G²V±û˜Ô3ä6ÜrwÖâ‘Ƕàðö14d9Ð(]vkWάµ%0›H—!DIÜs ysPöÎïâ×®D®ºù+l1YdˆÖb€ýÔ]a9Oïcá[®…µ¾kîÿ:ö ᥠÜ㘨Ã+åÖèßs84#tí~€ÙË.•Wl%!ÁÞŠ^ééR‹15 Ö’R›,r¯áxÚLü[ ÷×¥õÈ£EŽ"sñ (ßôе‰äc¶%Â$=–6þvÌ=ë (@àŸ§h #]òH‹­øÕ¶4ÈèYÉf¬_\€ËdI‰^ôöõ¢¶n@zÚJ‘˜X„9 H6–ÁÒ6ˆ¹òŽ‘¶.ô¦vÀ$¯ÍJƒm¬{~ÿgtôúq ¿×}ì:d¥J/\´ãkéÒeÉ›/“¼˜Ÿñ8ª¶¿$7á'£3s ó“17×ê]3ædúh*,ÏÃêv£§® nÇb%xK•G’¼òØãhoêÆ¾þÌ^¿ 7TN9b«þtÖF‹¼ï5ÙŒ«Ò0pÈ‹#jˆX·ø¼ú}¡ºm]Hjí@²Ý‰¾ý¯¢íè>“žÃ®1ü}}è—aæ!]Ñ3Ý4U”IO§LÜ0nEWÿz[Ågá¤ÏÎGþoòÚl|ßFä—d¡LÊQÓ3•gy P€˜ioQDƒ®^ý ¿è¼Xž±åXT(ÝU†:Ô7µâ`Õ –5HËœ ™Ÿ€$ƒü¹Oñ`¹ Kn—™©½òl·ú¾U2†:ˆä‘zlùå÷d&ªõn@þû7anÚ4‚7ƒ„³‘˜Ó‹…y}øÅN™M:–뜕ó&b¶<Ñ$`dçȬÏùX˜é„¹Ý‡í‡^ACÏZ­¶mÀŽ?>„C{ªñ‡ž+pÕÇÆJ”G9÷ל,…Ýf“™¹…8(31ÊÃµŽ·PfŒ2t› „-uU2»ÖŽ4î8 ï݇ZSƒžX{;Ñ%ÜY3Øóf(+ƒÍäÁ’ôdììAó‘â³bÒGm¬W,ÄòÒ,”Èñ¼½æo R€ ÀY`ðÖQÃõ÷ãëÝűý»ñÁ_ÿó¼ß¿G ʽm@™,óqó¾ŠÅ³ Bî5×!˜¿ß«±à+o»û^ø3n^ömÉ”·)åEí+Q±n)îÛõxs’IA6yc•/X.⽸öš üõÿêÑ:àÀìk®Åì´ôPïj‚³åQEøòV+m%tü»×ÿn¹áÎ$-]*o.x¾´ç+X•åÄ" LdätÆK‚W]Ž95TníŒ&/ W| 9ÏãÛ{¶à;»~€ê=TþÅÿØ$'•âæ÷ñò^àåýh¬ñ"urªVɬë.׿[ûñÇÿþ ~÷ÍoãîõLúx†×N^›Í 6ÈdÓ½çî Z΢ (@“¼Ä¡û z¼d6dþª·ÁV´ÿ^м*ëÄ’(÷ÿ§aÅ¢,äfØ&Þ¸ Ï"3Êd€”U¸ê}·cÁÆîÐ{>ÕK™ÆûpÊY’•)ãÌê9þÁØøÞ»Pyù0.u.ÃÒU%¡÷¤†²ÉƒaK+Dá•wáöì>tzìÈ\)Ï;K·IH¨-òœ8ƒö¤¥(žŸ›>kÇjÙ%s&–$¤fa­ôÌåË ù2ɱçaþúwà]Ÿ[ŒµrìÌ¢\yóÂToPU©`r6²Š“pÃç XU¾ ùªÔøb09`ÊÝ„Õo,’Çm\)̳0gý|Ì `‘ž¶”‚J¬¾é³øÈ¥~tk¯BÁc»<ÖdÙò~,‘¨òêv+V”$#7YÚh؈›ÿÕ>_†Š 8K=œNºîTÛ7Ü€w–`uê*,Y)ÏÛ“¶™‚HÍ[‚Mw}é2<íMÌÂ| Ä$3ʵq$/Çò+­roàB\"5𩘼6Ò!ÊÀmürò_ P€ˆCÃ[¿ôÌwåoÛǽwsÄæÕ××Ã+ïô,QS$¹P€ (@ œs-;ÿðý'ŽÝ`0ßv¢ãæœ7ƒ¤(@ P€8]o§+Æü (@ Pà< 0x;ø<4(@ P€8]o§+Æü (@ Pà< 0x;ø<4(@ P€8]o§+Æü (@ Pà< 0x;ø<4(@ P€8]o§+Æü (@ Pà< 0x;ø<4(@ P€8]o§+Æü (@ Pà< ðݦ:ü`0õã÷d­v„þÑåà&(@ P€çBÀ`0È;» ò^êñµÚæ2.ÀàM÷MðxÆÐÞщ¦¦V¸Ün èör“ (@s! 
µ””d¤§§"#= ùy90›²hö”Ð$d­zÝ|þ Œ&3lV IºÜ¤(@ Pà `±Xäï²c^m4ìú8 ƒ7ÝE2M°ZP9»6›YéNÝ^nR€ (p.TgJ[gF]Œyå2Õ»s‚^cb[méÎä{˜D P€ ÀÙPæ9™©HJpœíC]õ3x‹pÙL&#Ô P€ ΀ú;¬&+p9U€Ê©&L¡(@ P€q+Àà-n/ F P€ N`ðvª S(@ P€ @Ü 0x‹ÛKÆQ€ (@S¼j P€ (· ÞâöÒ°a (@ PàTo§š0… (@ Ä­ƒ·¸½4l(@ P€8U€ÁÛ©&L¡(@ P€q+Àà-n/ F P€ N`ðvª S(@ P€ @Ü 0x‹ÛKÆQ€ (@S¼j P€ (· ÞâöÒ°a (@ PàTo§š0… (@ Ä­ƒ·¸½4l(@ P€8U€ÁÛ©&L¡(@ P€q+Àà-n/ F P€ N`ðvª S(@ P€ @Ü 0x‹ÛKÆQ€ (@S¼j P€ (· ÞâöÒ°a (@ PàTo§š0… (@ Ä­ƒ·¸½4l(@ P€8U€ÁÛ©&L¡(@ P€q+Àà-n/ F P€ N`ðvª S(@ P€ @Ü 0x‹ÛKÆQ€ (@S¼j P€ (· Þ"\š¡F\î{˜D P€ À¹…gÌ{.uÁÃ|Áµø,6Ø u  ºa÷[à°ÛÎâÑX5(@ P€‘‚Á †FÜózC—¡þ@s™`ð6I˜Í&¤:0äöaÈåð«W·—› (@ œ+À9ìv¤%a2r PïÎàM§¡zÝl6 ‚&#ä;Ã… (@ó(`6`‘¿Éò癋N€Á›Ã(_«ÕúÑ%s“ (@ Äû!ãæR°! (@ P ¶ƒ·ØFÌA P€ âF€Á[Ü\ 6„ (@ Ä`ðÛˆ9(@ P€ @Ü0x‹›KÁ†P€ (@Ø Þb1(@ P€ˆoqs)Ø P€ ([€Á[l#æ (@ P€q#Àà-n.B P€ b 0x‹mÄ (@ P n¼ÅÍ¥`C(@ P€ @lo±˜ƒ (@ ă·¸¹l(@ P€ˆ-Àà-¶sP€ (@¸`ð7—‚ ¡(@ P€±¼Å6b P€ (7 ÞâæR°! (@ P ¶ƒ·ØFÌA P€ âF€Á[Ü\ 6„ (@ Ä`ðÛˆ9(@ P€ @Ü0x‹›KÁ†P€ (@Ø Þb1(@ P€ˆoqs)Ø P€ ([€Á[l#æ (@ P€q#Àà-n.B P€ b 0x‹mÄ (@ P n¼ÅÍ¥`C(@ P€ @lo±˜ƒ (@ ă·¸¹l(@ P€ˆ-Àà-¶sP€ (@¸`ð7—‚ ¡(@ P€±¼Å6b P€ (7 ÞâæR°! (@ P ¶ƒ·ØFÌA P€ âF€Á[Ü\ 6„ (@ Ä`ðÛˆ9(@ P€ @Ü0x‹›KÁ†P€ (@Ø Þb1(@ P€ˆoqs)Ø P€ ([€Á[l#æ (@ P€q#Àà-n.B P€ b 0x‹mÄ (@ P n¼ÅÍ¥`C(@ P€ @lo±˜ƒ (@ ă·¸¹l(@ P€ˆ-Àà-¶sP€ (@¸`ð7—‚ ¡(@ P€±¼Å6b P€ (7 ÞâæR°! (@ P ¶ƒ·ØFÌA P€ âF€Á[Ü\ 6„ (@ Ä`ðÛˆ9(@ P€ @Ü0x‹›KÁ†P€ (@Ø Þb1(@ P€ˆoqs)Ø P€ ([€Á[l#æ (@ P€q#Àà-n.B P€ b 0x‹mÄ (@ P n¼ÅÍ¥`C(@ P€ @lo±˜ƒ (@ ă·¸¹l(@ P€ˆ-Àà-¶sP€ (@¸0ÇMKâ®!cÒ¢~4hÄPÿ(úäS0ÔFƒü›Ž¬’lùÉBš|2…Ò_oÿŒûTïªÁˆÛ‹ÉÓWÿ=†LñÉŸ ùd€¿ ­‡`x4€NÅ‹K’š(’\(@ P€8oQµúã<ôɯ`϶*<'ù¼¡¼ù÷¼ýó7áÆÏ߀+%¢KŒZÇżcÜç»ïùŽ4tãùÉSM­Íx‡ø¼[|Þ >CÆ\µøÃ§ßƒÕcx¤á|îŸ÷bå†ù¸n<"ž,Í P€ ¦`ðÉÇP‡æ{%ظ¨À@ÁÍøò_?EÉÍ(>€{7|5ðÙWº‘ñ½›Qš“‚ÂHõ\”iéJk˜ôy¢c1ÒWÏÇ}ü Ö`'‚­5øúõ÷£z§ø·¡8éõà^”_ž(@ œgo/À<®´VwaȺ¬bÌ).A¥ÓŽœaV¬_‰S9zòœp˜Œx}!ª®²>#I9ÈÎ)Ƽ’”Iì¶S|ºÅÇn0Ȱ²F“¸‹×À“îņ’ù(q&pÈ4âw‰ (@©^_qÇÔ'öZàë@CƒK#eÞ \j4Ã(Jñé?_{"ïënKõ¼ð1]µ Y çc£Šé‚Kì¥âóÖ“U‚VÀ‘оþH(ýÓÚ^™j\S€ ¦-pAo¿<ôjúªÑ5Ô…`P‚Š‹Ãê@³ïš{#Š’‹#ä˜:Éûê ŽðBà ,0XP2UÀa|ýíõ¸ï–ïá T«ÝÄop.FbÞJ|î oBAfrÕ!;Q¿ÿ(~ñoãp^FÌfäµ´ ?P„asV­ÙƒÖ–|ttWâöï@in*–¨cëŽQµð$”ÌÁÿÞµ ‰Žf¸GªðÃ;ІöþÐñÕa ö³¯Â¿}êÌŸ“‹|U…¡>”Wµ³ÚlG‹Ó:¶×—ˆ6,Æ{¿ý>ÌYRŠ5S«ª\–±=»Ñe÷a«áòPûRÇ“Oþ×бÑV<ö¥¯ ¦Ù‹v,?Æ‚L¬nú¾ó­§qèhZ¤ÔÉWÑ.)yxûW¿‚Y…™¸:O&‹Lœ¿:ÏcÃn4N)¢ñÉ­à' P€ ÀE!pAoÕ½5Øßñ Zûˆ¼%Ú“ÑïÀˆwô4.X,–df&À4Ð OW#Ž66Àš’£;Yr›Å¨†µÅ/n ÜjÑV[…=/îGmVF¬jÀpnÉkl°áXãºPp’.œ=hÀámÛ°³¸ Cv;æ »$¯c6Šs^Ááí8\7ŒÍýÕqؤsËÕw_L¢Ø†£þ²• IDAT–+‘j*„?0"uµ «ù0^yi?º†CÇWÇödè·&U5K*C•Yɰ™Fdög;ª^x¯˜Q›‘:6Š~Kúeö¨K;µˆë>èmÃpG†LZh@Š1Y+2å'ûŒÉñå>¹—pT&,lk0bÃÀ(²Ô5óö£»½- -¡@Ì/×)à‘kêBÀsÒRlñ!×§Œ=“Æ/ïØz‰å:’íb<ÑX}¹ù,œˆ‰ (p \ÐÁ[Ïp‡ü‘oBkO“ûl´k’äHF‚5c>–{Xò’t|ã+;qäÛ»ðâñ—ñŸ—?Kú:¤–\Ž_ÿñn”&Ù9Y“ÜëeÜ.½^ßÄË;[°½ìv|ó›°bY&ÊŒÏâ±o>†?}ç'øôAÌ[³÷ɾY“e_3`ÎÍÇÆý›ÊœX—êéYþ{ô߆ŸsÁ3/ˆ«7J´²kÜGö†f¿®]¶ s—¯€Éºÿøþðûo=…gÓÞ‰Šëà·êÆ¿£æ…ýøÆÛîÇýÿîFÂìÅøéC·¡4YF'Ž蕀ЛŒK~ô,(ÏûJ-°%Ú`ŽÚë&³mu>ÿìVÔîÝϬülïBþ¬…ø‘:ÆI>º“Õoš“a(¿Ÿ—ü~ Øüûþ„+¾¼½Ž2”½óËxÏ¢t¤õŸdülú;ñîÛ7ãÖ®A…œg$cÕϪ§þ°Ü¦(@ \Ètð¥ÇMîK“À-%xSûT¯\pâ)mÓ»Xf˜’ó‘¸ê&Üö‘K°¾± ÇŸþ T¡î¨?ùF2ò"S~>põ,81Œ`ÃëèÆa—©ó—¢(+¥ÎT¤V`~Å«è^½ »kŽ¢«1‡:‚(Ê>Ñcrl™˜_žŽâÜ8í—,DÞ m˜åoBCÕqdÚ$šÚX†ºš:T©“Ç–ÌCÙìÌ„ñØ!´65ãÀöUóU1å2šfX†Â|?®¸Ô‰cÒ3Ö[oÁ‘v¥;ªdâðG&,•˜[–Ù%NH±i,'|þõÿ­’^³jÔmy/ô½„¦ƒõâÓ†ŒüEÈÌ]€_?6¹å-â"“`²#!I ªžµ^Ô÷uãÕW1æ)…³`6\[g¢V×ÉÆIë!¯¨eδ¨Æi Ü#™‰ (@ Và‚ÞΪzb Þ÷.Fº›ðÌÀãxt/ZíÆ¯¿õ oBê®_W» Û÷BÃÈê,Ì)/BBÀ/C®. 
Ê=[©¹X¼,惵êJÇ¡và õôÚ‰Åà,†5sæd‘zhœ †yó‘“»s~l;V…¬ƒ„7…8^Ûˆ#UÍ0Ú®Eù¬$Ì.CðÉ*t¶wá¸Ì|­,ʃ3+ Ö )³a0äcÙêL$Ôµ ³;ˆª?ŠS$xsŒ\oæŒÙ¨È’ûù"Þ°¦µ2l=áóQñéªÚmƒO¢yg=Žv׈Ï?&|Þ›¯˜…Œi|Ë‚Ac8^]ƒg_¨…'x% ³çaÓåÙ°KÜìOg\YR€d‡ –á¨Æ+Ä2‹Á[Ø…ãG P€¸Ð¦ñgõB?Å3oBz®ûÖK¸Òß…¯Öáû7Ü‚-M/cß?ñ‹CQnqaMM=F]nx{qøû×ãm?1Ab©ÐôÉ=_2l;"#·™£98^%·zÍžª]jhòT”ï۞ŽfyËC›O‚³!lÛÓêývÌÿðǰ9¿‹Æ†ÐX߈ž¾~¹WlÇþ jå»Ç¿8qGž‘þ±Q¸ä…¶ 7jŽЫ^ Q4ÕñOo_få*¼ù{‡q_z÷Zkñ³ÛîÄ–Æ}!ŸW]ÍåA,WkS,Ê­öçŸÃ“?ŠŸ7Ë=7¿Ë—V†ò«¾¤Ê¶^g\õÓqï/Œø/³ôÞÉÉØ•';äs\(@ P€“ƒ·i\MƒL"°8d’‚LQ°H@¶x^ªFhlA[§éIfd¤¥Â"3F6¹iÕÍxãÒTä§žÚí“Y‚Òùr/–ÄgS/ä–—aÉÆUÈùq#Ì-CxõUšè7ä`Þ²"¤È}e6ãÒRH “‹ß‚œôÐñÃë·$¤¢|‰ s³Â÷œÙgƒÉ K‚S|ŠaHžäÓÚÀ :ÞTÁ›Af¿ºkñü–:Ô¶J·`æ"\»©‹+³¥ÎñEÙê3—߀³Ò°¢4ÔUyÒ hÆ™§î:)?P€ .Do‘®šÏ ŸÌnñøäž,›<`ÖŒ$»Y† 03P^–ŽôºA¹Ï­ƒx¬VdççÂ!7wÙe¦hÖšÛðÛ+±¢+ÇWÇú}ò˜¼A3‚’×.Ÿ:¶{0Ò‰O'MÆ0¥qÒÇì€YºÅÙ¡æ ŸÚ¡Ï@P³©ëõyª0 ¶n«C]]š*pÉå¬xC: ×nÆòô4¬’H¥æ¹gPÓß/¾û¸äU[{~ÿMüï ë±ù’ãÝwýÿ”‘¾ñÌxKÓË`™µkË»‘íÞ‡}æ'ØçÎAOùZl®0 Mî[380\±ee¸ÔçCÇŽP»o/j¥Ÿ¼ÆªõðãxðÝëðæõwâš·|•g£ÕŽ4NS#,›× ½Ïeû?ü˃»Ç3ZáöŒûÔÖÉ=~â³áj Ê*§>à®<„¿õ<ëÊGÚWáÖ{ßù©É¡gÒiG7ˆ­Þ¸iûVÔÔס^ÔhÆ2˜ (@ Pࢸ {Þò,¶D[ŠôŠ%Oùœ·$kLÒë4íÅ‘‹=ÁnÔ7‚ÇãÁóÛåýœö!yÁzŽtÊ=kòàßÒÊe(vÚ#Ï£0J1gÙlŒì¨j®Âѽ;Îg‘µàÀ¾n…1/ŽœÔÐã+¦g&ZjJ”aØLäçØÑÑoÀ@ý€<ì7 öœÂÐøãCŠÒ#˜XŒ‚Ê ,_W‹ÆV ‚ر-M†&tËLÕÕÃðæ$ÃQ.3M “C‘ÓöÐgT3Du>Ç«÷£J‚ÚmÛTp+Ï|ëi ùøRÅg¶ø$›àT ­¤gs´µí2Q¡Ã~ †ÚÐS¿{}Èhth1Øó‘”âÃìù¥¨˜0®•w¨vI•ó g$cþ—ɸÿ¥(@‹Kà‚Þ R ÐïëG‚-!jð¦·ry ‡Ã21½r×ϰöc˜“»?lß‹;þ#^Þ3‚·>¡T!<ÂâŽ7ã=wß+Ò H•‘D7áÖïÈ̈Ãhªø~ôчѭÁ\$87àkOJÞš5þ*©éDoAé½2™°zU16™íp ¹‹¡tñ*”NÖ-7“.ÃÆ÷J¯ß[æb`ÅàÈ?ºñ¡|{"‡zÝz|î§ÃÊ óq…´Õ"?§óÈâÉC© yH±ÞçÍßû!ö¼äÅUh¹T¤Vw¼I|îÄåvƒ ýFŒÝà“ÇÔÿÏînÁ޽nŒ¡ûÿßÐÏO´êÔ!g}DÓ%xð§·âÝb<*Æ=rž/ßÿüæ~-ccm× (@‹HÀðÖ/=ó]ù[þñGïÝñ´êëëáõzQ"/·¥i¨IÞœ0zo´ç¸™Œ&8侬ü¤‚ÐzºçàwÁÕ~ Ç;Ýr«gi‹ŠºìH•™žiùùÈ“ØéÄýkò(÷ªwÕ„^¥ú¢ÆySƒ<¶ly™L,°Ê{ ÔÒƒ‘þ!4hÄ€5Æ„$Ì[zÑýxÚxÉ ÌuµÁÀðh¨-öìÙò\4'†£#ðûQ÷r=FdˆW{5—„jR‘Å‹K’š5ÑÔ 6Qy«wÕbØ-müØ6ó”ó Æ[4þ¯æójë¨øË7hr÷qæ ½ ÒL£Á+oXpÉî Ðé™hOŠ é®f=Ö‰Á!w(ÀÓפU9Þó&E™›+íë‡_ŒÕyÉ}‰'‚ÐHÆZ \S€ .l-;ÿðý'ŽÝ “(ßvAoöe`ë)@ P€ ÀôôÁo šžsQ€ (@¸`ð— (@ P€Ó`ð6='æ¢(@ P€q!Àà-..A P€ ¦'ÀàmzNÌE P€ âB€Á[\\6‚ (@ LO€ÁÛôœ˜‹ (@ Ä…ƒ·¸¸ l(@ P€˜žƒ·é91(@ P€ˆ oqqØ P€ (0= úÅôÓ;ÅÓÏÕÕÕ¿_ÿ>ÓÓ¯ƒ%(@ P€ ÀL MVÅàm’âÄÆ¶mÛ0<<|"[ (@ Pà< 4¹’åèÙ¡0x‹r!222°nݺ({™I  Býx<´´´ ££kÖ¬Ù̯Y$/¦Q€ ¦+ðÂÑ>ØÖÊοªQÔ¬V+òòò¢ìer$-xÁàà z{{‘›› ‹Å);Ó(@ P€˜¦@z—Ar2x›’K"^¯wÊ<Üy²€Éd:9A>Ñð&P€ N[ÀçõM–álÓI nP€ (@øà°é×Hõ¾i‹Á º+º§K­µÏj[-‘òŽï9ño¤<‘ÒÎv}ªþðök­ oOøg-ŸZë÷éÓõÛ*–O;¦öYåÓÒÔ¶>]}V‹~¿úžGí×§…ç/3ýúúTùð2úýáû¢O_fª:_k}áuê^§~ßtËéË„×~Îú:£•‹–®ÊªE¿<åÄuÐö…·CKWùÃ÷iuDÊ)Må–®Õ-O´rútUvºmÔ— /£ß­N}­|¤´ðóQŸÕ¢•ÿt²‹¶?Z}‘ʄקò„—×ÖŽ¡Õ)¿~_´ýÓ­s:í ?†v|}Yýñôé‘Êê÷G+-];¶~¿JÓêÔ§kiámЧG«/Ráå¢K«3Úq£•‹–>õéÏG__¤6NÕý¾huêóhf‘Ò­µK+£}VkozÓØÖÃOUlºùT3÷|Ö7•‰~ßùlãt=ÓùNçZO÷ØÓ­3Þë›îyœ|§SçtóžOï ¡Óõ™é|Óµ9|ÓÍ;Ýs9_õM÷¸g#ßéÔ9ݼçÓûl´QÕ©›Ž;ð_ P€ (pA°ç킸Li#Õ°tÀƒQ—WŠ@ ÂiŽw›a2›`M°…ºŠÇ°õ™½ðz|ðyäÁÊ 0°jÿYrÒ1d×â€Ýj†Í¢e¯Çç‚OŠùÍãÇ1Ù-­'æ6(@ ă·i^Šð®×ð1èðýZµú|ú<úíXyµýúµªW_‡~[ËíØÚ~ýZË«¯G¿­òjy´ráûµôhëSòûäiÑ áËÿþg¼| R0ò{-6 rÍb|ì‘cu ˆtÝ‚Á1é?~ÿøñ‹xêgGPvßý(-ËÁÛ‹&2É1u?Ç—?ógì;îBûš{p×[—àî7VNžj×Ö¯½Žà{–ã£ÿ ¯©Äe[ª_éž·–Ožúm]³O«N}úm}}j[;¾ÚŽ–Oå ßþY•WËtëSyÃëÿ<Ýú´|‘ʇ§éÛ© *M-ú|ú:ôÛã9£çÕöë×á–3QŸª__~[íÓŸKx^õ9|9m oSøçð6ª6…çÑÚ©åÕï×o‡çÓ>GÊ£öM·>}ÞéÔ©?ž~[+«­µã«ÏÑò©<áûÂ?Ÿn}‘Ž©Îé´OÕ©‘Ž¡¯/Ò~•¦}>}»ôÛã9£çÕöë×á휉úTýúzôÛjŸþ\ÂóªÏáËL·Q«ŸÁ›&Áõ¹J_ÛX?zZ‡ÐÕ>ÛÊYH4‘Ö’Ñæ°ww=ÊfçÀ‘l‡]ò„zऎ`WFú;Ð9Ô`Û,In HåeòÝhkE׬NôŽ`Tv©FHð‡>y¨p'šeC†<^¸TQùáB P€ˆ7oñvE^‡í b1Rrqû£ßÀò”Ìרÿj©úîõØyü >|ÛC(þé­0®*E¹ä =UNÆ:ƒÛ^”.»øWåbï+­ðŽÈÎå*lj%ˆ< £òo‹ôîõ£Q"¿r‰Î¬² ãìjKCCWJ¥ÈDØw¢0·(@ P€q$ÀàmŠ‹Þ]ªÏúZ÷éëߎVg´ôðòáŸÃËé?‡wý†—ôY_>ÒþHiªLÔrê~4õ3±¨|é)“‘ÑТu7­Y‰vS'Šn@ŸÇ…fé+‘uŒh墥GjWxZ´²ÑÒÃˇŽV.ZzxùðÏÑÊEK/þyªrSí ¯Gÿ9Z¹héú²‘¶ÃËé?_È¿ƒ Þ"]mISX‘£d»à’Õ9½–ÿCŽv¢š‘Vgø/ÃtÇÃ)GKám49¬°Ø,0{á—ÈnL~T™ ô¶ùÜÝ8Ö„i‘Yeùèwí†u0ˆÖá R€m¢áAÙ2¬HOè–ÉntôáË•ùc2Ñ¡sæ”D˜êòkÕ íÑNœéxáßï×XÍd1ýïàdânÌtϰ9,N˜éï÷¹úTÚÂàM“ˆ°„zm"ì8$}0£]àÓ(5«,EÍ0ÅŽHíÐÚißUMîŠVNFu”}*H–$ 
}–4•¢ëÑ:þÌó¨ªö¢®ü¤Ûl(V_`Éäï:Oã6lñ]‚+R7àªYKQ^ù9´‹ñÀcwá3—óÒNÃá0ãòÍ%h°§¢öxÞrFºGQóx5òV–Û.Çø‡L›úƒ=€'~U&O5êÆ™FªøL®ó¹ªOG½"÷tÒÎÆ9kÇ—vFjÇ™~¢Õ)]ó˜j©ÜÙº6‘Ž5UÛ¢í;SÃHõžsž©óUí½PÎY³és­õE*w¦–Ñꌔ®yLµ/7þ÷r¼ƒ·)ä48µÖ.êÙ#îÒꈸó gºÞ™®O;5UoÔºC=mª›ë :ºñíwA‚LXHÔ O¬=rä–—á{?z'.­ÌF–|j²BCšw½ ¬¸©E%(5‘³zö¶Ûð?Û¶``ÙS%gè8R@7b(ÌÇ@u/š«¶alc\ý­Ø¾=ˆŒu€GÝ+§–‰viÅÆcÿõ}¾©¾ ú|gr-´²3]ß¹0Ô·y*+íÃ×úòÚ>•öZêÒ—×¶µõ™Ô©Z½g²žézgº>un3Y'{Þ¦ñmQàêGë52ʳøL- ¼ÂÿCsŒXRõp…†']ðûÑÓÔ ÷¥`QÝj®>x¼~tø‘ºb˰jE±Ü¹8TÏ›Ááþ~t7¶cÌ<†‘á^ô6»ð™1âJ]õvÏ‘Y¥6¹ïM ³JF‚ÎT+õ#Ð+ü6¸}£hp`…Mzáì’'”s<¿œŽ*uÑ/ê;®¾ßêZÍÄ¢êS߃ðïÂk­›¿ƒ¯Uîü–›éïÕù=›³{ô™¶âïàÙ½^ñP;{Þ¦¸ ~ÿ‰§Žé·§(Â]aÊ-ZàTOÅ•‡óúƒË’·w>ùY,’Ù¦•^yÇÖ¯açfÜx_ Þþ¯7aQeJT]R¿?ôxzÔÔTãÙgŽ ªîC8ò°ß•ëÂ+`0n¯»zWÀçÍÅæÐ1‚ð›¬^ŠÂ£Íò8’´Jz¯<Í·¾xî§Ì5ù\ò ` Ø¥] åî·×Í+Hfúû=S þë4ÓmÔ×Íí³#Àk6}×™¶âïàôí/”œ6Æ•š²Çhå_YÂÿÏBû¬­#™Œ÷õÈp¦ÑGJ %xs.z#fYŽâ¶KþuOo…¯©kç®G±T’à—‰}GÐ1hE­á¼ûîH²˜'‡[G»{Pÿü lÅÑãÀåjŒU-3‚Æ\$«aðU¡f—<×Í•‰®ì…rüÖñGH6Õ¦€ê…šqUE¹P€ âE€=oñr%ØŽÈ*¯F¾#·_ñn~àoè¬]€—o]‹t»9¼ùÛ£w4-IWàcŸøŠ“lÈœ¨¡ûè<ßÿ¶¶ a( á] U-=sÁ,$=ÒóvU/Hðf-ÇpÞfU¯ P€ À À¿XÀEz½6Ñ‘W‚Ê~7Î?Ž¹î­øêü u}ð¹Ü¨Ý¾gÊn~/fY­HÓ!¥¥:qåÆõh­¯ÁÞòß°¥¼4ëV≿v`ÇŽAlØ< 6»%,?R€ âS€=oS\—©†û¦(Æ]:eÕQíÓ¦¨±J5TJ›¨Àh•篢rNÆÚ€gŽîÃÑÚGÍn’e6d—'Ã*ÛF]9£Í Gq!ì/tÂ4(oS £žù1q ‹]ö§'Ã7Ò ã˜™f=r¯ÛDkBíUõ©æèÎ…› (@ó%ÀÙ¦ÓWÀõ7êm>MU-]¿O;´¾l¬rú¼Zy­ŒöY­Ãóiyôé*MÿY+¯åÕ>GªO¥…—×Ê©:#Õ« õyôõÃ'SB3AÕÄyœß'æò#7›×¥þÛ¢oºãM(Ü}O~ì›øAq é…X»5 ù¥IXµn|ƪªJ-¡c%'!xÙZT~ów0VõàáúÍèpëŽ!ä Ê#C Ì^Xìé(:Q£f«ÔZ&,H;|¹Å+”ª"¹‰%êù„í×ò«µÞF•ÿžG_V;ž>-Vùðcè˪íð:§SŸ*§Ï§>kKx}*]Ÿ7Z{´rú¼ª¬–®¶µEŸGÛ¯OSù¢¥ë÷N}Z¹ðãhéZ]Ú:<_¤ö¨´ð|Ó­O˧/éZ>­]ÚZ_NK‹ÔžHiÑêTéúz#µ'Z}ú6hÛÓ­/<Ÿ¾¼Ö}ZxÃ?Ÿi}áåãýœ5ÍJï¡öiéZ>µÖçÑöëÓTžhéú}j[-ú²±ÊéóŽ—>q,ísxês¤zUÚk­O«S_>Ò1´|j­_ôå´ôHí‰”Æ šØ4Ö‘ #¥©ª¢¥kû¢]àXå"5S•‰VŸ–?R½‘Ò´ö©u¤/ŒVŸ>Ÿ–­>m¿¶Ž˜Ïœ ”Ý…/üR¥€I‰XC½]'%UÞP| æxñ»K>c¢“ ¶»®…1É“úÏÐq‚jõjÜõÃË$>ôÜjÇ­«Õ›&Ž‘°æ…‹ðÀS>™Ã`F’Î^t dûå>$f;a‘4Uo¤vGJSí _¢å Oÿ^ö9Z¾Hé‘Ò´z´u´<‘Ò#¥iõhë©òDÛ-]«S­#剔-¯V—*íw&Z}SÕ9U}úcjÛÚ:Ú±´ô™úÔ×ë<´¶ik­¬öY[«ôh†ú<Ú¶¶ŽVŸ¶_­£å‰”)M_—¶-_xzøg­|ø:Z¾Hé‘ÒÎW}ê¸ÑÚ-]ßÖHy"¥Mum_´ïO´ú´rúöhÛªL´úôy´mmíXZz<ÿjçÀaSM‚ës/ Յʼn õÒ© ò¬òMÍKL?ñ‹ê ”¯¾u+§ÌZÍN˜LtžØ”4yõ¼ÉŽly·©ZTßZpò¡$þC P€ˆ[oS\- Ÿ"ËiíŠ÷úÔÉœ6Îd3Y—vñfºÎx¯ïl]gÍs&Ö3m¨µi¦ëéú^¯×f¦ã½¾×ëu¾ÎûBi#g›ª+Å… (@ \  Þ. 
ÅfR€ (@%ÀaÓ)¾Ú;§ÈÂ]1”!c q7(@ P †HíîééÁ“O>9œÌ¢ ØíöÐ{L½^/z{{Ñ//ŽWŽÑÞmª•ãš (@©†ÕD½ñw ±ç-ŠU__?öï?e/“§+pìXõt³2(@ P€Q¼ÎY@ÎúÐ^oQŠKfãº7|8Ê^&S€ (@s'ðr}þ´·&t@oQÜÕPŸÝ~ÒÃÁ¢äd2(@ P€8»«mòœm:IÁ P€ (ÿ Þâÿ±… (@ P`R€ÁÛ$7(@ P€ @ü 0x‹ÿkÄR€ (@Io“Ü (@ P€ñ/Àà-þ¯[H P€ &¼MRpƒ (@ Ä¿ƒ·ø¿Fl!(@ P€˜`ð6IÁ P€ (ÿ Þâÿ±… (@ P`R€ÁÛ$7(@ P€ @ü 0x‹ÿý´r  IDATkÄR€ (@Io“Ü (@ P€ñ/`Žÿ&^- ªsÂå†ßïÃØ˜>ß‚¡tÀl¶Àh4ÁjµË ‹ 0.‚q AànFmM;†GzIˆ(¸EÊ £ºEÇ-ï¿ï}ßå(–8Í"ŒuØúûqäå½øe‰²X’óQþ®ßã–M³pÓºœ‰ŠO]»þ†Þúýøâ‡DãÈ,Œ&¯ÀŸ‹’P9pO–2íÃPßaÜwËǰ ‚yøô“߯¢±W×ø¾ñÑQß1€¦Éú ‹|¨ÄÚ[>Š ï½ï+lì ×q› "¼"„g}Ž’‚Òµær¡©± MMGžž"½I¬[» 0™L¡–ô÷÷atÔ…¶öTW·àСV¬¼Fº$XÌœ\Œ‹éEôwVán½îë‚•7®Â÷WKðÒò†«žÀGy'š !%ï›x{är!XwÝ•w¥¹xìÃäPÁ,f'“ìH”ÀJ}¡ûëbïÏoÄo«/EæÕxèDnÛ0ÜR‹ÿúƽxÅð^òoÅõRoÔßµ£iéÝHÏý'vnñ"ež¤H€¥_{_¨z/–Ï³Õ Ùy"þRulBéÒR|ðþO`­D¦ªgP[‚î`÷·ð›/à¡Gá£_Á¼DJÃD­× (@ˆú§‹:g* ‘öõu`Ô5 £ $'9 Á›Q‚60€``|hÔ`0ÉP©)ɉ2¤:&ßúúÚeX5ÎdÞ»‡P:àímCõ@**ò Q0«…¤YÍ«@†ÿYx]éN+ø%ÈòzÇÐßц€}=lÉÅ(((•à-üë«¢žx\]h©î‡9£D†#¢¤ ™Öùµ&bÁ<2s¡ Í8åÈ´ Á’‘˜8Œ„Ì1 ôÀíRƒ¦'·vu`@~ìÙy°tÛÂ~¡Ô0ÛœH—6äKðvÒ°«'¾\8_n‡©«ý#¸Tõá§%I\(@ P€šÿLh3¼Hpævã•}Ï"-5 e¥˜;w>¼>?jëQ]ß @ ©Î$IP7ÁBtt´£¹©öoAEÅr$ÏI½(‡P‡^[zÒvå½ïXž‹åKdXSb/CÑRØ2’qMé£è4u ¥Q‚·ÙÀààvî|–¼[‘]ƒñDÿÖ‰K'ã¦=‚Ý»Š°üoGÎò•(T1]æu°g_üɉÜSo©_ *ó­(YêÆóT=¥'·>ü*Zê^ÅÊUw£¥©aê*Ã÷Úl0®¹…;žÁ²À+èj`H–ê¾ãB P€ˆ"Àà- Ì™&wÈ}m##}rÿZ*òórå~¶\­®Ã˜×‡€ §&§¤„zÔ Ò«f” Îíö êh5RR’PQ)½OÃjrÃ(ššJ1;4©áLÛOåónCv© Ìs ¼<I`‡cýÒãØ€ö6zlãÁ-ŒMâÖ‹Ö–"”o(EBE1LæÁ›êÍli†»£m(Æüld.À-ÉE%È[.³&~¸Å´r-m5£_Å—QUUˆæ:3VÝbÀà?eþÅàiÎãAàà hó§âèŠwã#EVäÊׂ (@ P`*oSéœÁ¾‘ÑA¹mP†Ýà«Ì ‘Y¦~8¤—Í*䙽¨þ§&3ýŒÈ}]É2têp8dê¥êC`¢‡î šESJa“@eõÉYx†àlÇ€7næL—˜Fã|È %À(“Ì.´µÊ¦ÌJD$gæÂf6À&3z1<¿Ëƒ!s L:õ´¢¥Û:5‹ÕžR‡Õív¹X0–D™Áš•G ¼#蓪2ÕoM@Í~­GÏh zÇŒX/½e6¹O›1;^¯´G&Oø<èmi@k¤{Þª›Ð㙋Ѭrä%‘õHuè(Ž´b¬ûèv\»Ø‰++Âícj,E+±Òúi»­À.iCq±ŽƒTïù'ެAkV2. 4à)©CnÑÓ-2Œ‹ÐxPf¾á¡·²Ipç“€}I6òV—J¡7QW7)@ P€J€ÁÛYú¨gº¹åÇét¢»o@†P]°9dö¨Izh̰ØÌ0KOê³ZL¼`–‰ õ 8¹9?#=Í-mRn@†Y'†ÏR[ã¡Z¿ô¸õ” íÅýع'Ë®—Ç~”ÉMþò 5õval =¹ädáji´Ñ0"C¬2¼úèchظ©ëËP¨z)}X Ôáèè2ä%¯Ám·ÉSöš·À%3z_xægÈ\&³HWaaª„Ä1ã%§L0ÉÇìYAÔ‡püX'¼y™˜ ñÈ«Òë÷68Šå©!ôÔ¶“-Uå¥HÍMÂü6¡,4ܪË´{QÓ5‚–}O`oW%fe˜¤>]nR€ Â¼…ÌÔG·Ç·<¼5))½ƒÃpIP–’š&šnÀ9¬–УBìV«ÜÏf’^ö“íàP 4ã4999Ô3§&=jøíâ]ÞQxÛÐôâϰcw"žª+Ç=ÿ}fåg"]b±àÀ(‚£c“’K>ˆâ%Ëðöb53õU¸†÷á —>ŠÆ4éѼ¬l|:On<| ÁÀ˜¶Ÿ¿u¶ô’™eêüùk?Ç«V8­Beòt‚7é%5e£d¶uCè«jÅØ%‰Òa6„#Gj~Y.²rçÈÅy2ÂR*^8×}ü Ø$çrR\æ òAüæ{ñûíbOûG`³Û¼Ed(@ œ`ðvÂâ¬m™ FØå- I6+l? k(xSQ½nêG½qÁ'o`ý¡øª‰ 2Aîë:kM‹‹Š[þö)4Víŧ~3‚Š7¾ ·Ýó!\‚†Cá1//€ŸoòÁd—ûÞ$&²Nö˜©7b°·͵23UTÚ Cî}Ï —bÙ’òPÆ7 ¥`1núÝûåa"½h¬k 剾1£ q—­¿©óûýI4¾§\&&ô`Û–+±ìýiÈ_&U4ŬæÔ R¯¡¬ δÝò‘ch® _æF@fÅr¡(@ D`ðMæ Óí6‡LBp`Xn ʰgbB¢Œó”@Ì&÷¹©Wb™d‚YýW7¼ÉâóùÆ{å¤÷edD†¥ÇÍ.C­Æ‰ýgؤ¸+®†Jûö=Œ¿½Ð‰¦®t¬¼õF,Zz –& A —j-–·(¨ÛÁ£ZþÐÐr@ᦦÃ"ÔƒƒòZ« Üj\Tòj,ƒ<ðØ*×À(¼*mºš|srà0GJ×q4T[0Ò@WR>åuf¡ Z[Ow-‘¨Á ½{† 8S KÏ… (0¥ƒ·)y^ûN»¼î*ðÈóÉC=jIÉ2mR‚ƒÜ“¥†IÕzÂö–õ6µ¨Ïª7N•S·o9ÉòÇ}²›éµ7(ÞJÇàíFÓöàé}è0ÍÁ=÷ äTr{ÙÉ‹ÜÔï÷!#§rß =ô˜P`§î¤€Ñ,Á˜J0lÈÈ—‡øú‘7t#n:Ç$z¬ˆ_Þ%+ï“ ÈW^f¨JÌ,®'&ê'ÉhÈÈ“ûDÚàaÔÊÄ; }ò³D™„’µ`¬òâ4y\ˆ?€€)YYj2K¬2ÜO P€¯w‹0*ˆKš!ADFFÌm–Y‘~äæÈޘܧî…;ù5åêQ ê=§ªçMõÄ%$$ ¡±QÒ ÈË+“€î⋱ƒÁÀñ‡ñ_1ï¦ãÆÿül°;Bï& ¿‚Ý_Áþ_ÿ?¼éúOâׇj°[¾µjt4Ø×c Úr.ƒ³¨ «K­°¿ÙñÎK·âЋ¯âÉ?Ô…ª ì C/ü ª[þ´R¬^/à´{¹¤ËÏ…L”ÈÄÒeÇñÔÏ~‰gžxÆË®”ç²¥¡ <Ø ?(Ÿ}ò,¿†ßþ ëú±§p–—˜Q$“(¸P€ ¦¸ø¢‚©ÎöîKL”›—¤g¨§·#23Ñãq˘-ôfõ·^jª·M ™jÏqSŠðz½òÀ^wè™pv[š<ï-=Ô«t›~N5Ø^ƒî¦ÃhAJw ¬-’ˆÌ¢ûÏ ³Ã‰”ü¥(²eÈìÜQéá:Ц#;eBGk¨WÒ4x c}CH[4™…YH³È‚ÁØ9(]<ú0TÄ®—š¸å/mE’|Ô?ðÅ7Ú‹öÃ. 
[Ê‘¿d.2F8T—bäʦÝRf¤(@‹[€ÁÛYº¾99¥MJÇÑ£{á—€lLzÛ.\z¯©ºÎ%’UÁšÅªîs³„¶­2Û´³³õõ òÚNy=V!ŠŠæÊ¾‹ï2ÚúOTØ&÷ù{ÑôûBWá—a×"}ÖF¬ûÈÜ{å'0+{¾Öõ|æwãÕ–‘‰œóáH^/>ÿE,’áÖü‰ 'Efw^ò‰£õžÛqä彸å窟N† ÓJ°ážßbí'VžVà*ŽÂÂb¤¬\Û3õò€áR¬º¤\:µïäGóŽçVÿªÉ;Ñrdÿ{ÓC*!l‘WÊLÔ¸ûãïB™´ÉÌÀ-̈)@ P \àâ‹ ÂÏð<}6ª¦ö$fÛ,YêêdøTþ0«7.ddd S~Ôým*€ëïï—`NîÏêê’¼=òRúA,^² ©©92Yaò¶ýót&gç° nø6ʯÀÜ;¢G+ã=oòzxÞ4eÍAñ;~†ÿÚèˆG‹¼ÔÌÓ$”Ê]þ‰'U#ä”aã?Àª¡!\ãßi”·\8 “‘4õ9²®CzÒ&|ñ§£HÌÍÍzUWÁP° I©óq_™kÖ$éí²TWšQ²\pîøO<>²T{¬KaU‚O?p=Üc~œYïC·¼>Ëz(™<Ý"-[ò%Éä„rÉ·™Y…åPiT(î (@ P`Ú Þ¦MõÚ3ªíšå9cE…sB[Yé"yé¼<ºbâñ ¡g¾É}mVyf˜Õj¿h‡J_» KR€ (  0xÓ$Îæ:ô<1 ¡»ÛÏæ‘X7(@ Pàÿ·w&Ðq]ežÿ×¾—J%•öÕ²-[¶cËK²’0C÷¤IÒ„p8Ò¦z†¦º'CÍihÖÎtÃdº'ì $q !„8†„ÅqÇr¼j)•ö­TûöÞ|·¤’U¶dlÙ’#éÏ)WÕ{ï.ïwKçýý}÷û. ¬p³3¬ð;åí‘ À @ñ¶&‘·@$@$@$°zP¼­ž¹æ’ ¬o+`y $@$@$@«‡ÅÛê™kÞ) À @ñ¶&‘·@$@$@$°zP¼­ž¹æ’ ¬o+`y $@$@$@«‡ÅÛê™kÞ) À Àæ™Ät.‰èéyÎò0 ,p":ÓÅÛ Šâš–C2+>Èo$@$@$@$p(£R¡ÐmZ Áw   X(Þ–Á$qˆ$@$@$@$P @ñV Áw   X(Þ–Á$qˆ$@$@$@$P @ñV Áw   X(Þ–Á$qˆ$@$@$@$P @ñV Áw   X(Þ–Á$qˆ$@$@$@$P @ñV Áw   X(Þ–Á$qˆ$@$@$@$P @ñV Áw   X(Þ–Á$qˆ$@$@$@$P Àé $ø¾ôrqèCOào=‹®®QŒÈ4y Ê÷ ¾¥w}àzä˜iöèT½á}xnß8^x>ÿþ=¨uØPY¸&=}ô™™v‡¦[ÝåØpÇgÐÞìÄUµ…‹Ï}WuÃC=øîÿÞ‘dÒŽf¼Cú¨ŸÝG¡šá(¢c}xø¯¾SØ”Õàþû;QŸëDÉðá|ÃqŒ®/zW~X»çflxó[pC5`6]À/$@$@$pŠ·s,Ö¹\º®AËM½=ß™ÁhÁb€Ñd‚Q>ERe±tåÛÕ²@¤]§Nãõ£Èxí¢ÜdXF+Lú$Üåqdå»>…iz¼)hÙ&C'|} ¯Ò±EÓP–?«.L!›G$x =AtvM *Í"5 ‹ÓSç ª¼h©tÁ!˜s‰%ÙH'_9„d%RÞc9ø-ÒÎÙ1rm&҃㯾‚c9#LUYÜ ã È=ñNîÀ@8‰H™6©^Ô.†ïälMëacG¥N“áœ.¤ Ì8ûQ4s‚.]Ô‡&Âmd°ÉX á±Ä&ÃÐå!¯ŠÃíÕfGI ¯Ï·×—q—ooЖ„ ‚!„›oƒ±¡_ùÈn¸ìò“Ì+K^ÌÚDùöG™<ïÜóŽŽW¡Ï¶qZ«{1hzýGðÿx™w~›Þ± ¼NN¼C=§ð‰o~¹?¼ñ’;ðïÄúæš÷/@¨—ù±òy¼úb%­Àš5ªŸ3Eí(Ò"54ÂßgC©œ:#ÐT;PÓV‡·|íƒh—oöYBTáfxñxà×øégzÑôÀŸc­Ë!½² Àüæ}tÍ_…g.˜€ˆ6em‹NŽ#ž€Ežê.»þ†zØ,Í3ùt6‡œ¹¤¼ÇÆGåÚqTT×Áh#–¸•Z²™ ‚'OÁh»g5Üœ¶¹’Ùè"ÇÄUzò(†ÇòU"›ñ“™%–”žB¢ '¬WáÚ– ÔoðÀåë]ËxíNlt={f#âÇÔª„ìÜÝÉ ÅÝ_©ˆ¶¦”î $ãêb‡¼Î”`/åU»f-´q=?§gÄ›ºÎ,"Ô ›t—7eœ)òEokAÙá.TÆú1.õãªÛâ.f.ç  Pæ}tÏ¥P·T2‰°(…±á~4ÕÖÀãò ªº±²™ÄMªÊØØbñ8úC! Žb",.·?ì—ë¥ä Ù‚†l6Ó§ºa©«£ß/®ãù…j62‚ñƒàÉ_¢kÒ€úí[aÓM°ö‹7½wé±,Æj®CC³šäæE4j×Áf³aci£z¢§1møœ‡ŽK |~u"Þ^Qˆ9dMžcÚ¨LhY„Bƒ £iÝôuÈ`fYÖæiøÌa³ ††:xýC¨1õ#Ö‘uÉiŠ·3Œø‰H€Hàoç ¹<4QéT§B‰Ç… k[к~Ò™,NuñóýòŸUo뛪á÷y°yË ‹ ¡ûõ×dí{=*ëV¦õÍ0,ÖÆa „jÑðæ:8×V‹˜-¶YÍ„MÂjþïÿÜfè3jƉ}?Á¯ûÅôV(f; íDëÿ|‹%>',1e+hŸ-±pJ`€Zv!ÅSW‹ªv/ô¯¿„ÉZ†rTH›fCXŒs‡pòh {\Øñ^Y¾·ˆËá .ÉôW`0çÅñ„»,ð)ñÆB$@$@ç!@ñv8—r*“N‹xKÁ$—Óo &¢q„'£8zª=}CH§3y·ª–KÃ_êEi©Oº4 ÔçC_ÿô\&o¹³;D”œÇ*u)ã¼rucÐô(¢Q3rç‘sdq$"KÖ ÊÍh‡_—ÙÅêèŸ6<-vØpVÖÁ$–©²¬M\¬Êr9K¼)§¥ÝWP)®ÒÙ%=2„ÄÐ ÆMUp‰õ3 ‹Ó.Ä#mtx`)­€7Õ-ÅX(WÊOæ Ñ>Œ=·ÛP!c²Ê\Çgwš7Ã…‘Œô¡ûÀKyƒZ‘5ÞÄPd#Œ%åpYŒù6Ššà  8‹ÅÛY@.××Éq‰BL&PU@C]=•UøÿöNv…ðÔ³P!Ç%b±h¿z&»€ÐÀ®Ûµí›Ö##.VºñáTÔÖÃlžß¥x¹Æ¼¤íFÄò6€¡>`ø‘/ fIàÁnåÊôKThnû—°¦¾o• U¬²®f×ÕS_$’³`P›:ðûÿyö9„:OãHé-¸©µoÞ!ÞÉ iÄSSõf´™ƒmòŒ6ID5"cxéz*w`¨Ìƒ«ôüR†1Q4 ÈÀ! àÛwÿ¤èÌì/þö:Ôî®1yËãì üL$@$°ª P¼-Òô‡ÇF‘K%жc;â‰4^:ØGŸ|›ï|ßû±ýš]˜Èfñ”Šƒ?xã½½Øûø~±ÄeQêq ©±==èîíCyUøé”=iå}`Y¹7Ñnh½ëP±qn=k1õ ìÆ#÷| §jjàûŸˆ0ºp7çÙ„²±QôýÛßྟZ0]‹÷~ñÝØ(A´yA YtŸ¸HëÑ~õ |.ˆ[Õ˜ Oâȳ¿EåÚ;(k‘n{ÎîZ¾«v£vs5î’ɬû‰ûo%VÕš@»Èò²Fùº€_êÈoïGèÄ|ùgÀÛî¹ -WmÂVQOEëÎæèýìC‰ ®jß ×/$ð¡0œ­“´!>°Û>ìFÕV©1—áíì†Îþ.íjªáòD¹ÂpŸŽ˜¬Å“MXH€H€H`^ x$ÎÛOÌC@YÐLpDîÃÏŸ "&kÜ$­›$H‰´ì¸™JØk’‹ˆ6MËÉKŽI]–‹# ,nJ¸}ïg $Öãý÷ý v45! 
‘œpñEí€Q]·$ö÷ŸÆñ—ÅB6iÂP•/«uzw‡‹oöL ¯Ls-*j "äÎå'  ˜‹ÅÛ\T.Ã1%Øty©ä»&yø;ìÖ¼ KÄuÂXV)çMy—+–Šæ·j²Y-p;í’Mm¥¥ç]§+RÀÉvU9Yó72 ¸eë(³Õ<•¾C V9ž3‹…R^3.\МˆÅNö=MG‡1(·¾èzŒ[ëкcꤾgÁÞgПŒEƒGÂppDÜ›¤|¥°ÉZDçíÜ‹ÔVi9ô™ŒiK)¼nägÂB$@$@ç%@ñv^< ?iuº$›„ #’·ÍçqbÇæx|ÿóˆÉB÷Èo~ÓÎ÷Ið‚'5™;ü AXÌV4Éî »Û7!™DF„ŒÍ£¬2+Ïú¦w<„‘®NÜ}/ðnÙ:jݶflqe>Zÿ ß¡³2IDATDpn†SÖ‰mk7ç…Þ¾–†>ð(ú;ºqÿ^ÉŸ÷_߉ÊÍmhWí.|*¥¦ÔÎíFUÍalØÒƒï}O‚*J6¢ò-r™Ÿ ]E•^|ÉÅâLñSï)EGívÜYoFó¼]4´VÀ!ÁFȘ‹»˜þ&VDtcBrúíÿ?ßÀ ¹höœ–Na²C¬nk°{÷Õ¨´›à¼P¡:gKW¤x“¤mpz›ñök~Œz\òß JXi¾4ÃînÃú×-¨—OßܪHTžŠâ´Ã)®FeUSKÙ´œ†c²ãA?2ȷؑÿ÷Ì?7ڰκ›$ËœâÍ ¢Z;ÜI*î\Õ¾ÒT¾²ÌkÛd³ú ~‰”]ç‡OMÜÝ ²ÈG\âù·»dk´‘(^úÁëÓÎ~S±§WãºÝ­ØsG;Êä^U]  8Ãíû«¯É3ãcÜsÓœ×uuuÉšœ %ïØj){÷î•]+¨^×¶à[VùÚÒbE:öÊKȾuU¸nýú¼5­+Ø£'•e-+6¶4‰µÍ‹ºê  ·7ˆ#ÇŽ£\¶ÇªjhÎïº]§*I±–I %"U-u›*"ÃDøØ6IÑ1%Ì gμË/VËÈïRí*AN[~m Qì_¹”¬”ÆÒçñde-Q¬žVQJsz¤¥má›LÉzE‹ìø ëóÿË‘ã²POÖ»Iÿ²ŽQ‰G!«e8Å}jµXQêuÊ. ­Ò³.:E" e•ºI„J_/ú004¥lb_â[¹ÂMq—\£Õ%.ã‹=F+oþu¦¶ˆ=›+o½º o虊şÄb¦v#“-T‹‹WÖ4ç9«ñˆå­`…Ë×’Ê&3ì",YH€H€Hàr8ûñt9Û^Õm)K™YR~jd ¨ ôÂ*;&x$9oìÐ,[?™ÍSø•k5‹¡§7„ѱ1LÊçu›[a÷銴¸­ê_ožH€H€.ÅÛ¥ñ;om%¼¬bU«ªoD™ìm–ÍÑÆ'%1o¯ì"p&ù®MN™%_˜SÖ·•ˆ¥®Êí…ÚŒ^Ì9çmŸ'I€H€H€VŠ·Ežó‚Î(Éw­v§r”ÊÎiR²ÒôB/£E¹‹x³ˆßÍj·Ã&/ƒòÛ± ÀY(Þβ_•€3Éú§2ÉÈÏB$@$@$@—B€æK¡Çº$@$@$@$°Ä(Þ–8»#   K!@ñv)ôX—H€H€H€–˜ÅÛgw$@$@$@$p)(Þ.…ë’ À x[bàìŽH€H€H€.…ÅÛ¥Ðc]   XbÌó6p•›ÍÈD¹óÐáa   ¥$ ÒÏtGñ6ƒ¢øƒÕä@w}ñA~#   +@ Ï1(½Næ{¦x›gòÛZ-óœåa   X:&ÃÉvÆ·tý³'     x[ 8V#   +A€âíJPgŸ$@$@$@$°@o Çj$@$@$@$p%P¼] êì“H€H€H€H€âmàXH€H€H€®Š·+A}’ À P¼-«‘ À• @ñv%¨³O   X Š·‚c5   ¸(Þ®uöI$@$@$@ $pf£¬6Àj$py耖B&­#Õas:`0`*j\C:‡–Í!+Çu݃Á›Ë “˜ùŸˆ®ÉÉ4É4r9Òr¾¨ýjV/,ò«·Ì\<}R½ÍÔÓ é†üdgÚuéÌGéGÏiˆ'RÒ4l”ñÈØ‹Æ3sqFÆ®!•H"7ûZ¹£–ELµ!ƒ-Œw¦ÚÌ#Ìv¹W“ Ö¹Æ?s? ¬do+yv—Ó½e#Ðû¾‡G÷öáñ'ã¸õ›ƒº*?¶‰›* F=xòkŸÂÉ×ág!rZ ìî«ðñ?‡ .š ª'Õ}àAÜûù½8r´=Ó-Ø}u¸úCáæ6/ö¬)´;ë}¦^¡qG~ ëg·;ëÒÂG}ð'éíÀ_þ·o¢WÃVµ*c/OábÓ~ô¼ú*¾ùg÷â\‹ék[3¯ "´?ßF(žÂxáú¢w‹|[ƒ?ø«/bÝæ«pk-(àŠøð ¬oK1×yQ¡#‘ˆŠ%(‹t:‰lV,6ÓbÃl¶ˆÁF¬)V»¼l°Xl“ÒRŒì ÒǸXÜÑsè0N£§ÏŒh.‡Ô¬Ñe“÷ý]#IôÄݨ¬ö!6–E&v ¯Ÿ„¿.€&¿SjŒ" ¢÷åCM¹‘p6¡Ö/‡Ã]bÉ £ëèAt—¬C_U *È[È Ýd’1¹ìUƒ „¢¥ˆÉ) ßyKNæ45ÁÐFl§`“Xh$‡5}Ò~Q!1Ú‹Sc#LÀŠ2ÄUZz&Œ¡¾(R¾8ë*P"‹ŒkšüXÆ'0pê„Xæì­Z ŸXùÎ?~! 
X‘(Þ–`Zuh"Ú2†Ž#ÃÐ`7ÆÆäX.߻πÃáBUõTV4 ¼¼FÃ*šÓAD&;pß=ßÁáH#BžâV,.áÞcøí?~kŸFªmúë·¡oßçñêc_Æ?|ã)Xo½Wÿ‡VQcÏ!Ôýîýä·aúèÓØòž=¸çñÈ>ÿ?:q·}ácТFªú#xW#àU­é29<ˆçú>N_‡qGéyÜ—……w5Wͨ¨<KÙ(½Cíà*Îì¢x“'»ñr}3ªƒTÈÉ3Mµqvßy'®ýØ]¸^,ŽEÂ,1 mßGðÞ{ˆß&žÄú¿Œ-V'ÖMÿ`v?üL$@$°² ¬"…°ô©‹U%™Ž¡¯ïB§ár›à´[Ѿ­ ^ïn±¶M=º£ÑR©4ÆÅ²ÒÝõ NŸz›¶^»Í‹Éºô_¢³ñQôýü/ð/¿éÀéá(Òm;a ¹P 6{ ¦ß";‚_¨ÇU¾>¹Î" ×êÛ¶Àn¼5Ÿ{áxIoÁ¦¿ÃÄña¼Pý>ܽ½mWM5dÜô6¸}͸µáÓHëaôõ¹:97-Þ´W¾ŠÞÇp_=²)jD9]¸íSý5`C«ØÄj<85ЃH£j¼tªóé| Á‰$vEç~ï›Æk÷`ã·G´ÃA ‘©åý½5y À #pæ?þ+ìÆÞ·£ÉøxlÑÈ("Ña˜ÍºXØÌðxÜ(+/“Wyþåõzáv;aµ™ÍÅžB<Wbòp‹6]¬‘ñÁ…&ÎÂ-îB·Ï-NÁ³„SviqM•À ¦¾\„¯Ÿeõp ¯t4‚á´²„%¤Ã#”CF®Ÿ2Œ©ŽbL`2¢¡¦®\æz–ÉoÞvgPnt§6»]\³XÅŸkšY8ë:~$ Xñhy[¤)ÖôñIü¢TÉúuMذ¡ ‰”<ÝÕƒc§{Ó¦ž¾¾o¡ØÞ¾]ðè öâ…çÅÚµÛÑÚºK„Ê &³††âS÷ÝK‰õÝßÀ¿Óƒ½Ý±¢YÑ»»‘ "( öwÖØQ#îN£ 3øý0YeM›ñQèããÂÍmçßaû›€§ß=ÝDÁ­hè7u'‚Ýi$k5¸fzˆKcxèGidKüøôßnÇ#Ÿè‚èí‹.•Û¯†§VZþŸû0^ôì©CƒôoÁˆ(­§ðüïÚ2nü3Æ~Ž‹ëCÔ¦¶ïçè¨ßƒ>¬ßbƒ§È¯zÑÃe  eJ`…ª‚+?ƒ²®-—õk>ÔTWÉz¶*;Ù‰t&+i(txÄÚ¦‚òé+DÄ%“)¼~줸SÝX»n­XêTpCAYëU[³^¬vi©¹ò~ïÌ6Þtà)…AÍZÿõ{«^ôƒû~‰ÞSâz-{®mÛŠ?Ø-†,1ñ¥†»1øëÏâ•õ·£Ü¯ãÚ1<%­G.º™ÎÀVØ«Ýh·Ü=º/÷J@i•ˆ·XÚÑýèhÝ1¸b·ž£ñ,žè5y_Çýò­È,.ÖBŒŸÀÄ–Іº—û†É*$@$@ËœÅÛ"M`L¬nqy¹$™C^V‰ ÈÚ¶¬XÞbe³Z,0˜Œò€6æƒT®°X"!.U—¸Vyת²ÒE"ãЦ-t‹4Ô+Ö¬AEK–©eûRD»ŒdS.Ï¿º&9ᢃè?ÕÞ® hµ×Á'BºAD”YKð8OtC«¬—` ~SØVlâru¸Qåœ@"ÅÀxÙ€E¢Q“Hôö Q~4é×ð}¨»O 1™F¼7.NÖ³\ÇÊÇ›KъÜM "ù𔞷)¼… ›µH€H€–?§–×m.ýhÇFB"ÞÆ±½½M¬m9tƒò°µÊZ'£:Lf3L²nËj5Ãjrç3ƒ$%y«.Bmttõuuè öËz°N´¬Ù&7 éCX.š@66ŒÎ¼_|؇֊»ð!ló{Q§g`”¼kC‘Aüèé·à?þk*K$ˆ`ð¢»8SA¯ƒÍVŠÝטðtv /<{ïjX‹\x/<ó4ÚvÝ‹Úr%ÒŽ©3óIý)¾×½çí¸úOï€x~aŸ­f³Ð{¾Ï~ö1tÞ‡‡oú.v×»±£°¦o¦~  Xé(Þi†²`=)/Œ0:>‰X4!™÷%zÔ,ùÜ$RÒj5ÁœÏí&‹Ï•N\¨ÉœŸ’ …t&i©D^ö‹ëuRܬbua¹h᎟`LÜÎßÿMÎm·csõFl)ñ Rm±‰£sÿ£èé· ½ãí¨”€‘rËäE÷Q\Á“XX+[×Ãu$‡ô±×0‘K ›ÀëG× á]Ô–ITÅœE=˜a±‰¥Ö[·LyÑ’6ͽ¡Vr»=wBÃа†¨nosÒäA XÉ(Þiv“âÞJ¦bâþôb<7Z^±Êä…›8‡ÍšßæÈnµÊz6S>zÒ&Q„šD#¦Òé¼èS–¹dRvP ZWs÷ªr±Šƒ9!ª¼Èy"JÔŠ[T“Ä"j›,•yEeæ‰88ôºŽÂ;Üøã»nÁ†­íhW£Uêeäüñ'~ä Ѷ–t ÆÌ¤¤$‘-ºÄ=™Ó-HF&A0Ùà¸À%‡&±®ú×m€ãH úÉC–Ä¿qYóvâôFÜÒh‘„Á"Þ‚ ˜LÅ <·%.Â..©F$ ÍÊF^$V! ÕA€âm æÙ¬öß”ô.l*E„l‡ˆ6±ºò½Iƒ¬‰“ý/E¨¾*°A_œµøi üëÂP_[µèFx ‰?ºl¥ON  ¢_«…Gv&PQ¨‚Éþ#=þI|q¯¡ì6üåc‡kKÊ”yù=M 'Eœ½Œ/MJN¶çð⯶âÕR²ðNvL“rš]wý­{þ[nþ8þËõâÆ¼¿q‡á5?‹@ð =§Ö-ÚªiA¥Ñ‚ÊKÖáu"ö3¨k0¢Ä÷›(‡H€H`I\ÈãhI²Ò:1ËC\Eˆf2Sn2«6e2*÷¨E\¦¢2ÔãêºBQ & bPÖ¹Œ¸NÕþYfåR-\°ZßÍU"zQácbxX,W#Ъ˘Ãh°Qo¬näp~ñ>Ä&zÄMÙ‡´÷Ø=k±¶¶åb¤sÏðSÁ"^To|rå˜1`É6UH %.îdÆ„RQƒþ2JUÂÞ žQæ¸$Ÿ_™sÁãˆfË‘­h…Kæ[mൢ‹˜Oô÷ ¬»3`™WÖNräBP² ,{g”ò¿•7Ö 8’ DÏŠgL4˜ŸÏ“cdve1Woj½›Ú!-îRe‰S%–ô"8\®’ËÜë—p4¹káqe±ëê þååß!Õ•ÁÝí²=VÇaÙëÇèÛü”T7a§Ú/V6¤?vô$¾þ°ŽÝ_øš¶ï”ÔgUo–À‚f|àkX|"Ñ=ôM|ê“]èuâ]ÿ÷ËØ,káÖ^”µLòänDmõ~lÙÒ…/~ÿR¥;ÑxÛݨ–½kÒÖE57=Âl,Šž¾‡ƒÚõª^‹6˜P£’Õ-¤±â»æ7  eF€âm‘&,¨C\:»º%Ï[5êkªÑ×ßD2+/[^¼ºÎ»HE©e³Ù¼%N ¸Ã¯½&ߨ­]+"ï\\ï¥Í-¸ùs_AÇþŽ>óuÜþk§¬ù HÄè[ñ‘nÅ΀Y öxþÁïâ°X»NK²Ðçß³ˆäûÏú•7ïù86ÞôqÈN[ð©í¡45µÀ{ÃÍø_za/Ý€oÙ »#È<ž½iëLßj?†'ñôýûðØCŸŸ›êÞã-+ åuhjnC™¿F¢!šTboˆM“ \qoK0Êg7:ÔËÎ ü«¢¢9ŸBDuow8eÝ› ·.yY%¥ ÌE€âm.*—û˜XàT€‚¿L"YH€H€H€HàÈÞK,$@$@$@$@Ë…ÅÛr™)Ž“H€H€H€„Å$@$@$@$°ŒP¼-£ÉâPI€H€H€H€â¿   XF(Þ–Ñdq¨$@$@$@$@ñÆß ,#oËh²8T    xão€H€H€H€–™Þ÷¥gžkܹ\NöàÔdõþ¹N¯Ècét _œzfEÞoŠH€H€H`yHgsþˆͺÁ˜0@ Gé› ç|Ïfç<¼2N$3éôʼ=Þ À²# »m†šžùÿÛâvxkIEND®B`‚trove-5.0.0/apidocs/src/images/phpMyAdmin.png0000664000567000056710000076705112701410316022261 0ustar jenkinsjenkins00000000000000‰PNG  IHDR„¹Ö5úaîiCCPICC Profilex…TÏkAþ6n©Ð"Zk²x"IY«hEÔ6ýbk Û¶Ed3IÖn6ëî&µ¥ˆäâÑ*ÞEí¡ÿ€zðd/J…ZE(Þ«(b¡-ñÍnL¶¥êÀÎ~óÞ7ï}ovß rÒ4õ€ä ÇR¢il|BjüˆŽ¢ A4%UÛìN$Aƒsù{çØz[VÃ{ûw²w­šÒ¶š„ý@àGšÙ*°ïq Yˆ<ß¡)ÇtßãØòì9NyxÁµ+=ÄY"|@5-ÎM¸SÍ%Ó@ƒH8”õqR>œ×‹”×infÆÈ½O¦»Ìî«b¡œNö½ô~N³Þ>Â! ­?F¸žõŒÕ?âaá¤æÄ†=5ôø`·©ø5Â_M'¢TqÙ. 
ñ˜®ýVòJ‚p8Êda€sZHO×Lnøº‡}&ׯâwVQáygÞÔÝïEÚ¯0  š HPEa˜°P@†<14²r?#«“{2u$j»tbD±A{6Ü=·Q¤Ý<þ("q”Cµ’üAþ*¯ÉOåyùË\°ØV÷”­›šºòà;Å噹×ÓÈãsM^|•Ôv“WG–¬yz¼šì?ìW—1æ‚5Äs°ûñ-_•Ì—)ŒÅãUóêK„uZ17ߟl;=â.Ï.µÖs­‰‹7V›—gýjHû“æUùO^õñügÍÄcâ)1&vŠç!‰—Å.ñ’ØK« â`mÇ•†)Òm‘ú$Õ``š¼õ/]?[x½F õQ”ÌÒT‰÷Â*d4¹oúÛÇüä÷ŠçŸ(/làÈ™ºmSqï¡e¥ns®¿Ñ}ð¶nk£~8üX<«­R5Ÿ ¼v‡zè)˜Ó––Í9R‡,Ÿ“ºéÊbRÌPÛCRR×%×eK³™UbévØ™Ón¡9B÷ħJe“ú¯ñ°ý°Rùù¬RÙ~NÖ—úoÀ¼ýEÀx‹‰ pHYsgŸÒR IDATxì}€]U™ÿoÊ›Þ{I™ôH I%RHDEEWWtuu«[þêºìÚVwWJ‘N @ éuR'É$“L2½—÷Þÿ÷}çžûî{óÞ0!AQßIæÞsÏùÎ×NýN{ Aº+VàÔ©Sˆ»ßü~?1fÌäåå!--íœ 'œl@òKœˆÞËf£=3GŽAGG233‘““£´²³³Ï™VA\ tuu¡¶¶‰‰‰5j”¾ÿ|ÄiÆ5ð‡Ð€”ÿ††´µµa„ áå¿·¾‡W Á—„þ©cÐ5a8<ˆööv¤§§###ÅÅÅö?!!áÁ~œf\ç¤îîn-ϧOŸÖò/å8^–ÏI¥ñÄD°å_lµñãÇ#999zùïéB°¾ÉëŸEâ‘=H8]‹„öf•4X< ’JFM‡Æ¥H(©Â¤)S´_ Àºd ú§)âwU¢ëÖÖV5Î2ÙIû|É MAÓû-·÷[@vèÁülô]1HL@0/iI‰9rüþ’’’ôÏçóE¡%ø¼ø-nf¿Nœ„G†iÄyxDÒ´ßõù¤ · ?Ÿ´,ÿòü‘¸£ÑŒæÅóNý¯M/¼xÃ"y³pçò¶ø‡‚;6lJJŠn{{{Q__¯\1‡æ,Þhбø’4±â¢á‰ ³4‡‚ãÐjˇ—¿X<ìééÁÉ“'QZZ2 ““зl.ÛÖæŒt’0bÄpmÿÙ'Hàó¥(ÂPcùà¡èÓËõ{å²aömñ në·qC¡gÓ VðZxñ–ÆÂ #8Þ©óâ·~‹k(4mš³ü^xÁáý–øha~>\$ÏöÛâŽäņ{߃ó'ã™<—~@Ê¿LpH9Éii…–—î¹ø#iÚo‹ó|ò · ?Ÿ´,ÿòü‘¸c… |$¬„‹³òY‚߆oZ–ÆÙঠ‹'Ö{(i,‡Ø RþSÓR “"¦ü« ç!Â4É> ˆcÿ%·ÀßÝ„¾^À߯0Aé|©@z™`¸ú"±yǽç[ýýýÁ¬tÈ…ÌØT$ˆÇ ž¼7™d L‚‹šiæ[x’ôa3cüбWZHPƒˆh‘™ÁLwa&´B¥ÀÀš2¡1-4)Æœ’cR˜§‹ÚõLBÅ; -0"2[xA@ÎõÓ£ çÛm`TNñÓZßVn 弡†:|ºŸ®Ç…°Š7Øo¨ÜŽœ®B=r{e¢Êí Ïx!ç’6ž0v‡„X~é¥ ·â ‡I Üµ¤1¤ Œ–a'DâBøæSâ\æú^¹"a²³sÐÙÙ‰––¤¦¦"++ ÉIÒŒ…S·_n9tÐZ¸ W/nÚp™ “ò¤3Ì3Þ‰sAœ€p¹C±–Mè•Û!%xÅkSxóÞb’[=9КÎMH<Œ7:`þhbiËŽÒb­‹!üŽÏâ “;<Î!¦øK¼Ôgo’0›Îñη#ƒóe$q3NdP.åá*%¤[¬´¬|äV9 +NT¹­@‚ZÁm€Ã¡à1dõ)¡Vîp¼<‘ÉL(¦PÁ,„ ò¶éŽ u „&'?2Ò3ä¤bw˜¤ÓðËÊdùçL±dr ´@Ðh~‹~dU‰L˜ƒÐÖR‘ÛK[éXÝ p?œr¤èÜP‘;˜ õØ$6zp`ô#ŠÜ‚ÕÍ#KÔI㒣ljòâ(Ékq‚Ùâ1ßV:‰´üˆî%Ö¸p¹bïÒ¸P€ƒF‡¸!¶Ìk€ µr‹ŒJ[R&,¿ÑòÚæˆå×+·•Ù!À—áÆÁj¾ôÄh€Õ“ƒÐ¤0OÅöpÒY¼4!0 ”ôò.·!!õÝÖôNBL;dih±˜o¯ÜnzEãÉ;H`YcPV¹e‡UÆRÆSRe‡q¹´½íŽ"b”4ß¡gˆ'éÃÄ „ ÄKÎ5yl¹µñ²…È­ƒZr…”:Á,!¡/RR’6Œñú퀄ñɇa£ÇPŽ.w8Lt¹-Œ•ÍjÅ„  ÁP½7ìITf!.L;2˧÷;„MðÙ/r¿¬ÚBNŸN[vˆ\»n€ÜJ&fècoºcâuSX!ܼwX$€éÃäÛ@[6‰Ê­Q&Æ–ÿÜœ<-ÿb?d¦gê8ÈÁjT%m~j:‚iÃL°)¤„oâs §L[R ¤²ÊÊUܽ;ýÊ6Q1Å(ÌŽ<;‹³¶T¿©ùÌ"'‡„[2¬×óm9´‹ý¶‰\PzT t!ܤa!‘0ÎwŒ›R<6Fø·~úœta aR•ÈÊM¿“Üy¹ßaÉœŠ 3ÐnF En^=,b†³VðËÆÙûv1Ø}G—[` žoòíà18 îa!L–!Š&$C (›Á!‘ã Æ9eÖÒ•w¤ÜŠ^å“|öÈæ÷`P&•'Ô"vƒd†LÊ~ssÿšu0,a²RdÙ¶Œ´½(]¸l!^ÅçVD^9l Ó8x# žp¹-|Ä;–Ü…“>DÚ6x]%ÉÆ+ós@Ù±dÎRn#³£§Xx]†Œž,)ûVÞ\ê¼ÆÍËù}G¡BáÈÍ›Â(%†Ü $«/7Ãè7ÿ=Œ q„W¢QðjåIæئ{¢¤>®f3p†[Gµü³‘‰Y´å?LnçÃ¥‹?!¥@òˆ.w¯åË‘Ûm¬œx‡¨Gë6¡ûŠÌÒÇÁ•ÑÍH*\”|N‘;:V?òîì·#–+·Åí„{^Ñðºü»éÝCá!Ÿ¥å Qïíy¹püFO‹±7BýQävÀÝT®'”x Nä‚Ò3€[t!´œ‹([§[ZZ9nr“ÃÊ¿äçÐúîp¹È!Z!έϖ›ßJH#ÝT®Ç¦á{€.HCî¡ôÝ!‚«|[þ<<¨7ë‰.wOˆ[¾†m€Œ¤ù- áX£ï†•`¸l†yÊ_xœ$1Žqneä‰õ¡Æ'OÉg¬¡$á>EJïtC£È¬ppaÔg¿HÛzÐõÙ¤Ïä$ LˆÈ¤¸Ô)YTŠº}TE“ƒH¼òòÒ£‘ ¾ð º¯ŒÆç]²÷W¶8ÈìV^^.¦LŸ,ëz p´lÓÉ]Í17ÿÎ;oça49, ‘G'•lßÑàUn§]pÊ­'Å{Ó+rˆ‹ÆïŸºÜÑd–ŒÕ BÛ¾õ šßQñ8é¼/iûúú°gÏm t ¶œ•ú}¹håVhkÙåÛ[Îc• [·*÷ïK¶Áè &·v,a¢É­³ŸÁráŒä{"nP¹‡PÆm}8_rËãÎ;µü˹¬¬L·SŸOeEË?‹?V›&ñÞ|µõá|Émé¿ÛïsÍo•[Á?¯>Þm¾Ïÿ;ÉïHÙl›&|DÆ o‘i¥üKû/}ì‘ò/ƒvR$~°ïó)÷»)ó`2¼“¸˜e\"øçíÃ4øßW~¿ùb¥‰&‡Â2"rÌ"°â"óÕÊn ßý§·ü˸§¢¢BÛÿÁÊ,¹§NªwŒëõj>ðÀ¨©©y÷¥ø3¥ «‚2‹+·ü¥¦ÊY[ÔþL•;®sÔ€œ¥’K“Ä8'gªâ.®? Øò/ý‹ ÊËËÿ\DË׀Évù“ò/Fá`â¸ÊâøSÒ€ì8”²/ý€Œdü#+ˆgë–/_nûŽõºetÚ´i6ÌÙoz¶Øâðo«ÙŽ+¢í_¼Ñz[•Åâx[ Èì°Ý†-u,~ƒîÛª,ð'¤[þe0,ñòÿ'”¹qQÞVRþe2DÃRþe¥0>¶z[µÅþD4`Ë¿­ïôiùët…°³³ƒËÙ÷"˦Î:¨n»Hùv´ô ¤{êQâL¤>“œOÙymÍšX‹V‡µÒJÆ“gÿ¯.é 3J—©5€pB;„š^ÃŒ¥-ô,Û Æôî·à—ÿŒpe¶ 4æh(ÄÒ•À§AâòNƒ"ÄœñE—Û¦t¡³@†W•)šÜ ,¡.ô …g˜Ë¶8¸¬ ƒÈ­y-i\‚7$M,¹]º’Öá%ä3r»0DbÅá[ŒßÐ Íhr‡Jt¹]'¿U§F„˜ù«Œ[Br;ØÉ¯Wç! 
ƒaÌí€$¨ ¥ÚßËŸ, ¡g£Ì ¿˜U¢Í¸M„çò·JcÂ’SÍaš\`“”N rçåË—£]y‘O27•©Úþ$Œ€“{ì>Vd?ù Þ‹ ÷Œˆ¾æ,´ Df›9€­i;Ÿ@°³-eã_ÿ Lëªø(³3Þ“²÷} åæG~3•…Wô·‡¦9k Íq {ç´˜ÝýBšÕlV73ßXÞE9çfìüãRDäMŸ"☽åëQù»ëÒ=–@¾.IÕO豿¹4f?'hÆ–& ÜiÛ,á s|"hBÌ놳{í\a~'·=Ñò+`Χµ_>=g×|½×*áð0†éÅ*Kk+lˆ!?JnYÛ~ÈGYÖòÁuZ¯bÒrDÖ²Fè+øœ‚öT@(i½aîj29ÓrùFËOMe‡Ö“bŸM„»zyÄn¸ ÂüqÆ€µ*ÎŒ×]’9iSò†ä°ãyð;ÿD,,0G²JoÇ"¢õѱºÙ Y‹¹/W€›{íêËbö?à~~j]Ô6~eS{ìŠ÷Emjƒß~ý˜‘;Éü°H·ºi9»öËÀ¯'ʘ騇N…¨ãÁå”ÅZ÷ÅÌN¿ Éþ´´U6my”„«éë*^^;|gŸ—µ“g0ßFgìSËbökÆuyî…Ø“¿Ojå]€)Ñ?êlø¹Ä|;ïDí1!‹Ñî,–3>X€Êm#õnù8>iŸÄÜêÞÎÜ`>«?»>¥üÐÜpý¤ÓÞI>Ö”¤è ™8›€yUƒðà§y*k ªvxQOêOì³Ñ|vÕúˆÝøGLFé¯C&¨XßÛ§ÿ1aK"¶z›Ätù$7…=æÒº|~õ¯'ñ¯¥/UÓÙ¿Áá…TvTŸM"É;¦DìΗ°èe€ÆÁëö—rý‰)«Ejª“pKxIý-¬½Bšï‚…ÆQ©}w !ð”€1> xžÂºP¿òhÉtŽG¦mQ|Êt •ê’߈ŽÁ(âW°6¯Q/¯ φÊbýIë^™nÍÅ}ûÄoqê**Ô%«"P®êR9Å̉r¶+äÞ®ç‰úU˜'z]„dþ+hëJƒU”Å–W”@…?d.ˆ«½a„µë}²8sDᲸ4áds| =A9˜ýúiÚBÕS™»¬ç-ŒÝŽÚ”sFÖ>ûˆýüñòÃùñPÀ™(cÍ5½6³”㵞CÓLcÕ†‚±ãÍ.û4Ì×ÐŽýÞà|ç#œ¸›`8‰ ó?ÌO#!מ /OÙœ± wæ[W4Žõ²™¹¾ŽõÎ-1ƒó®2;çíDž‘¶¢ ÇßsíéÌð2ð"kOx¨ûª^ûQ~ö¿y<þë#i•™.¸ˆ}é])›<þ†#þò˜Ï^X…í¯ íÈ“ 38+kûÅ>«bœ?Å~ð§‡}öÄ[Ê™<— TŸA¡Š9Xãmc‚±RÑ‘=öÉÂ#ü»ñÖçŸ2X=ô%H¾ ?Ÿ±³8Û³ Ú4‹óÕ0Ð7°>—>lè³´nL 'HRy!|xGÍﱿûP‘ý7mhzFl-ùÚÝrv¸UÖT 0V nýÐEQ›Œ9òH¸Aù•ô¡u^ÊxñånˆÆ<ùÀMä¤JæÛ6êan)Ò,ž[Îõ ~l}ŽöÜD{„ë °ÉMá½÷ã SV>.g_þ»˜ýð'É"Ï Àaph¥¬)§çlþ…;s,¾üV’ùª˜í9yûáJ›Ü„Qci£`­üo¥¤®: L$`*!ç¼½zqƒ¼¡0„”åÛ'Ô%"cÎ]ÄfN÷ñù“/YX¦/‹Z{#‡7ii¨eX祰®°Ê•ùaqŸê a+"e>A8ÀLê¹.LLBãçì¾Õvwù1(Á‡ó©“n8Æ"&VŽ:e~¡ã)Ä`täâøÈ”X&%Þé˜9p ½áñE Üb¼Í/8*™æ¥ùòDhùiCJ~}˜OºˆòÔûTšB;˜†AÖÉN»¤7Y±†ÇEùd‰àm¯ö!ǤÐ|/"‚ Õ;Q ¦"ãÉèðyqo!v˜~ÊZvæ‘`ÆzѪêtç$NR}=`å˜L3)VpÑÙTIÆEQ_åü€ËÓ%ØióɰòrâàñNÏ?ïa3Ï;²Ë$@muÚ?y¯39™®Ò.šê·™ÉÚÙç&O¯CBduȤBêã,à‡ë0ZҦʜ5G¹>˜I™³&aøó`Í 1Ù%ŒÕášL_Vq¦Ê<òÈàèÒiá$™Ž–v"  ÊFÏ‘G¹uºGÆ‘~0¨A˜ˆcçgìÅ ˜oqiœücr6þD‚ŒÅá@9|;‘ÈÁŘF!åÖ<ñ7ÀàÍÌÙqGgíHòõOk q®ÈÙÌø*¨»¿º¨ˆ"ˆ¢ꢜ˜5!}L!i¯³é'çlÚ‘YLQ™¯L¤áòLÅ·©Þ™cöwª¿®ÙÔUÞ_—6a™/†*26—0øÇoÀ‡‹ Lu…§äÛ©¶)@‹jÒ6âiÞI Ä€Æ# þ©.L¿1ôÍ…ÂÆLjÔ¸ }‚xŽÃÈÁ ÄÇ? ½yÜœT¸ ÂL„•É_Î%ûºñ˜ ž‚D"R¸*MðQ“©ˆrôN„|Õ¥¨a3?þ¤Œ-ÜL…„·E[ÂEU.Mœµ:`3ÒxÎ:*kãë³ÖŽ„þ¸ 3ކ‡åfE ?g.0ƒ”[p¾}±êŒüX¡(™àW¨¯¿ æÃÓæ‚§ fWâ¿Ö‹Í!ô×ј Í‘ ±5䙃UL­ðT5±0ã"C€yŒ¿;# äSw°ƒQɬB6hraÇïãOB“E@šJÚ›f 4'Wч 9Iêõ1'Ëð±<_¬Ù³8΃¹<Ü\Ò|k$xKÏQ{Â"Œdî±{‚ª Ä£~¸ô˜ŸÏsïzà’Á–ŽC`:Ž .ôYh«˜¯³ç±Ç•ú€hœI PðM›s)B žéÀ/üVÓÍÓm‘§òµ5'ØK k'ÊäÖ}‹1•“0SI‘+ëf 'åàš{ J‡K^åÔ)?®ãvì‰Ìe`=†õ[W•)¢µjŒÏÍ·&$‰Ýøj]³Ýºµ:žà/ò1‹WølkO˜ºžù©µ·+‘¿Ž53—òwö¯DÁî±ÖGŸ©¹óÀõ0ÈãÛ=w½O•ƒóhNì‘Ô¬kKÑÕžjúÉiWyz†ÊS˺RE+öר~£._¡.úQ¨KåŒËožkÀÏ“m[LŽ;‰}Ží}pRà”à!R §SÙä™k)7Çü›s<øž÷jÇ÷ûšR@ÙØŒ5ÎÀä’õÜÉ8ù™cc(`䯵üQÑuãùÜä€?„õÅWÂ0CL/KòlÞ)Àd,óœu©ÀAS™oóžÏ·t옖h~õ¢ñÖÔˆúÀ¥c¦!•²FƒÛsý a¬¡H#s…u¸²esõxú>ªŽ9ÖÉÚ;f>ëwkiíÍž“g°Å  ¿Ï#pÏÚ-¢1å–ËŽÅß|&eH˜>wãm™¦Ý€ÃK€Óôm¶`¾P9-˜Êö×>IüúË™®˜ N ­ËÏÉ6Áxø•¾ù‹°’™CŒ è»Eë ,ßgáÞÔÏ:O·–y‚F*ÆÖáhp¦‚óáidö¿XKž!ʼn#¬ŒÒOí¹Ë·ûèS~,Bô§°|ª±ö&ía·R÷H¸¡(Š…ê@‹h-á×`.¼)óØyàͱÀc<Ë‚o”OûŠÖ½¬cµçמÝ0ÖÜp0žOÿãˆP½¦²ì0=}2sˆ€XÕ¬¥Öõ˜#™GÔQ|#4È×=ÚÁÜR½E9ühvÊë){Ú++˜k×J~ƺ‰€žË-Y»Ñ:ºáÞäI‘0[9{ð™GŸçÎìc×MÀ-äü­6vb¥5Œ­³20Âö­-¶uþ8žub^ÚKÌp µpœ3èиe“¥ÖAZ\–ä8Šz;i4&¡9L V£éÃt †³£³ÙêÇF¬aL5÷`ÎZajºC˜›l·XI/gñ”Øœãf¡LaC½“çÔE}ØÏs¸ç(gq·M+YiÇÄXY:c›ÛÑft²â'T”¶ ¥bµÈ$- Ú‹¹ôÔŽˆ¬µ©Ekl4¯I¼¤§ÈvF[G͹p¤uÃuqÎaßb;&²ÌÒXÁ¾<]omÕZ]M­•·ýŪÒëlr`  š’ž*ë*?Æ2Uø²2GuÜgGàGS‡…§Ý³Æq~i 3‰([e­Knµ·Çïì3í$|/µq¾\ɹ{²…0ow¾lÇ žÄÇÚEÐÛ’,·ÕÑã­bÒÙVÞó¤5¦s0õ*¤o„[ÊÎ! 
Î,«r6‘j‰"»ånkì{Òrk-ÌÒyú›¢³ÌÄvÜ9—ï‚×›ýKcm•á.õp>‰3Ë.Àû6l°3f8¿Á[n¹Å¾óïìz*FPÀps`(»Ÿüä'6í釬ô‰mÍu?qgêpú¦¦&{öÙgí‰'ž°²2É(GN}Ì»Ž.¤þ0¡(RÌZùAº N'N» ŠaóŒ\Kþ©êêÅ7¹¥sá*´‚øæÿÀ´?yæî»Êé#ê¢6òªÑZsCŸzçÿ,mjÚ ÙŠ$Išù¢’¾™’&}mÂÀ/& ÌâøawnÓˆãI9>ˆ§m&ISh¼Ÿfù7‘¯ºQXi·Æc`vÄ&Ìÿæ& ÄC9mÓ°,( IDATµW꯻­M6¶?©2ú2T’i^‚1I0~Øž»|Ðí‘x¦ñÝÞNØÊjÞîk.ùÐø`Q,ʇÅõîçõ¡¾jÝU¦œ ýEd!P{ÛVƶr4Á_èKa;˜ÏQà0}MÀ9ÍD¶úüz¤Â|ÛÉž’ƒ Ž"<ÒZ}«¦ƒ…ßÞpøÀèìÕ-[‰“Y–¶â"Ž®€aœ4žY"ÝnkcÍ?Žkd®hR9]ÌÕvæ…„¼!œ:dÚ.ú6ElÍsa»€­{ʧ15ýh¦ „jc¾KY[øÝœÿƒ×^ “ÓA jT†è©Ú3òëœ9O}Qqšê°>ÖÙö6h­L.ø;¼˜²àŠT‚Ȧ-ÂÔ3ð…Âw현nÆgW«­&*ÎU ®˜òk†¶Œ¨˜¿ÁmYx©‹öÊuÉ­ùapë~ã†þ²Ó<ÞDH„_‡‚逦¸¯ƒa¬¹!ë„=’ð-pØ.üãÒQÌ7Æ_ûêážt˜,´HO‰»ß¬r&iØ…uqêpF’ä¶êÓ"= ȹ„PçÝøïuó»ûÉ$š¨T})‡Ý…¥+ÏFa(m\4JP˜(挘NêlC‰èeןÅS;+s@ŽlH&S˜¿©=C+ˆèBZ1™TnXÕCˆÄ¨ã!,£ 4Y˜ªâ²U”ú¬¶’P·S–I)… ³HLRñí ë°¿¸³®vÁ›çâËw@pnJÓÖò#‘J‘ì7õ.„Ûïaò® L&©mý’D^fÆeåUE¿Ü™z„³z–@ uᛸƒÈ¥-.¢’¸îLúB|;6Z‡¯Š­æ­™j–smŽœ`˜É⺠<Š•À©^‚¸ôn·pß#6–£)BH˜±NÅ<dŽc`&ÔˆùeЙ Õ'9Ü•¹¶µã.4ˆ œ]áåÛhUÁM®Í‚K‚ö$}Õá©€èK`¢ÕŒf+’CžÑ¯^ùprÇl`¿¡aÆ ×̼¹)òÞh QLÑÒ¥1‘-AÛ)Íe x‰è†ÀÉ"ê—F7„Ý\Þžœ¦Yõ£‹¤üJÌQ‘*õµ¡ý:´>M2!€ÔO‡RÄJg(€ié}EÍEKÔpÇK×^{­é*¤ÓO?ÝE½ôÒKíœsÎq·›ßõ~ÓUHwÜq‡?~W`šÂý‘>#lÖ5¥.VɰÙâDDˆýš“êRpŒ‘ØÔýɳ? qå‘êFNb‘7p½Y“6õhEÊFUì»…#Ž'åˆé­…{iáçG„ FÇs˜úë.C뵯”£Ì(Òe]Ã&.1ïcFà}Í¥ý…ŰmáúP “Yº1“Æ·¸š£F(Ë=~†›Å0ÙWûù¼0ßöÑöý,îMŸí`á·7¼£V|Æ Í!i<%CsõZ’Ê)qtóÞIÓ²ŸÇ”p1ƒ*2Œ©-åPi×^çš0¼Ì¡¸Íãö—á’¿_LIÉ$íÔ?†(«#¥üšg”©ÿY/í+ë~ã†þ‚Ó(æÑܬ÷ Æ|[><Œ÷ÕŸÃáù!Ç&à˜¤ÁË*.8²Ÿœór0„¨ê“Ü`&Ù…ÞÓ­è1IDÒPY[FDP‚¸ eêüJÇTˆ?XHÎpˆ~zº8C¹¢€0î9å„04O'ñUD‚£(U’rü˜$)HD·nNÀ˜f-J¯À*)L £H¹F5T[uY·•óJLê+DÝ ™vБů¢¬ªÖzvÖr†ûÛÜ\ÓyŠ:’b#AºcÓðãÏFÛE€r(êV'mì†áÊÁ8ÅJ0N¢m™¾j¤¹¥¶#Ùl¥Ô)S˜(¾‰e’Ω3¨×‘ÚÂèJhâ¤d”‘ ¸M‹Òä*W¼ ®*•÷[ì ¡Í›d%N³Jl:"øÄíb)·³­™CG×Ybã&Ôû‹€œ\¹G`{1³í²:ìè±mO¯wj¤~¯¢=ÅþMŒÛVo-F_(tCØ#ÚÅq}0ìQ9‘Þ ³×MPzõ™KA]rÀÍq»3ØQH3˜n¢ µ68¶„UY†LgJö¢¬$œ_ûZxt‘.4Ò-„èø®CŒ2gš«hu]´?¡ 1‹þè¼>A¨§½d£3þ2ãƒéŠâu_W_B¥Åa€¨§ºiÛÓуæO«wå`Bq@)âDÜîí0q‰"´0„0G:.BçõíÈ ªšsQa~4r}‰¤;oHš=‚™Z»Œú%´ŸsÑ8¦cõÖl°õ;ˆ¼©#.เƒÔî1¢!„Q…aÎíLRb~(§—/é Z0úÐÅ™ ðöµú”3e¬²n¢•rXKqM ª çBSJ°œž­ÕÖ„Ù€ PEÛÛ`¼ÔÎ\1‡­ÂXõäb7‘¢ßÛ:ÉŸ’— #•ž“øñ¹~7Á·f‰ò ÃU„^æºçð£©î˜ŽdY fqgŸ‚ÉnïÆß’1g˜mÌyšÓDƒ:s‘±ÌЧ$k¶‰GÛÙFàž,†ð0„)¸n1ÝŒ7.„w¦ß©¸õ ÇÏ §i„´0ÖŽÙãyg-)ðöãC¨”…T¢Vú§@A-(—{ð‹†*€×>e完·òL_^¾§(sýúõDøkÄG¤Æ®ºêª!»ôÑG³vê†|æÝô àAÀƒ€¯¤e*oö¾p´ÌTüÖˆ²ê%‡9†0hFAçÊ´3 s’&>¶œeÅhȉܩ¿9¾¼²ØêF×pUZÛövkÎuÂÐõÀH'æx1f†Q‚”`wš&ÄVÊ1{Ý0²óöaÒÃ!ôD4ÕÙ„b•'™„ÃÀ‡5 õñ©¨–QÎ_YêØq D:­¶\L^šƒÒ{±ÝND`<²0=%„v¯Â—±Êа[h!|Eo_~j-˜Rr® u´Á|t…ë,CX„¶)ƒ¶¬‡â­ ⎑g§ìÍaœüÑ*4˜è3„ÖŽâ/lÄñx½ñc 戚D/±´;i9 ëx¼ ||ÅXêŒ Z. ãÔ‰éªÍ '&`¬Ê'Yiõê¯@Ã*øä`d²›%Âf¦¾Höìœ'(¦Žû=ø:ucZëçè_qæ™ ÖÜœ²6µNgêÆù\¹qê_i2ÉTøþEnÀ 6Ìa’i4~œNšèÀûl;}N+1M-aœ#´Ðበ>:Ǭ·ã,‘[Ü1˜ÂÄ7I¡XðÚúÓÁ±#ÝÙ"ë7X)ÑZ“}Ý0ý¨€‰ÜˆÉ®ü ZüÕ–Æç2F”ÒðHV4“ À¼G ‚õ‰µŒ¥Š`ª”¾Ý„ ÏÀ¼ï$¤Z+Lc"¤÷«ÝßµÃîcàÁôCu^Ì^4µ/}éKC=öîyð àAÀƒ€7~Ì™K3ööËÞ𪽠=¼i pH1„ JÑÚÒ Ñ¦N¾}hæÒüØSRJ„Hüƒ0wñâ¸ÕÖ×q ¦›5ø¥Et£=ްøóqÀq ÿÆKb6v|£UV¡%„ñ«¬.sað‹aŒ¡´¤´ÈJ ·×AsÖ£Ø!'g_äÔ¾â:5áD++ƒEdM…VëO‚K$ãPÕk­kÛººaÞš–4M%™âb«¨™âú'f²§æ(kÝ2Ûºwn‚á…ÑF“K£Bqî×L|bCÏXŒ:ÊÊëaL;¬¹sÚÁV˜p˜³âR«l˜kÅœ1A»ÚÚ=Ö¶ã㘆ÁÏ ÉMcÚ[6öxžv _s϶¢¹ÖܵÓúàár:ñ_yÝ೓Èy=À3€F´Ýz C„–2.hE{Ôš†‡b#L5Ì%þ“њɯdÅÀ$Š0XRgñY0ª0’¹ë+F P5ÍŠGMsf¼{tð0ú1ð`úèÛ^W=xð àAÀƒ€‡4©(£=„ÐüÓ"ª'çq)Í™€éL‹›Ta“§Oæ|žr4Za²ˆÃ†ÑþÉPŠãÐjV1iA4!Úµ’ÊzÎY,Æ%?`ŠËª`Š`,C†½GÇL‹ ¦˜4¬¼Ý_ßîr¼$€O´¸Œ@9%˜l¶Y¢³wú\ŸƒŒgQ)QÎ! 
DùLt¶¸ñWÓ}ŒUœSjKË«€°îÝmÛkÔ{ø~ª]2}-.¯F˜€_"f¸ºÔõ;ÌuÅab{Ñ &è—k+š@?eGh—Ê—¶4@ËDG >«­ÔÓC7XÇhI¹Åué,ü‰íî¾y¿Ì(£oÞ^z-ó àAÀƒ€<xC7Ê(š:E•¿_weЂÃI´”Úè1£¦N¼L@E¸ëyŸ aüå~˜œ þ€˜yêYfPŸâËy§¤„ !bX€–òmÞ´‰c`.ð±ÓñÉT»U•»éG®ãP{!á ̦Î1Ô™~ªÎÏ¡Á~‚±„9IAF20r*ÓO]Ê'Gç%晾Jú‘}SŸè9m ‘Gmƒ[T”–¢<~þ¹óaDZ·®´â¾UVm«­–#'d«’ Z¸æ³QìZƒDõl‘ × B¬ëfYÍØ0¦2 ͇î’ïãBû†b5‰”GýàE8Dh:øŠu$X0L%LÏÝyœ(¶8¸Ã8êTÈÁI>œnÜ`HÀ%€²&Lï( w\tÏ€ŽÀ þ Wœðw:ïQ ¡rˆYsçBr/„Y¢ü+`ƵJï(h ïIÙeœeúšo‹ú T{X‡£0†qž÷3¹z?H:Ÿ|šO1Ì}Å`ŠAϳ„ÂîÏRJ÷ÁCáýö àAÀƒ€<x8Ì pèQ¯pbsøêð÷§ÊÆ!ð‹ÐhÅЬ9GþCË‹EÑ'z ‚@DžuùbBÓ#"Ÿ,b¼B„ÿÍ?Íû :&&‹f‹Cȳ8ßù8óO¦¤Å¥ÅÔ©úŠÄž¹r\M®RU “ÁÉÎ~BwÃMì*SyÜ?4oޱ‰Éáç'–¦À&ñ–{æÊpù`VЊåKQWh-0PP“¬LC9v"k'bf§x'X'Wœr¹¤c*pã㈠íÄç­+\M˜ð hò`ºÐäéh åUýý­ËÃ%ÿúЙ˜$¿c~ò-/ÔÇËŽiTûùWÂiõà Ÿwp¡î]åw}§Ž).ô¸¿Ïî99ùâyN‘ýI¥úŘK£©h¢ø<úÝXç3¨|×7˜9™¨rl,… lKÏyß1†ŒY"ù¿ŽTgTŸ1º sE÷óy4vúå%<xð àAÀƒ€‡)†P \1~}‘íp  à·Wf£Gcâ—9!ìÐ0¹cP`Ò†KŽ™È³SdéÌ iÂ?0Ä!áf0ƒq´_cÆ¶Š Laƒ#hƒòå _ŸÚQÖ2\›òLÕÞe8Mi1‰ìÁokoÚÚ.£-…$Í™’Žeèâ0ç^‚Å$££,6þ${>‚•uh¹t å«›êŸ4wûJû‡Áeä‡pø²óχi7õ/0øÄðþJö¯ìáëV1{ΕÁ­÷~{ð àAÀƒ€<x8t 0 Uýæì@¿´N:ÁŽšÞîÌ0Eôá¿WS« §öBãe²ÉGM%L~­ó;ƒ(3ÈšºZ|Õ*0!Ü­¡:x©kÛ*çÛüù’½œ»˜·_Ì—Cè´—E!‚ÔÔp¥Þꪬ¦_Ëjüႜ÷‡6ÏK<xð àAÀƒ€<¾8¤‚ÊHs—1“†¬ sš*ù–É\ð 'ç#&?4˜+)ÜTƒ˜Â¼fïà×w@ͧ] D“&HJ2w”úíÌ)Ñžú¸”Ef‘yT§÷ó‚€Tæ.¯±<xð àAÀƒ€ƒC6¨ŒX2gº¦ŠRbœót0 ;ØeÀ˜üø²áûè%<xð àAÀƒ€<¼R&£¯¦ƒÞ;<¼1ÈdòGŠä1ycêôj9ø(ø.ëS^ò àAÀƒ€ÞÚð·öøz½ó ð†@@Ì`OOµµµWQyeÖ½—óÒ¯’W1€aŽãÑUV¦sA‹^}aÞ›<xð àAÀƒÀ!!<$†Ék¤7/:::¬··×]b"üIŒ…4L^:´ F>™LºkÇŽîlÖòòrOSxh £×Z<xð p@ð——Ùƒ€¤óÐÝÝí´•••.Ú¯Ç  th~†ˆ`F9Ö'd­­­ŽÁ“tçsîGiƒ»ººÜ<Ðï·r’€£ÁG0ÙŸ$I`’H$,N{šó€öZ`¬y˜J¥<ß7âQÁÔ\k¥¸¸Øá’ýjmh?)Œ£öÏÊä1Åô9«—¬Cãú¡Z¡±)à­»‚ËÈPy½{¯ IaMiŒögMi=ißÑþ£1j=?~—%Ǿ¶qòÞö pXC@‚Œ‚=‰HÎ}:¬ArÈw¾ÀÐk<õ]›Š6£D‚ˆ7]Úl4´Ù( µáòéï€6àBÿô)Ø&ÃmÌÊ3F‚£G4<^+ŒoÆ#Ãøõ~ª1Ô%‘ö]Z'Z/Ã%[Ap¢Oý.¬µáÞñî<ÖÆª÷ø~pMZcº ø_ã¥ñ.¼;8¿÷ûµA °žc­£ýÙ{4>Ë-í=í©cæ1„¯mœ¼·=¶"‘¹¨¤ŠCi[À¼:® HÒÈŠŠ çª ¨ Ó¸wvvî’5×ÕwiE‰ÒÆ;0‰Èճ†üjÖÇÀò·ïa,¦ZpÜŒ®‡ÌÞŒýñªµR  ÅîøäÍ4n/ázá6i š¿m,Wü?oôwá8Á]Â0í/<9°z&œYX{²÷ A] ,ÚûîAÀƒ€á!0˜Y>çÞO:;’¶ySÞ¤aï§{ßIg† Z’ñYo‡ÏýCÀ6býƒ!ô‰Ió W\i6úi³IS}öÁ§¬6ŒÏ㾘»tÀ>ê·,Iì >ÙgcŠsÝß›n ØöÕûÓŸ|6¶&gçÎ3¹w_¥;™Aµ[ÌŽ6%1‹‡+^؈ E'mÊ’´g¦38¿÷{o`,i·ã½ás(Ü~a*†Pš¥¡RŸh¼ã“Í›7Ûï~÷»¡^{Õ÷.ºè"khhxÕï¿•_,Œ—ú¨qŒûu_xM÷5®ƒÇKϽôúB@0×þ«ñÑ>38 WꙄ/C­©ÁùþöÂÐð¾{ð ð†A ·· )Wó>7g!=Ù줡Ûó—A“·q•Ù:rŠÍÊ, ó–‚Ù°ÂlÑ“«í·£/ÊXE]ÆûÂ|)¿-~"d¿þ~À:KÍf^ž´Ú¸YÔvkʆjL6᳞6Ÿ­Ý@Ý->KgÈUxe_LèP¸·KIG9#¥|ì#&WÖ>Ê®I¥­Ñfs I’Ë‚F`¤wÅlŽ5jP <»»`Ò“,\Jêj(š³0𨒢ÁÌ^Ʋ‹1é“€ ÿ±Þ‰–ò¢HÿÍyzüÖË"c°âòŒ…0JFÕì•ÔFÁb(¢I›²ž)Ï&•×ÔÔ4d¹¥¥¥î(ˤ¹´Ï:ÛýÖÃü/´ÞOŸÔçÒ¸´Ý…»<çkÎA‘@9Ì~&7‡tì½ý,Þe;˜0Þºu«ó‹RÁ2ƒ®¯¯wŸÒ‚ÈôMÏ5ç¤óRSS3¨©yàôu0ÿúçR€ùVZ$XêêÞ°„xŸ ‚'æ×ÞsrP±#üÌaýèñ¹ËL•—1§ü»Çn¸Ws˜5¤“~Ö ^°n•ÂÐïi®ø­ƒõ‘eªC9L 3ý=®ô»¯1C¨õ08i®„O6mÚd?øÁl̘1¯™ùP]7n´N8a=Gø¡¯WëÞ¬/Ëü(üC\ÅÅipã*¡#x¡·ïX¡””0ÖÐà~½~¿Úõ)†C¸]ûîP¸­€óFÂm‰D†ýzx(Äãy¼¨²ä=T*))p¸ƒ×:[ý–eúËj‡ÿ{%=Þ²F—íUÐ7ÜZìf=öR7–FåeXGýÍ^JPVXSÊ ªËû"‹†‹w˃€¯?ùûõ­¿¶uëÖXØúõëmüøyöá¿Zlååc÷Î+¢L&Eœ6‰£ß‹6p²Ùt4ƒ4ûýÃf[šÍ:»}öã[ŠlÆÇ0ó+™¨ò±é±©TP^WtX‚Ї“0ª.ò¡±1úD¤C ¾–”†ÈOC¼clxù*ö*.G]Yˆ–ÀfªYvÓ¾¾‘ËÙ«à7´ ˆ†·÷ëë7Þh×_ý>óJ‚ÿøãï™ð%·—ÚRb/¾´‡^Î?–±æô³úlÞ»öáwµ‰^Û•D·¾Ra7|+n/­òÙš–ü#½sögºlî¼^»ð0ôF"bÏß]bß·uŒÛUÿ²É¦7d,6 ß« W°ŠhÒ=={5L³LÎ;ïóûô§?=àÎÞ_3]kÝPd7_Wj·?‰`:\](b>¾ý‹vÕ%mV‡ íµ»“>š@æfqS¢;ˆÀFŸ±ò’ýoËW¾ƒ ci†þõ_ÿÕUÿ±}Ìžxâ »ú꫃pñÅÛÉ'ŸìæÜoû[{úé§íÖ[o¢©>{ú¦z[¸"`«ºrvÄ…möñ :-#¥$ÁÔ376ÚÏÂ7Œë³±G'ìïk¢œý»Õ³©Üî»%jþCØ’'eì«_n²ÆŠô®±®”T[ÌVNÛ$!ÈË/¿l:ãöµ$™@Ž7n¯"ÚW–ÙÓ÷Çì‘¿„íñÅùÇÕsÌ&Ïí³ÿõ6›^G´à¦{âþ"{ìɈ¥&÷ÙÕ½ÃjKÓn]íUà›äFa}f|AK÷}pûpãUÀw…qª»O=ÞfÛ·õKìe…|ö®÷Ö¡åò9!Ìm·Ý6(Â\ŽW]uÕf⹞¨ýæÕ¶©ƒ9 /ø2Å IDATG>²GÒ ì€©ÿ+ðö„º´U ÉÄäÖâ¯cvß_ðã;)e_ýJ“U!ˆ,àуXÕ>‹:˜xq`e¢+¼äAÀƒ€ÿSüüç?·dbT9ê¸AI’Ê¿ÿÜíöÀ}]hþP÷‘ÂွçâñîsPv;å ;n~ƦÀ¤ÞÎÆ~CÌnûcÀ_n¶ë£öËð ܲ…÷†íÆŸ™í¤ñmò5<õ²œ]yYŸÝýÕ°=ð°ßVs?Ë&óÝOÅíêk{mJƒŽG£öí¯çßÓ&C9ÿ2³¿ù×øíûoçZ³_-n«× ¡¤ I,?þ“¤5;mÕÅhF¯¼¿Ø¾÷-³Í¸RJN*Â]e]vy¯3’ÌÙ²?ÛM7˜-¡¶>Wl¿ø®ÙRÀ³žF‰=÷“f§Ÿœ´y3“pR¶«œ•ëÌZÕðåœD9•Ý|ý’ˆï£>ân¹ýêæ%vÙÓmŲÛ²¹ËÎ}Çî½b7ýòKVU²)S¦8]æbNSˆú"ѱï]^jÏm X+ý)$ 
õ—<¶u+C¶ÞîKwX4DDÐöˆm~ªÒ®f,¥!”V§ôÎC7ÆíÙûböÀ;"vÝÇ8oû⦠~{aãÁóf$ýú‚ÄDbT>þñ;‚WDëücçRhëÀO1ÔþóŸ]~Ý¿å–[\´¹þð‡.›˜Ú‡·{éÛó´¿_)#rPÚ»Pl[€Ñ;Né±sfsdL¢È¾ze™­Ùæ·ÆY½öÑïî°Ì瑉„=Eöƒ/–Øâ—–ñuÍÍM6Í|˜9õfKÒü ÆšK .tÍ{î¹çÜo 'ôü©§žr‚)1ƒ§Ÿ~ºvÚiŽ)×; ´;åX»¬Cx¼—^ðÙ†µ¥vñ9V ® "ôIvFìO7ò XO¬ÏØ‘cLÔÝ…ì÷·LKͶmñ^¹7`½×€"%¥ÚGÊv¬msÐ^Xì³MÓCv.°¡’4É][böÌæB–ÞÖ õ]N{Žãàö_zé¥nŒ®¸âŠ=½ôÒKvÍ5ª?éùà«z¾k-nc},Ù„°÷~¬¾ÈBÈöVJCøVM¯/Q¼ímo³ûïÛaÛ¶BI ‘ÊËGaZ"²6O`E" v2DFn…!ΈóaEÒ’A¼p†Ù•f!ýí‚1ÛòrÔ^Zà·;ƒ„r‹Ù¸Ì<Û¾Íì6L¬6ç8„ý.¿ídãqÖ<Êý³ÃÖ‚Ôñ¿¿¦iƒJKËl¥Ìefwý"³¿š˜'¥³0;4ûÛV– ª úÜþë aK#g3Ž Xý¦}íÛ>{b¬´s°QäyàfÞÍ…mÎ ³Œ5ûúw}¶’ö×OB6 ñôåWf›¶CÈ`žöžj³/~™<”#SDzF6ÅåføOÎå!{ñì }€{…r¢ü³hûÀr–wùíóçÁù¾ŽiÇŽvóÍ7Ûƒ>Œïh§=±­DgB,m¿û}Ü6mì°Íë­~ô4çÿ 3á‚™Y&°ŽU¥öLSÀš»Íj&¦íÝÛas0ÿ}üæ*{a©ß–möÙ ÿ]lÍtØ(l†›×…ìßcßžïÔÑç&l>}œQž³»¾QiOnôYó*6vê~üÜv;¥ó:M±^Æëu„ÃÀ¢Ú]DÏw¿û]N\Â-[˜”¤YÌãIL§yìÒw3íŸá’¹íŠ+ìßþíß\iºfÏží¾ëO Êšõ{næ¢ü¾ìŸ[mv#g†íÚÓ¿-·{úlá-q›OÛq0„e”ŸÚ°–-~‹7âÄüΓZÃ,.=Vâ½4œz¶À¤0ç|âù‰¤?”’h´õV3¬ƒM,]ºÔV¯^íÐë®»Î>úѺ¶IÓªß?úÑœðá”SNqLƒî=ùä“N#%fQBŒ/|á 6mÚ4ÞË÷mÜq «Xµô“AkGаºÂ"ˆÄ$Ñÿ6Û:<·ªJÙÜÉL¦×bc:ì„s#V4&l™â>«Æ²a¿˜Æ=¾3«ÎÌÚÔ| Y·tõ0·„£Ò €½Lí`tÓ½Œãó0à•=RŽ<}L„ ,)(ÑM…=r¼2÷÷dÉÛ¶m›É¬ôË_þ²«à›ßü¦=óÌ3ÎÏíûßÿ¾}ö³ŸRÓ.V.•Ùíß ÁÔ3^àë8°½äï;mLG±ýæw~Û¸<Ú´[þ³Òޏ˜¹ìÀ)ÌXãùÄZx=R¦i¡áDZaÌ‚bL ã¬Æ XŸ-%øf±>e@‚éÇæ`wáw_P2´—5ˆ„/þð‡ìmçž`_|‘ÓØJ\HÏ>û¬ÚH )<(æ±,ÎbN³¤rŒÁŒs36:¦‘,$™ú§mœ` ¸ó`'­ÅyçF-6.hò^+æ»ë>°Ú²ÐYæ·a^•EP†y©ñ°d‡É}pn¿QõœÖz¥xð ð–…@ÓÖ•X™=Ü¡:«óuV¬X‚©Ñ$uÉlRW!ű4аˆ”ɲ¡´ø¥ "®„\5Ñlö±òÙòÙ¶0~в­óÛ²VBŽÁ4$©bâ´Œ•øñ…Òë€!©‡_á19ë^£㸈wW¬ƒhÝ_+•õ5³1“±j6ަd¯,„È@bÿÊ*? Y%¾Híð¬0ƒã§›“³Õ/úlüÀjÊ*ƒ™Å]Ζ®†`cÓŸ ã7~ ›ÐR´}lY$ÜÔÑDÝ//†Ö£5õfsÎZ%ϯ3[þ¦¡0€o‡B+”S\7¸ü¯vk+@yߟy¢íJw§­]÷"¾£vùØuŠ‚&UU•Y]]ÝÞæapºén "p)uæžsa{§¬¹-dë6ã´9`ÝŒk²'l;C®Ï*·nrÊ&ÏIÚјˆŠ!Ür|ÊVµã˜ßŒÖVŸ­Ø¶ªu öîMþõ$jÕ&%™FÍ›7϶b€&1ƒ³Ù¡gó¹‰Ï…}ô_LãÚµk]Ö‰'ö3+ù7Å8÷â¥á BgÍ>¶×Ž—±ôö%V¦í±EAëhâ9k ‹L]«ÑQ®ÀÚ‹¯ÚÆ•QTŸº1;Ýî³–êëoT´¿™êœÕT¥mûªu“_ÏÒ¼¿~yÄÆŒå¬¬Nü|`’ü˜¨“r¾<)´S;7!LáhœÒg1˜NYOwnàÓŇ‘Ðr BøTbž7ztÒÂh«÷ø©¿]#}ˆÖ¹]ÒøIóZ&"“« &¸ ?:k³`J¨O1‘ò)œ:uª½ð {1EõI«(Y5ŽÈm˜$o–Ó€Kƶ«)ä?¡*ÌÍ+r6 íŒR÷Έ5mbAÌàYF_Ë1ÿ,V¾Þ­]ÎÑ ¼#ÿç Äú6`ß0³c˜¬ æl†:ÀEóR~oÝ­aÛ°ž¹.|Ç=Г©Ìzà'+—‘Ã<|dž°­íô[+šM¡ÌzÖCY<—× œë…ï}akÛî·–Œ)tyP~Sõø#b\S™÷3ìFkßJ;wîôY7ýF1;¦ÊSWË‘D…ÂÔŽƒäƒ\ÐòŽTœÖÕñÇ¿‡ÉáùÁ©Žˆ-Y™1×À\™vøa xyñË·@ž  7½¶îgÀõ3\‚uÓÚˆul šò7¬˜‚YϺy+3õ´Id]nlÄÌ”¡¶ª•²(;ݰõ+9J€ñ ‘?ÃÚhŽÕg¡î°õ²Ž6#àL%()¢ìRV­ Á4Ù²Ƴi=c#FÆ®¨’<•Œ5B¥ëËu·>DZö")ütû×ÑÁ1u–ïuyuÖF7¤,Â\8I{¹jrÖjë7¼ÌÞ}œ‰È *›H‰á3¸—É*0Ô.¥)&GÐkGˆ¡Ý5Çò a>½‘LÀúÐ’oÚÀ0‡Aò€6­]RYR%B²*æp þµJIæz B4ª6‚‹[¬¡2Ÿ²âQŽª‰œ¥ÌZ Ÿa­k ËXeû8V¿ó­àTWyé‚Û¯A1V=!mµø4†…ð$4iÛ¦Õà ÅÒþ*ðfc&³sM±.ÖÔÆ à p@ŠÊ5þÌúzæþîêËÁH‚¯ÜqÊËËÝX©L!<õÊð àA`¿ É_RŒKA‹Dâø³e0ߺÃ~vãõ6vì»î{»×Ê¢€º Hd¾%ß²øÇÏÛçÿþ<+§ÒÈ7Ìß ÄŒ6n%ÕÜ "nœ`vÞ;Ù`Ž.yWÒžX¶—!d׈8&²‡~°mß Ú†8¸[6qkÔÚÐ(žÏÚØôÞº;‚¶`YÀaÒ•“0³Ð5¾ŠPøÜ ½6‹º¢˜äýøm1»y“Ù“O±áCÌ?–(¤gúl{æì9›ŒÏ]wDl™Ê¢I˜ÅëØ(Sˆ:F;ª)kÊÇÌ&@ØUO„ižOüÁM`tkN£œã³ö‰÷öYÓô˜}î;˜=>ó¸‚`íÑD96ÈÁåL<’‡¯sú§ú''ñ]»v]pþ…vþù@x³ñ‹†HJå?Ç4º1WP]~EÚû}0c“ØÌ§¤mryÚú€ÍñïïÆ¼3f[ÐÚ´ìA¦‘]›‹lÛ†¨=×ß§·¾ÍNžÙg3Gå'Âù_k%ðO¥` —Áø?úH‰]2•]ú NZ…ã)D@éw!ã –BuûŒÙÿBh0%L´&”d^¥  :¿Í%™ýŠÀ'ñ¬ŒM…yS€…0f§|¸Ëî¿§ß7Ê€ZiÞ±G¾Sn+ލçMëÂvÓuuÖðÝM–y©Üîÿmm®Ï  Á»E¸qÒI»à¼N»çú2[²¦…÷0D?ýçQVþÉ^[õlОy$hñãúì+_Úf¿µ1ú×»‹yxÍO7Ùd˜ŠÒÞ€=ûûZûÀp0†LwGè{iÊ>ò×ۉЛ¶r²Ì$Ÿ@ÁZZg™¬éSÚY1Ø‚ãwÞéÔ+¯¼Ò.¼ðB{ä‘Gvi¨¶Ã_Ým“j‚vJyÄ~Ï:^´¸ÈæÕw[yà y|T}RÒF‘¶zæuaÆÒjíçÿ…ßẼGð<ž¾žŠ¹éüã{¬xK™ýû'ã;3:ðAùÚÝú€ÙgÄÜÆzå³aó Ú 'vY p#öâ=5vý÷G€'Óæ(S@e^ùÑíVÍœV’`,±³è_Ä,‰YúCà=ùð÷wÚIG÷ÙôQdØ+‘ƒ6o|®Ü¼-bÜó)ÆIìÇôÙ{ßÞn¥Ü[ú`•ýåž =üD.]®znÚ¦Ûg—| ™È̪ïࣘõo}ë[ÌËíÌ3Ï´÷¾÷½ö©O}jW¤)”†ðÚk¯Ýuo¨/Îâ`uÜ^è8óô„þÁN;c:ÈÝúG»êì¥ç¢öý_ó¸uã <“-f¿ù¯[®gb¨Þ÷V{ïé=ÖP‘Ahˆiã}µnl0„p§:ޱ¹äC;mR)‚­UEöÿ>Yd5—`ªß‡ ŸÔåì«÷l¶È‹å¶þùˆÝüK6À4 LGÏÎØôSSöQÖÆ„‰M‹Jm邈ýòaã”#76SNNÙqç&íÊwwÙ¬O £ÖgÝÿÛbÑíEÖôT©ýðßó~xbxJ`6æ^˜²«>¶ÍލÊô`9Úß I{4Z CX·t%æÚ;Àyyœ¬<ëÖ­sªß{ï½.Qá½ÁŸ¹SßµÓ&ó€îïJ>ðŸ,„’-E¶uUÜ®ÿv‘ùª`Æ™ÒŒÕæ¿füÙŸèbê¶ã§õ²¿"ÔYXn÷þ6bwß… øîˆSÍÆÀÐ%Øgû;ÿ›ÛìølÔV¿³Ç„,6!`'Úi¹æ¸­YµŸâ–¢zàmÍ,s¸$àºð+ö~üwª 
äiÍ•öÿ±µ”KÌ97no¿¦ÇÞÿnü[ëaøX·‹ïÓº&Z9x4éÆÎûRö¡ì´#Æ÷Ù(æÛkMbESÝpà ö•¯|Å1…*“®{Ƀ€o ´!ü÷M+\e¥EgÛeï?Íþóߟ‡žÃn %D’Õ¬Y³Ü§6ö~ðƒî·žÝu×]úxUI„8Þ&an¸ÉáRÌB_`sÿý"–ØvAð `”rwNT˜ÿé¢üEÊ$„Më (á»ÿ3‚¤ž _Œà $‚|Ô¥fÓØ!j¡è“HãÏx¿Ù¯~˜Ï¨èƒE•HrŸ ÙýkÍîd[óK‰ö¾È*ÉãÝ)g¦l޷¶€ßOü™ í!,í\'^x<’Dšª[›! ·>ì·‡¯9†2•¯Í/%Ó–y;.p ÒÚÕfßÿÂÀr|Ÿ$è¼þI¾¢7Ýôk{û¹_±SO=ÒÖ®éÄL8¿`½ÝqÛ:;íŒz{ü‰{]Pýè¿UØø»왥EvÙ=vÂ{r6gN·?§Ã¾zi³Up4Hš¬í¶ý‰¡°3ŽG›XH¾pÂÎzÒÞ2æû wßøÏ¦¦&ç+#SFIË5÷ Awîc^meÏ¡7‹ÉíožòÉäQI&Žò±”o¡’`¤9î4áDÖ½üœ›û¶œEû1}zÂþî7›­ŠH£ÑPкð£meb¹yÏ;i*hy ­àKõö??ØŠWИ2wf‚è1ž-ä|ÍÌ­·¬áI¤×0CJ"X[^b¶£y@£´h9 Í64’ŸƒƒoM"•ßú,šÏvæd綸u®,¶(MÒ¯SßÉ\§œ•w›={+ïU•Ø»Oë±wÌ›zpÓ>ð=‚ ŸHÀôï =!)xÉØ±cG®” cçæìXÖßï~ê³çñ~ÿѽ~/Ü‘u>Œž4­9ˆýŽêìk×l'D`Q9–sòð|þ6´‚‹í©aûÊŒ°->=¿ A,³6wçó*MÓ ¿=ñ8 ˆ@È:ÿš26WÙâg£öU˜A­ñ#Ž%?Än'ëú±›CÖ”+³ó§¥—¦ˆÖÞñ(fè|—`én4Ãnùr¥m½¼Ó:/í¶‰üÞ#Á ¶/¬³/-b[šxSrÄt½Âøü.jËŸ ÙêN³kgEì»ÿÙ:Öðæ»?€/ï/‰êü`Ð6¯Á/^jÿrQ‡Å´ßd)~— ’ˆà©õi;s“±?5ž¾ÃªçùmúEy¹¨½Ü^òïOÏ>…öt‚ŒÎ6¢Éùí?VXý%íøÆ"K.)±¿ÿÖ 7épäiyÜ}Kȶô–Ú{ŽõÙÖßbÆÛ3Œ>ã"F3ˆYgzeýòF˜¼¥-ÐÓe·KpÞ£À9Q[fŸ›‚/5¦ð/-!? ~òÌüج}:d›W„lZÀ¢ë3G;µ>Û—UÙƒìÏ·³îhӌ׆¿ 4ÄBâIÖ]GU©}ó²v«d¯:X©ƒ£¥~þ“…»Š#rÍÕ·ºc6® ÚÚU;­­“ÉÝŸ,X`¯¼ò f¥¶_þ’É4LênØ/¿ÑhWëÅmtHEJŽé´¼­-+þ°M [Ä3­'>ÄGMâ“ebàc]JY)×#Z*ìگƜFV¬i?«5[Ëwù$Æ«ˆ¾#ds2QÛ°ì³²ÑÐ~ŠñCKߎuÉóÀ·POc9žŸk`Äïüf©kLج©+]^iû…¦Õ0Ž¢'²ÉkMÝÿc˜ôö¬Íž–µ‹F‡íë×cÂΜ:—öÂñjü_ùcÈ~†ÉÐÜãƒöés ;•¼Ê$Í ˜ÁÅ‹ïå5?Û_e¡Þk<x8Pì–†0é 9 a~[ÚF³° Á— —L%½Wš9sæ>M†Øÿœ«Œ¸¤ ;ÿÓgÏ=¾fó(›‡á[Ỏý}›âÇÞ“°–‡ÐÌ-òÛ-®. 3îÞÿ-a¼‚sW< ¹“Mø½¼÷Þ^ˬbS×è‡ì¯ðÁ¦Ò×}-&§ÜÑݼ%¿ñ+K'›û3¿Ä¯kæ[gCŸˆOŒê†Û¢»ÊÒ; LÃNø8~޼#)s"|!}yv…Ù‹÷¡}¡½3 ÎP,Ù„sÍŽš™µKNì÷+Œ$ &Š`’ªxwþ'Ø„('Ç÷¢=ÊÁ4‰@-çý·5æžæcjëÁLò á%ûÖw®°'ŸØ`GÏ-Ál.f‹^Þjíqîsg3Lv7ÃŽŠ0q›¾Ó>ú:»ý.$ºÛñù„ùxävˆðûãhcÛ›xNÒ>ñþ6ëÜŠ9-}*$ÑrZÐýr„qÁÔŠYXÜ üýšª««í§?ýé>„…ŒšÎxAL=™±¥Ÿ¡§x1¡c ÄÒ`Âè­6ÿŒ´Õ%v K¦ ³²ç2[ü4e „bôåV›€­é±³ýV;.k‘ꔽƒ M­(³—ð3Ü€/åíÿ³÷ðQUéÛÏÌdJfÒ{O€!ôÞ‹XPÀ‚ a\ËÚw×µ¯ºÛW]uíŠ ÅŽ¢ "ŠÒAJ :!„ô^g&å{Þ;™I!@Âú¡çýergî=í>çÜsÏ{Þ6ã…TÿÝ;)¯æî÷õÏä!Ù€Ý:%¾\HHQå’'Z¼>r^ÎUÑö°b¿Icfä1 Œ©Ft$?FìáB¨GR âdUuèÑGõhHñ2ŸHHƒ‘#GjµÉ\Ó Œs ~H¼^òFÉ&2EG¬(æ3¿M¸-ÒJ9’„ÔPÂ0b8™9‹ÁQ5M Û;üñ 1—0'•\¼Ê3)R>'q K¨Á¹óè7®q^8HLæò¡~uú:ørsj,ÇEU ƒ›(Õ(cˆœíY”ò¹–òœT5t“0…—ÝSˆž½œ*óÂçwc-¢»h+½R®qî”rd{ÉÈlx™ê¢yä©nK'X7ÒQSöʼ»ˆÏÅ…ÛßöÁáßS‚UÎô,ßFu·ðîUˆžcF:ç#øS‚Ïû[»Î$Qã½ï¾û4†AT ?ýôS­?Ý6„m­Kó±›D=YæW7éé”Ê›óC,U`…*¹pwߊoúWúr¨R³äã¿ùc+ñ,/§ã+öŸ…Ò«±£ù¨²oÆM¬†Õi×îòÅwÜì«¢£ƒÏ†8#©e‹fGoöߨó+чê¤ÖœPÔR]ÔÎr¼É˜i˜Î2!8—²ß{FÕa+™È܃T³¦jdÜ('n™SŒìCðÃj=Ö1I™o£ϔ âß>HãóYÌt·½’ƒð¬îðyer«¡Ý*ïϹ–sÄ:/ëÎM#™;‹DµwÞ¼Yxê_¯bûöít®¶¹G h 1B«bÑ—Ï ,Ü„=õ°§ÊÄÄDüðÚª¯G;Äsµñ‹£ŠÏßiîNá%o*„ë‰K΢ -'zJ Å©–$™ÁñŸÄñEú½[isÎþÊ$“—ÂMÃ6+¥‡Ä“jÙ½†80ov1,”0.xÆŠ”½Ô¾áÆP@“>“ñ.’pm~ã9íÙm¨çŽ‹DJݽé|æ-Öó37 öerÙϥõ#ª³Ýác˜0º#9ÿJ¿=ý6Я¼aÜÅM³ ¨ Îq"kŸ 'bûÐ&ò:öñ™Š¡ÆGB+ê­’´´4M +Z)nêÄîw©Ž îf—•bá»é¤ÉO£-Dt‡rŸjÝÓ¯™M—´ªC P™Ž‹@ Åiß}ÿºæ:_t×›ÒBª' ‰ —ìÚ‹êœØþˆ¼$„´ã‰þýô‘­7à'¾ÐU^Xý#U5È@XƒiKw5߯+(!SWÎy›Øð·°—‹3ÍùÂBH›üåÈÀÏËi3”J¯tGÆ€EtåV»¿•6ÏmXXÈ I[ó slUðw…»GprçBzñf.ø8Ÿ'&Ö"žï悸È~ˆ/Õvl:X\pgðþ™FʪácN Ð=\°¼îÉd {òû^2ƒÈT2/} Ûp¾«¾ £ÀúÊB¸‹L۰Ÿ¹«™Ïú¸ËÀNYÐlm('(Œeq÷±±ª­Ê"î4“xl§AÆ]Þ}bðã÷¹TU ¤DÆ×ñ…·ßYÈsÍåx³æðån°côÔ˜#è ¦@£Fr×j¼Ù‡Ù‡ô˜ÍÝñáT«‹åÛZbº;D¾Éõ.Û¡íÜ.¥£•¾ ¥ÏL´ãÒEHšÿ!ȆpðHJ€ÙO=¨âÄÈ›Üh Û‰l Vºö« ç[ªfEË"•c½˜ÒÀbÚ¹2`çn2h‹ém®A`´ýÖh*ˆt;ont¡£™À,'lÕ&8st(àNüºãßÏi±kmLIvb¹8  $–ëZÔ˜ø<ÐE¿ïnâH,Oͼë(§…ähòeØ„p;b˜;›¿ÅÎ57 :íl ØÉ\Œ{ñÙ=$‹TÙw“{^qÿ0`€¦®ìþ}¼£ÑŸÞ}£+‘lò¦¤‡j^›¼5IÓ6Û·Ã7a 0Ñ6 P“¿Þezæ˜%Cf¨¨ÃA>Ûn¶‚‰Ž«V9(* á¢âÙåT!¯…}UàdEÛ@’ÆH‡@fK LÅ&d2.ÚÖÕ^|– È`Nlê IDATt¯›@'åÚ’khokGÿ>X¸PÍÌM”õœÿèH*OôãâÜ¥»Žuuœ×¸='û½äˆëWrÃd?7ŽØ?ÂLɹŠ`;"õfM%²š÷óóJ¬dDK9Fý8ö„1’öv&É&‘ØÜŠê¨,hÅ&T˜Äö’àÒÔ¡‡´³ Ìœô9OPº[Xà…Zêì³+µùC¤Q+0€ê¶I1õà#‚8Žÿ>ŸE.Ç3~6ö7=g5ôŽ ^ß!ÚãþÑ0Ñþ¹ÊJìéÀ„‰ˆ§ä¾tsb¹¹UÌÖ”6`êMUC-æ*ú˜hO–{ˆ #û¡Ž\l9l­ãff9û+RbQë->Ìg9‰d(åùÛRy>#LvDù™Ic7yî¶­6ÀÆù°ŽjŠò~éCû4ar:‹êëKˆÉ2¼9ÿi>|III´oï†W_~]«b؈áϹþ³Ï>ÓTå¤h?Èû^¼ËÌ!#à䢑þjIlÔ<I>þ`~í/éùœhƒ”'dÁ#ÎpÜäMUiï.v$r1XIûÆ,ªL 
3#‹\‘ŒtŸ¤G·(—£yŽ;“–.]ª©#»Ë‰ummê+¹ÕÆ][âÝéL5°øÙ1€¯§]G¹ÀüÙõôßC—‘dª¹pd¡ZGlò÷‡È€UsŸ±“±ìˆ53!ñú(Y©Â(³akoq±¹­ænVQ<‘M'zö‡c3°Ï¬”Ô“y “]ÃvÅ÷!3´Å5?Ó9!Î/d-ζ;Éì’Ù\‘I[ßv$ÛD)—Cúøj$Ñk²Ø`qRÜÍp Õ”nR„uÓ#qƒ›K]­VÒñ“Š÷×5kÖàœsÎñ0îžÖ-é•cSMw/v—ºÈ •ÿvF5Þ~ÄGÈÁŽ,¦SªÔT3q¥ä(KäûL˜v+oJÙD’iLÇ{i|™}'»ôÍAöÍÑC”@1“iD»%y±¬P:ƒ‰£”ÝDÜù\$„p. Ðc7Ÿ S.Eådá;¬çÙ¬P˜K6³ŽïƒÒz|—þá«åC`È¿2Îw"Ùr‘ëù `ÅDQ"L‡H,{ß&ª/ò™¾KDZуóªƒP2n;ƒ‚øŽ4¤ÿþ÷Ã,Î÷Þ{™ùÁ˜1ã2Jå?ßÃáJL~?ïŸèÑ£‡VeFFYj̾ÌÇ# ç÷s®-Ã`j…ÈûXž3aÊä¹´ñÁawxÈÀòá9ûŒI4§Vmbá/Ž]m0¿/»„PòϮ↖ƒÏ %ñü‘í)é_XŒŒ  4›‡’VŒ !iŸÛ”²*“}KàÃFjS°“|ü˜˜>4ið¢V@·@3JÈð–rÓsgŸ)ö¿öL Ñ#8Œ <ËÓš¯•ܱòñ±š0›š"dDÝÔÙϪ»Ü3êXX¿þùvüëÅ×5Ư /+—|©Ýƒ/Ÿwá%¸äÊ«Ú}O‡J¶á?gµ;_Ë ‰ÃpÛÐwZž>îïïº[6m8îõö^ðáüé7ßóEÅqhá»oiÌ ÄŽë(½òìÍô|eÄ o¶ý¾¥®…ï°î÷?ÖâÖu´î»n¹—]u9æýáÖŽ¡òµ±•zä‘G´”â9ÔM²Ø[ž¦$Ææng'#™4ù¾ÔÈÌÌÔëjpù\'qç¾è2îèrÇÎÉ*¶qˆncªAÃhÂt¹|l¼H',7Ö"º—Œ3`×22W´oý” 2­q9€ŸóÃï]„ÜÍc“½N¦²»ëý"Ëú,h?ÏñH^M£ó5šc€Ìé&Ø8ÍìcùúÁ›/®Qã©ÕòŽ.¢¤`8=:¾êÀ2/KÈt®e:ùÉ áì±µ˜~[ zÑQÊ_éùæo±œ àË÷´$°±m—Ìsâªkœˆd,³‹XÎâílÛôŸ\iÜåL½I /×ÕÎÿ?dX$ƒwÇ‹ÏmÆM·Âöm¹Ø´! ×Ìé£Ù’J8ŠÁƒ­·ì(C)¼G¸ÇäE†ZÊ?ñ— ü}„Ï¿89.´2 Hÿñ"†:Ñ÷:š^H ‡v2j^a ¦Z:C¡ûúÐà±Ö\‹†ÊYÉ 0 ï“E/— ©šzÞhª‘Ròu:Hbp¶F†Å}Í}l™ÎD†"¼¯¼!¼µ!»#݈¯6XqíXÑ×#“ÿy¥¨¼¸Tû‰ç_pºúDƶìñ»º×uÑýŸ~™¬Ìu6¬)¥ííl€9ª6ÎÀ}ïº6å9uByt$ÌD1U“®¨AÄ…Të¶òHLWSÕ0…/ˆ}Yz,üK¦ø±z[tÍ­Äã¿+`ßø"“6ÁY…Üdä†PpjÈ3J’~–ç³”¶‚¡ô@}VŒÃ}ˆ¬§c¢%´ݧú=:|ÈþšDÁÄrÉ?2ÅÇ1&,§)ÙBÍ•žx‚œ¾°•ïÞì>y•‹C'·¤^⊠á5×\±7?i›¼€¢È%aNNDµ…Ä–ã½5’œÂlÉ1c7M¸±ZMuNs™ªžÜl)ánúœ”N4.$³<Ðl‡P¯YU¸ˆáóúUiý¶m/Cj]xS£%”×GÜÎgÊ›^M¹Si,§†ûÿ{>SßþÇ9ÃÍ7²ê”çEoº9OìÖ úòu®­e¿yÓPì™qøþë/ÐkÓè¿}#žãNëüs¸máGÆGTyÄÐÿóÏ?÷H1:rW6³^»rw-üðþæg´OËrÜi[~=vd­C˜o ^›¾ ×-ƒÜ²#-“·éwlLo\=+ØvÐêµ ±jÍB-Ç«|Ьü¶‹3ßwŠŽ€8ƸöÚkµrÝuëÖ!!!á„u¹½0zyÑëd×:¼¼Ã¡Ù€µ|=h/JÖlT‰òï]ß?¬Ã¥·ÒCÕ|¼é2Ü—“!UNDB!/ÐHJ;Âhè=ÆŽI”ºó·ì,^>…öŒh¥Ëx?ÚgØ8‘‹]†äóÔ’bÂT×o¯ÚÙ¹sÌrA·Õ‘±uš»wn´#ft9>£MâQÆÓSÍÅâS‹0º—²ä Å)ˆ›"À­ï81“í,á´\ïe!ÜÍô%w*{4¢^7ê†r¼u}´IÊç®¾Ž»Údý¹ûG³1hŸr3Ë™ÁCUJ˹îiZŽËéL:2çrÔæfkEêèI4vþÇÓRöþ|¤Œî‰~evLúQ¡Ô>ðFö=H~ë¼yߟñYêNI’Ì"U§)V:Øé1ÁýgfˆS™M”òÍ݃Áý¸vÝL{Ð#z¤3½ìØM®BHDüfüéb ž"s-‹òí }¹ƒ^.ˆEØá&‘À ê[A™ñéÙ9‘x‰‹ª¦ËöèKí¸p|.Ü„ÃiÌÒîoro¢=wî\ÚDæjBw!?qŒp†å¼§4Ý5Šw87ÓøÄOnä »q¼†pS n+ÕaéTæº+¢Ð/™ B.6ó¹°‰*β ŽaH€A=«étĦý8r2ÌøðÙ$SÅIÖ/â¿ôˆ«?ŽÁ‡ïÓÛnOr<ꨶ$vi"õ“¡ZNg2ïý# ³Î­ƒ6W1­œe<ùX¬;(‘¥^Ý!¦sSéºUÿÉÏ}Äqª7b굄ҭ~T$S!4'óYˆ ÃÅ{îl§22LjDV<ŒÖéÿI×øQ‰5ëfaôh3íÓîl5y˶ˆJrWª'=ï£9ÇÌdüK¢j&™ƒ–äà<’·—Ò鬸pUËùürÎÑQEÖ%oråYJÛŒ8Ùý3a5ímS–‡#“ÏÀ†\ùµò\"í„´ë©'CÂgˆsʲ•dØqÃØÿÏ©âBµaÒreç¼P‹ ·3ŽéóiãM&ðÕ|+ Ø_ÛÅVŽÌ…|=þ4\‡7x®˜s‰/m>gÜIµDö¡™Ì a€¥/û‘ÏÏÉn£¡Ú6$ö ¥jt^âJ`z‘ ”|:Úsúv)Ä Ã¢±`+Ÿ¯ï¼q˜Žz~êˆ2|{)=/â'sä˜?s^¶Rr&w墶lÙ¹ '}³•ŒÀbndiïöµN&ÿ&$Ò>­äÆU¢¦6Êé 8o¿öÚt¸Â9~áUó'óàˆ#°|ùòãÖ,s—ŒçRöÏ#ÄpŽ8¶¡S+Ä@ºÀåÖC‚¯|xdÏyÑJ<—¡>¾¤´4ƒjÏ -˜½*¶B=2©ÚÉGTsxåÉp’/RG“WKCjJç‰EïÃø_T‘ÇKÓ,HO¢æûm7è¼¢©®ÁµE^â3e¯1bЪz¨¥T—íæŸ.‘Ï?2/6A'iT+—㢃qǼI°o\ÎÍ!Úú6¤q[Éòë=å¤:åÞÔíH\ó-íÚ‰UÙÜåÔá¬ÃiØE&ÑÁ‡‚ž„¨®& Ua;J|Ôá‡/RßÄÑéj…œÔ[š¶bD2ðéŒô‹×Ž­$oÓ)/êDH0ïö¸vSGœñr×$ºÏÁt%.‹}ƒ¬ÀšÄÔ*.¢+mÚ»t&ÍŲž64v:«ÈÕŠ ……o)QSÉ„Pß~aÚÑFû¡þvÄžç‡~Ðêo¢²°5R/΋¸† ‚çŸ}úônIâ¢ßíz_fG °¸–©Zÿ-Ì—?¶6.Bƒht¾B†Ìh¥š‡rSgî< ·GTåáœËUã~Q%‰S‹F×+’Ž;lAœàY¶m8¼[™‘ùÛ¨fgYTm20­0·"Ñq/Q,d$-¼`ÖLÛëòmH㪌/¥pÞ‹?mܨš/¡ÖÊqµìÔÿ 3h:^Q1d 8ŠzÌŠ¥ðâÜësá4 %ˆâOÀ{ðpxÚrÚD•d#<È_ó(+aq$äV2Ðf* ±O¦MÛ÷ :ÏELú:ãà»âˆËžROéÌØéåH$óè- víF&—à‚ü¸‘1ØÈ(•QRC{zMåŠk4Q.ÄÆN' TÌEGK2•ÔÁo9PZ¦jûo™ ÅJÅ4•’K ^½¹.ëÃ…Ç„q$ÇÈ·<)mç˜r§{·º¨ä±ÆUP Å5>C¨¬þÎÀxiÌ̻̬¢æ\Éù|ø%Hç q/:êMwçò%æeÜHØG&ýÆqJgdä26ëñÍaJÙ…zn,HœÁ"†V(¡wÊÚÊr ÝÈ©u7šN-BLW Æöóª”hÐÙŠ‰‹a fµúå²…2ÞY*„鬨Ï~ܲʠvwdº:ÆÑÙE7ưs%ïÔÿ矾fsf £gÝBÄD§"mg9–|E¯‹ç€C–ñKÚV'™9ï;zÑk*ÇD™‡ ƺ ãØÓFŽ,@9È¢s®XIDª÷à¢ÞA.¿^b#q3‹È— u­ÁuÄXû4 1 ®Íü­·1!=j¹ˆ§ªUÎóv걊ýTÏ>¦Orç,+…v£äñ‚ä7Iæû!º±g_Iƒb®ª»pã ÛÚ‡ª©úbרwÕÍy…C&bT ÆnðÃVz²<€ ?p#é(7_Øn†2s^"£½Á ²œNSr)½ZõµjÒ9ÎØßþ”СD泆[ÑÚÒÿ$ÄŠ„›xë­·<1"EõWÌ ºQê!¡çž{sæÌiUeTåå]‹‘ÓK‘aðE*Õ¢3wó Uô øŒ“2R›¤ÿX;ΣM™¯ƒì Ü?Ò¢fÞ’Œì«J׬tbøÑ iĄގÜT7Ï;)£sQ\=ʺ»rKY´ÒúXÊ 
éU‰ð6äÑ“S2*Ù+ޝaÄ=žL¨ý>ðñ¦zü^J÷øœW&“D¬ë(w>%Ô”¼w‰µ¡ˆ}žÃ<Ÿ£gS¢ÆñÌ'ëÛµv3äqÆeKìKé/Öqì¬×òNÿ[ÔÎ%†`wÚEöíÇÍ‘1Œõw‡æ9ù›o¾Ñâ­šÍz¿Ý{¤Q=ô®ÇæM®x¡2·‰S¹c6}ª”¶ ôb+Yx´õ–Ší¥ØÎ˳#¤a,ã°Å@4™´P²íÍ÷ʶ®óž½Ü ux!ÄÏõ'$a*ù¡iÖqÉ]ùÜÓNæžÉ0ñà ¾DiO³`¾ÆüÝx‡Vwúä1ÚbQ¦$*vDÔ)Ý›q:ª¨Ùb+ÛŒ1 ¸ ‘°!yÜùäü]¡=é„ãâÙ¥ˆ£½”¼ìÅÓaâÄL¯¦Íñ/ c;m­¤ÒÞ”Ö2½Åq1K×áyéLS[†àzQòÁõ@«O œßÉû«ÕÌ'89pà@2X1T_r`Ë–-ZJ§%ùÈBÑÃcÃÛZ0‘ôB-í£Œ¡•8Š6\dê©nt”¹1 å8.l.f¸ž]œçBTd|#΢´Ö›ó$Õ¹N@ôh;úQý,ã ="r¢ˆ\¤„žM»<¾"¼9¦&ú “‘hï”rd]‰ÞƒêÍ%eÅ uLgŽfã¿‚ ¦ ¤znp° Özœ3^_J9ì∄ pÐngÐð:œž«}§cq"NcºtéÂ÷O%¥˜ß¿rJf)9ÝÌ™Kç|%Ó\¾m$sH`½}VCŸIÉï)j,™‡&©H˜âBcðj3 )á—‘SÎ ™A¹)µ‡ =jøóFk|œè7´µ\j$&Ó/ÓÉôàåkGt×z i€‘¢VæËp•œ“3>a9ŽH(ŸJìÄQTÀ.W>?Ù\ c¾ádFYŽhmÖŠ”˜ýÙÒ×a—W`8ƒ wãBµ(±‡r³©¡n ¸í›\Œ)SŽ4àç]”XpÉ—Ò©HJ2zÙiBS̰ fL¯CØ!’±/>Ês\tëCí„D'Îeè ïs'›rÊ$c^bЇQ!w°ú7BÞ#â¼$33Ó§°å3âjò¼ßäi%¸ÁÅö°•ª“¢J-G%yÁ±ô{E%ÆÓûníazÂ¥Š»D+D4LŒ{5¸9~»R #¾K-|ÂÈ\m¢GɆ¾1q@ ;×Õ7ÂÈø³o | b( éÓè8—„F6#—côXªvÒè|KV¦^„pÓ¦ç·ó&•"<Ö‰³'éi‡ÇIjÒ7>½©5Bí‡0zª¾b½dzS ÇçÓÊŽw?Ÿ!ýËaeZ_Âûáx%3È'^”á½Ïø¢¾Tu&èTåÉÜÌêÓ—ö‘! òÐ:ÚÊù| ç¶µ8ûì³pÿýã±wß½8pp+BÚáo¢)ø¥/Kú$hïX:::n"j?[?zlåX—ùD0àæ¯Ì'zb> }¸ù’ÈX¡~5”×`ÔH+ Ôœ©çšpšXÄÓ1Ý :jâ&l®sÎgô`DÓ!œŒ_j ™X ç,nØéêK¢ÂZÏ>n:."¹ Y‰Ë§Ñ “•s*7Öj©6ïËð51ÄþÂKK‘ÏMÏJ3&NÐôO´Š(ÌÈc£©>šÌyõÜsÊèXÎÑËqnjÒ>5Ó kË/ÆèºÐß&CXC6»0; i_|ŒK—~ȉÀ#.9ãâSܰÞù§?áéò|èJsq÷¡Ã(ýë_µ ƒŸ*Ýî+˜¿á ,ݽO]¼ó¨zëØ'8(jñÀ’™xfÚbüiÑÅè5×u-€%Ï›ŸÀê½+Ú]}nÞ,üø¾vçsÔÿ£!'_Qí.£µ "i6¬·æ9jù²ƒT}c€U.Î;o" „Êò :p¤µ¬í>g³qõ¬ÞxúŸ¸Øpµßl6p¢ì…7^Ù¦9´&bÃ$iýïµ·iÓ¦i©]$AC‡õ4DžÇ¦î=Ô—3=ÕD ^|Z:Vcã—¬ns»úé'M:(@š’ŽÞþ¢SÍ‹ ˆz,ɦçC¾TEÅÍž+}©rÛ’ôdü’/ÊÆãçSE¯”±¶¸^K›'?.‚Eݶ<ÇD;š D_š‹`JÆSò5þª–¥œþß"Í’Ýu‘bɨ`9f\„Læù¼g2U"OHHðØ ¶Ö2_z¿Lâç.B‹2,¨ ÇÄ:a^¸ð‰ ãH˜å&4‚ÌÌ$'¸˜¡5ŒL& eU­ôîJ¹c<&L/Àæ™J›@!oªOëâé e@9]ÊДÂùIUìcˆž2çü» —1]ñ‹ i_(c\§Ÿ6’ Ì¢š&*£/¿ü2CØ<€Ñc¶áª™´Ùüxöi?üþ:nÚ&·¥ ¼7ïL~<ì¢VIGuvÿž…øÇ«ÜÀà½ÖPÊàíËE$1v¡Ö˜íÙùßÝß,]J0íh÷9ÆŽÀ€Ørüç,J3òLt4å€Ú Þ­l:M˜Ò‹!5T_/£J¹Z£?/ö§«_‚úâéùžÒ=_ú^”‹>|n®%ÔMuW‘ýCG]»­ä IDATMž«1íÁjL¦'ç*Ž•ÚîJR‚(idþ¿ ={öàÁÔbÔŠtIÞ%òþFðÄŽðxÿ4||Çÿ¥d¬®#7—£‹y¿7¬kéùÙuÆÖú—‚i{|Ùül\ÖìF+ðÜ;ôÔJu^#Ãå˜)…ò%ƒÖ’^Óò ŸJöνՎ‰ÒWãNÔ›aœßlœßÜÏFϳ Гý?åZn´Ñs¬ŽbH?^o:¸²ýZ>Ÿ½È¬\¬Ã•,»¸LLØŸ¼Ï@:zr—ݲUíý-¸×ÕUbËæZ|ðn6Öoœ€ùóS¨‘ò)=ž~ŒòҗɘõÃý÷•0mæF3®äe÷•$ï}Ù“¾lÔÞ•øÃóß¶ƒ^ú EbÚeNþoãóZË÷HÞÆÚèYÂ…’WÚdN ¦ƒ_¹^KóC)MCª9%D²=dþ.éN»üë›”C7ë^<¯É9~52\ϱãkÙt±Ì«TÇç¼)ýÞdN¶VaúcU˜ÌþoáUÜ ÷[°hU°-çŒæµ¶ýWnf%>k/úõooNF ô¿zfÝõý¢Çõ?| SÊFŒÿúcŒ(xòaœ5q"ÄY  ȧ ~ûwãSŸ2m688XS=Õ† ˜Q´—¶+´Ay-¯ÓFðéïÂ6J â¸Þßü4Vø ÌNјƌâ=Üñîï¾Üæ£ÄU:• ÞîŠäå9£A_ß}®£Gq#,¡¢"3c%Y¨ºÆíèRGËVùÎ,Ä‘†ûp¼– Ó¨èÌD š6ƒ|³£z+vÜ{{»nbÞ¼yZœ±eõÚ zd£Ð‹ãÈõ9Qzñïì€/ÝzË2X¤nòrõç‹~ì¬͆SB/CôKRdd$„¦ðÈÚ2dn`;ç“1¤Bbûýþ¦¹øÃþІ&º–1dænISwjXtµ,ÀBÉ‹4t¸¢¦™vV¢vÈ<®ÒÜis‹Ú²¸·oJ‚­öµHxÃë~ŒÕæÓà9Fk_Ó‚NÃwq*ãVC¦pÊÔRêL8 ¸ùF_·ŠñØz4 ÖÜ)Íàø ¡ ³F.üw*cMÊ)DtõI@nh=%#^Ü `驽-$ö¢^&'½örð‘Z>fª^ óäìºÞZš¶ÔÕÑ4"õýòË/5BÊS„‡zßOGxd*ÚBââmfÜ·×}ʸ>%"n‘|hÛ t‹ÊD?€êÈ'Ä”’+½O ¢¹¹"ÔîÇ{>ý8úPÕ•§ó aeþºñƹ›°ø+àÑÇ¥ucpöYw¢{·÷¹ñÛ×Ì)Á“ÿ #—ÃÀ70¾å»¯1ñÖ<‹Ëf˜˜ |ñÅo±RÂé =°QÔ÷ÝgÔ¡n¾^ï‹7ÿnã«‹’W2l2³…1MÍD\«ôæs]GÚ¤#SêËg†{2µÖo¾ìWýr½µ4©ûdy~ aÓÉ8:ß¡ÏW àà~üµÖ‚Ûø ¦Q]^Àb³$RÀQ£F¡ô–[‘¶qî~ïm¼n+Å]yµÚµ“y²ëPÚ÷iÊËXsh)Ÿòþ»úALJšŽÑ]ÎÇËkòdwÔ0¨´Ãµ–£üîÉΊ[ÚבüîQ ´ÖÊi˹æ’ÙÖpkK)î4®ü'ïwz9ºž™¦gÚöýdùÜýضÒ:;•¬uÜj¡=ö6lØ ©º7NÄÞP´QNNbvòT'OÑ‘¾iYj[0mK=®4ÍKoKÙÍs´õ—Hfo¿ývÌ·›ÎÀ€÷Þ§#•ù’»!o¡k×o&§ pÞ%‡åçOõÚaÅÚ\'©´0od _|ñEml”ÊÕÎ'‘ÐY{åãæ[ü±c ½!Su8›ö°b»—|–ƒÇVbÀÀJ2…vÁÑŠæsA+%ÊüÉ:O:ý¶’µ­§$¼ÞÞ}»ˆy"³¸Œ$~õ ¡Ø dgÁ¼m£Æ fѽž#i ðZl6Ä>P^²k( ”8±—ÁêuëìÝH®©†WÖìß³Ýzôl+ÖžtµõµšÐÛÔ|§Jl‡ÄMÐÒ¹Ó”Û]šÑ:™Ù|d%í7NIªc—ÒMëŸÚ,ß NÐ~?õ½Ë¹‚ühš¦€îàÜi´„üç §¿Ÿ§N€Y<ýq‘!Ç!K~¢g³ ìò”oßl.Ö®e-˜£ï½‰Ñ)‡:XcÛ²Åw©Aï>ÑtJÒµ8¸×ŸÒ¼NÙšÓ dÜAÎ…QÈ ;)9C‡Gjçä_y9 ué|ÆGò+R(:Ž@Äq¤{¾]ù¸©ø½7PøÆ‹ôQϽá>­ŽD@FM¼§{·ü×z¯^½4Çeâ ¿½$ë {Ò6‰l{KWéÛŠ€Ø”‰Ãaôš’„-JOO÷œ–øu­‘¤e—Þ:wN<ÀŠví·Þª1Vçã?Ð6W¯Î¤Z&UÌÒÜwß}XóÓ-¬¤ù%)¼'õPÞ3ö²_²ÿ»º£¢ý1÷÷£ñÇÛ^$ßãÒÔ“Úõ ¡Üd9¹ûóiôü§?ÝŽqãÆiÌ œ×ÝâæVH&Š"Æ"”£ 
XqtòÑGiñnÄ.mIÏX²t,âÁ…$RÁõ£û¡ÿ‚E=^û½2>à´3MÁ“ðÕölܳ3°é¥NùîëkÒÆHa¢:ê–ŠCq$ãfüÜ•}ñÙ^Íû¨\úûS&÷%uT(N#3ç3OcgpÑnG3gð-üÑtw¬Çÿ/£Ñn„‘¯ÃŠÎZ›»Ä“¨|Ä™—Pki\WÔÿÿV«/½v‡Æ»ëûÕ3„Á¡á˜zåLƪ×b×ôèÑCc…£U‰ÿ$*£©©©Ø·o$°©¤qSTT†ŒƒÉ—üo¶®tÎî~¹”Þ݆S=n:Ÿ<$„>¤•ï/L‹>ÝCÆ›n®T ? TVæÀ‹ÏmÖJ{¦ôîüTYh”^3§–~uG3Ë=y$¿¢“# Üšx;y•âÌC@ì8_zé%í#­Uº{ï;óî±£-– EQƒné I´MäšÂ©£È6æS7bq¦~“vù´ÂëDó‰¬Í:›DzågÓÙeÿšÊ“箵þrÛ×¶œóNtïBÈMâ¯Cœfµ'¿;¯:6" ý#Z#-ß1òœIµ<ߘ³ù7|‰J¡_=C¨'`VªEŠº¨~ˆŽ²)a÷’Èd×ì< òró4æ°)\ò²÷ñõCPpHÓÓ§í»ÙÝBúàú‘ãýÍÏh± ;Z™—ºÞó°–½hÕ¨J?ØÑ¢:5_×nt*cn¾3+¨««×ÔC[+³%³—º=OSµRQ)’üŠNŽ€Lèò|(úu# },693g*QÞ¯»§ÕÝ) …Ào Ñ pÓon5'ܳì:É®”|Ïf0ÞÙtfRM\ÍíØDuôTw1$¤DŸˆ‘ÍBK¸Áoíé—a ?ò!,Ø¥°o-][ÎéÍŽ™ %­¢1¯0„")'2Õü^Ì+uÕUšê¨8œG4Å«Wjß}úÐŽý/,Ü qö"¶{¿4­[C bE …À ;v,ä£H! P( _Âç¸é7ÇŠ(Ul›ÚºÁhzq«x!=U ˆ·ÑN~Ocòš–¼ï&C£§ÍS L/å;ór°íê‹=U™câ``Àhq"³uúT”0æ’~G¬N¥ Â ƒèÎ#é:B¢ŽaexêêòV³õåí¼0Ûf³Ë hMƒbîFÑl13б¥Õü':iâ=œ¨nwÞ¦u;U” 6µ–üRŽ"…€B@! P( …À¯ßC(EÅãÔÉbô‰J©ØŠ$ñT¨ÂQ‚o÷£¤±m¥ünè=˜3ì-±̾ƒé¥st,FoOo¬¸I#ú¾ñêiçRK'3«ûÆkLaìMw"zîMžôÂ<‚éÛKÓgÍÁèñgẫ.=aVÿpÜqË;ZšUkbåO®ïrâñ§ŸAÿAƒO˜¿µ‹Ó¯ù]»ë^°ðAºCNñ÷Úû &6Îó[}Q( …€B@! PüZøM1„Ð\‹Áÿñ\‹]¡\¦ðT‰Z©¦°­ä¨­FAe6î[<d;˜^«Ol¼(l4fä(Gt¾#B·êhkyÚzN$„ÂP=Åà¢'¢Ò’R¼òÌ͸zúÿiÒÁØøÜqÏýZ–î=“(!leŸ¨œ¦×D²×žºçÌúUˆ«Ð´nɯ$„MQUß …€B@! P(~­ü&BaìDM4%%E RëîÌÌÌL”Ñ–°‚¶ƒu”’{;xð ÐæT%„îrÚzÌ*MÇæŒ•Z`ú¶æi-]IIÉIƒñ6Ë·n]³Ÿî[·num×QªC†0Oa~BŒÈ<šJÞ,ÍyÏÉòœ°À†‹í©ûpFЦÚxÒö¶¥n•F! P( …€B@!p&!ð›a÷<„©;[í›`÷Ù ,_ñ½û—v4Ò1‹Ø”u„¼ô&yG·+ëšßB>Móù™CÛUF¾§¦lÅŒ«g¶+ßñ‡GF²sÖÊ–v¾ðæ;¸aöÕ8¸oº÷ìÕZ²ÓrÎ]÷¬Ë.BAa:"b:?âii¸*T! P( …€B@!Љèèi³>õ`J+ª:±XU”B }¼þßç°yãAl_ÎSO}óµ³´B„9ý­QLX0bù9UVV6“˜/:¯P( …€B@!pæ ^F%ž!ibÏœ~ûU·´¼¬»A!ÿ›xMÁ,ÌÏ×~þu7mÇ/ñ]1„¿êªN…€B@! P(¿,MÂß„Êè/ ·ª½-ø0ì|Û’²óÓüÁÎGQ•¨P( …€B@!p&"Ð<û™xªÍ …€B@! P( …€B@!Ð!CØ!ØT&…€B@! P( …€B@!pæ# Â3¿Õ( …€B@! P( ! ÂÁ¦2) …€B@! P( 3Åžù}¨î@! P( …€B@! PtÅv6•I! P( …€B@! Pœù(†ðÌïCu …€B@! P( …€B C(†°C°©L …€B@! P( …€BàÌG@1„g~ª;P( …€B@! P(BÀ«C¹T&…€B@!ð‹#P‡Š¢ ãG'!” ÷ŽRU~*++‘o¯ƒÉ/qáf í\Ø;7áßóîª5;jößÞß„›F†ÃÏÜÎr:zg`>GY1J \¸ûÆ"*ÔoSûY{)§¢¸9•N­ÿ´r¼JQU¸wއo *>v.†]x#^¿1©]HÕ8ªQ–‰Ü*' ÖHú Ї»Žõ¸ò<¬;”‹‚˜ýûÿâµ’àc1´«|•X! P(§ÅžNtUÙ ß8v{->\°Ÿ±Ú6!eÅ”‹âÛ”VKTÅÿŽÖ“‹TéðÎ Èܳ+¿š€ÄþS1ÁïN¦°}Ÿ‹ùüòoÀÚµñi‘ ½®}ïÜÖáæÖ+?ÁYi®|„5u8œšÔ‘Üí rü†/‘éÿùã±ð…4Ü{\ý ž¾~úÅû¶”†r–¼ô^Ï„ÖZ91õ¨wÖ @$¿,±šýQV!ƒª¤«@ÖεxëÆÙZÙQSÅWM¬±A€Ó‰BJ+Y\Mm-JË«ú»嫤 …€B@! 8Í´o]tš£ŠW(~}3ØV†Ðé”eyûÉ`ôFx‰;8ˆª¡fÔ9+Q•·Ÿ/ûNJoвÖb ñú’³qÛøpx·wê3Co²£ÞPŽòrjzQ¥°ƒjR³|„ôò2³Å ¿ÇMÐyÕ5ÁÈu/W9:³»ÿÊÑ[`ôí†Ë®¿ƒËðë: q½£ŽßœV¯¡3PEÔRŽŠ aô©JLé4˜âqáœk1¨°ÿht “±ýÒÍV«U' …€B@!ÐIȺD‘B@! 8-ˆ”.ýp zôè…€€€Ö±mÛ62Z"qërÂt­]Ô{™à7ÃGÄÀÏfÑÂÊÜP=š”Ý™(«,AEI:¶mËBõÈÔ‘!Ô×Ó.­¦9ÈË/¥t¨N½foüBàï+Œ¨FaöaVP-PlëëP[–ƒŒŒ êƒàëgƒ¹¾ Yééžr:3,Ö†rXޤ1¸ÔU…eS¨­,Âѹ8Z]…rª2Âä‹ÀHªE†ùÂb24Ê ëªQZT€âüäæ•Â3 &#,ÞÞðóADd,^:—-"Û‡º*×}—£¸¼šµé`°XÙ¡ðñõAxhµ.y?5väIGæÑTS¢ƒËöEPX ¢Â}©K{;–¢QÕfEIi9² É“ôf+|lþlK "Ù)Û“^KÑäßIÚ„?o˜tÚßF~aQ#îåyÈÊÌBˆ9Á¡îÒE¼Ï²r;ì: Ûm„0l6ÂxŸf½Øñ¹Ê)!ÄBµîrL6øùz#4*Žòø†"Ô¿AêK ófžø>ÙÆâ\ªŠfe¡˜e tuUE´ÍBFfBÃ-‰ˆàx«‚ÃаKƒ½(ûÞÉ>Ícº£…(·ÛiÖh„ÉÌþ GDt(¬žþ”BÆ©§?ÙEÛqúÓuê¿B@! P(ÚŠ€bÛŠ”J§P´‡£ óßù3-Z„qã’©")Ê’­O;o¾ýgÔÔÔ`îug{êñòÒÃÐ;=½‰‹úA“pá…½îoÒò×ÕTcÊÀ\{÷X·=N:ŸÙ±d= g÷$ƒf„Ù^Š’Ì­øèùðŗ둞KÆÁƒ˜ž¡2zF†ÑÃ{!Á/ß¼ý¾]{ûŽ#çDÎÆOðNm2FO…¡£û!¼ =÷”§œ<}íõrÆŒŽQÃû GDs§6õtB’¶}µ;·îÁö´ jΞ}+æM®QþðΊ Tuá>lþök|¿ä[|òùzd"–ÌKâztÃQ“1óª ‘c…¼ŒŽÌ]Un >§zä—?mÃÛÓyRߘ^=îô0Ó¯˜ˆ®AÒž:Ô8ŠÉhÄ—/ÿ /½ö Òí5ÐÙüˆÃ Lšv ~7c$¢‚lð5RòŶTåìÀ²wßš)xûÛmÖ>ÑIHê=C‡Çœ9Sµ²Í­õóן¤}“§œ‡1#“ÐÕ?+>x+—­ÃæC.Üs7Ojö"­_O\4s’†»Üç¼ÏÍÛ3‘¥CLRú=½’ûbÚEÐ+<ÇSÎú½®¡å)§oλ¼/²ÄR'ôÐQe´Þ—æ¼Ï7N|Ÿl㺯`Ëšø1j¡äÅ Ó~Äú"”ê‚é7AÖa2Úå¨öwÂ+®Šrüଭ@qöA¬þôM¼ðÚrlÉ<oŸ„EvÅÐó¯ÀìÙW¢wŒó:'ª²[ô'oÃ7¶·§?¯¸|"ƒ›/תÿ …€B@! 89:zÍ«O=˜ÒöÚMœ¼l•B! 
øÿ˜°`Äòs<'*µ´}ê(•”” OŸ>CØ¥K,Xð!WÍ£Û\ܰad2ÂZIO©‰®Ÿüå ¼óÕ:¬.Çðë?Àë·õib×çJóÁ­Wཥë°2›N`0o¬{ƒãkP±ákÖP­3hÔï0hòµxÿz£ÖÆ÷gŸø>ÙÆ'oü?¬_›Šìm Ã+ßÍ×ÊNÍÈEQt žóÞ¿- ›_¹«V­Æ“K¸Ãp q³“ñïÏÄèQ1ˆ(Û¦a~¼û”þŒz3–}~¸]¡ÜÕ¨:¡P(­ `µZ¹é®½5Ö‰æ’"…€B@!pÚˆŽŽÆÜ¹sO{=Pi‘’ž™Úxšßìù{pøÐ,m`âÆÌº7ÿû}|½ø]Üw^ü-zÔTd¡²`Š-ãðŸOãÚ³ºc‹Ñ-èsÙÃxéÝÏ1oò`g}ëa*Ýå,[þ™§ge6*óRq¤L§©6k\vçxð±ð÷{.ó\Úý黨øá{X_X‡¢´%ø©¸»É Z©zÉýoa¥…¯=v=®êO'%µ¨ÎOAvI9²©Rš·w…Öq’8öJ\õØgغù;<óYHŒô£jd.²w|…å:d|ÿVÒQ·”œyQÊzöÜ'ñÊ‚w±à­â¶i}µöYñ¬Yý-ï,Céί°„ž4 È ÅõÆ´‡?ÅÆ-«ðÒ/ÀùƒbPë¨@îÖ‘V\‰ÊÚc•Fíy{ißÍÏ~Ѭ}Ùd®v.ÇÎÚQxìÕ¿ã®ë'{pO¾èüý¥…xüÁ»<¸Ë}&›†Y¾ Áý¯Å!.ÐH•ÌrTæïDfíH<ÑPÎ9‘.ˆÝåüãïO¢O@-llª´VX3¥ÚrŸËèæ¤÷Y•„[{ÿ×í¸ š².ãçি½Š—_Ÿ~µðã»VdâòÊõ®u¢æÐ"üwi ^ü!‹¶‡VøE\Š'^yÿxä:Ü25–©œT þË6¦aéÊ4J—ÓŸ;R×4ëÏœ”ϵþì  .ëT¤P(¿eZ×Ýú-#¢î]! 8-”ÓKJÊV|÷ýÜ~ûí'µ)|öÙg>‚ÂIlK„UKÕQ‰!Ø”ôæPDtŠËgù9Eè™Ä0´Ûƒ= U¥.OÂhÕÕÕ@¯÷E·ÞƒÑ…¯œt2%zøDö¦ä3‘ Qæ]+®¹yÙ…žrtö’&åÔ°‡h~6#±{ìuÖLL={ º’q0”użí;°pÅ~T–BqA4æ8‘ä“„³.˜Šàø#pÖѯKL,¯–6ö —¿RWùŒ©GÅZ§Ã#1+9z6oĦn}` OƨsãÐÏiAb·Þˆ4Reu_ r³‚þT¯QnÝbB;C[]]’ÙÚÄeÍCêÞ|œaG%a•[©.+ÁÁMë°!q ¼‚{"yx$LÝ€˜ð®èh¥ à±"DQ“mÙ¾½›·`SxwOû†é‚Û…ØZÔkò·¬¡×p·EôBÏädôЬE%mï÷üÜÅ‹¸ IDAT"DôêĘ@èìŨ.•þ©¶Ä/¤WØÐ%iòÖ`¯?OgîrzÆëa¬Ù®ÙtJçSHµÔ9¨vëöˆµÔ`wÆÍYÜ¬Ž œ•^T³3ja©-DÅ,§5*`Œ¾‡Š óå<­…‹¨,ÊÃöo>eÙ9Ù+~q Û/ C D"BsËfI:á´hßz2àµUC<íKHèŽn]âKû9“%š1ýüq÷ ChP0ÂC¼Pe¦áž"6’V?˜+Ž åÐ^ìË 3ÎX‘n¢*|ƒ\åû¸ÎšÊ ó§ê0M7›½Ø¿z:÷±Ò¨þ¤÷é+½ÚV‡3˜=B¦7Úè'l·ÄiZ¶8YÊÝ¿Ž*aùŒ´ŒÁ×LFÏXl_ÄÆGà¢ÝFГjH\¢èü¨Öç˜þ|G—‹Q=#<ýÙŸjÙ±æö†Sq#¤Ž …€B@!ð[Gà—euÕØøÑS8jL†3b¦uÛ‚—ß,Àø™¡KbB¼òJ'Õ9¨º”¿;³Ô#~>–S)NåU(Ú€@ÊÖlX—«¥üÃõoaËFð“CûÁÁžÜÏ<ó yäíwqq1f̘•+W’QôG:=vvU"7³„ÍÅú¯‘ÁÑéôð˜†ï^¥êçdDkŽ•f¹Ø,w+(B£¬ÉE<:ÈmQ‚$å8ª²°òµ‡ðìŠìVËi…/ÒŠÑ“{è’ /£«&/£1ÉIü½ÌU 4©Ž*ª•Hùú5,X¼«÷—5´¡õƒÁê°^Ãqy ðñ ¢¦öÊb|÷!ïµ!‹Oh"F]~/žÿç¹(,%³\-RF G«*ñÌ=74¤j~°3>_%õý’Çá’˜âS™Sm§´oV-’+½É„ÞSÁóÿ¸=ƒmp¹÷i,ËLûJò¶°}ü4$‹H¾ýÆ_©•ªYwº™;Á’?ÁŒ¾÷—~ÌEiµH›SSFÌe%ê.‡éÊižÃõKOF]îsjì‹x/¿ ÷IFŒ»±('ÛRÛ¤®Æ+;Ñï+™fzB 6À gz]×üþáÆgue¨,}L®ü8 +J“þì?ù.¼ùüÕ%6ÍÇm³*Õ…€B@! P´ŠÀ/ÈÊK”»¥«×b¯M²^Ýpqô!."×!áÜÑŒ%ChmµÍí8iGaÆf|ÿÜ_°`ãlÜýÜ4ôI"E …À/À=÷܃;ï¼Sk†0‹/ö8°‘ßí"JT`æt&Ǫ£7˼õ¯áíÍ™XG^Ô@¹>ç£{ ÎÔϱò£<ÌàÈ‹¯Grß8oX<¬éo`ÞCKPPè.©å±±®ì-ß`˲×<Ì §œ=`=ø¢VNþqÊáڥŢN*s¡Ž*¥õp—iG­ÆzÃ#T#åÝðÖ×›°ù`9¼}1ñš»1íœ!ªÞ‚ÝãÞgW56PgC`ô8<¼t5&ÿ¸éG2p0ã6~·[28㲪òüøùËÇñþecá]Ê/C&È:÷¿0a&/—X14g`u²Š0t_H7†Ö³âžO–ãü ?#óH&v¤÷ÓŸcãA2Æäƒ £‘òù_ðñÅ£ñû ]ãׂÖ‡Ó¾´=©HYCO› íËIû?—îÃû—NÁ¼‚›w¶‰}\ž½kß~ă{¡ç`àØ)¸tb_x³ÿî{zR÷IÛC å´~UbvÇo}‰ñë7œü>Ïr©ÖzÊb¸Ð;n«ÄÛqÒβ®NFÑ‚ñ÷\ÃVW…jâ·fÉ:ˆÜ<°ûP„P Ý;tè1ý¹yÅ"l:ÜØŸ[ýï_3 ó°)YU¤P( ö ð 2„ÒLƶ¢K½¡–nüÖñ_ _Tí4Ò¢žªžuT'e,Y/µ‘dáÀþŸc×έؼu+v¤V¢&„µQ@pœ×tËVÉ ¶ ðóæ¯ñÃ)|ðÁÍ’—––à‘Ç\§äÚüùóµ >ðÀØÊçÕÇÇç˜|Í iåG]­ù™Û°m=ER弞Eʲ¶â©¿‹mûr©2N­BÚd»æl„PK@4 …×±×(Å:_|)†LDòðý¢tŽâ’šµRêiוµcJÝáE&ˆævšdЄ <åÄYªðíg'.Çiwà›—?À¤^×¢g¸¦â4¼÷êG |_AV!6S<¢#­¨KcùN·uÝì;7Ýt’(UJÿa-òölÖÄŠ¼=Xýí£xì“LTGcø1nÖ,̸úZ¤oü½ò-vÊc›írš©5AuÕ ÞÀ¾"8éµÕéÛ}“Ãà_WˆìÔoðÂÇQ]Q‡ø1SÐsDü>ü|ŸqÿòÃÕ¥'füñ^\1coú/Ý€ÏWì¤Ô°Šaœt #sqsÊM]ŒÝk?iÖ¾ ®¿¿›wKóöѱ‹ƒÀÒ ¶§Á={×Uô@¨¯® îç`øK0kî$t³Uã‡%itœ#*™-哞¢<åT´’ÆQY€íþ[kcÖÑ6ÝgÓ÷JÁÁtäæ¡œ>?4T=ë(ŽŽÑ²—òf†û(Û‚·_߀øÙý`)ÞŠô­ßâþÇ¿Ðä¢#ç=†¤Xªò_Õ0oÚŸ3gÝ€ëÞmÒŸn¼äžÛü²ô´K}Q( ß6Çe빃i§áOa! ,HÁ!¡ škæNfÇ_6Ίd§ç ¢²Õ|1ú0`t-œ¢É¥½Q埅¹‡qx= i_TGÛ¿àèn ðaaõ•Æœ¸€ÐÕеùص;÷Bf)í1b´Y6—O˜W]T(:¢â,ÍÚ‰èÖ®]«IþDê×¥K—Î(þ˜2êj(9¼ë) ôµxk a9c·mØq˜v‰d¶|Cé3 CûG›œ–²£òòb:—É`¼·4lÛ Gwng0šh:›‘º6¬ï†ðŠ|TÖ5Î*îrjŠ·wãÅaÍÑ´X·¦;òBÙÆ’ýŒ—˜ ò‰0ÄÃ7$ÑþbÇÖX¾Ø ç!=+ i©{±ÿp‘»8Ѥ$3V²¼ýX³v¥|•NêD:»‰ §MËqÏŸR¢Ùˈð„nœÛª+@Mmöí܉õyð†pG*RSSÉZaI*G"3Û‹bÝúm´ í‚ÒZŽÍGd´…eÓFÛÓ¶_‚ª»+kr¾¦ºø˜öõÎ/¥$¬EûØ@3måÖ›â^š‘‚­›PB¢Mp¯®®@aA6tû ew6m0¥‡] aËþ“æ¸Ëñr£k8™Ë&m¬g¿È}nÚ´ùEqm¾Oi§PEÎ^ìÙ¾b€!½uÍ”^u| ùñ}bY‡b^aÏ£tô'¬éVSÑfdl߈Ô={ùž À m 2aáöç¶fým;¶?ðrµBýW( …@ÛhúoÌÅ7[=Ø·+—«?gòDÇÆÁ×ϯL¡ìn;‘•ºï>½»÷ÅQ2—'M…ó@%=d·ámZƒ=XóÕ»X·D}[R¸Âq`Äåcü„A5¤+|ů÷I©G €Ìf…7=ßé"D¹ßØ'ͯ(€x9s¦æ};ÆDyš£}ÑlˆHœŽ§?y “†w£*q âÇÿé˜þÉmÓþ¼îñ\ýÙ¦ Óæuª_ …€B@! 
`È¥úúÔƒ(mâ–}ÑG ±rÅ·Ø¸Žª^´ã2ñE‹+fÎÆ´é3Ú…œ“wr=º ÂMÿ¼}Ç"‚;åÎÊmxaæ]Œ½4–‘áþ‘k1ûV:,øÃ}è?¸/DóMï,Ų¿_Ž ‘c¾ÿ~ýrøp+÷Ø¥FkM²#g÷Z,yò6|QüÜû— oŸ¾¢)‚@LX0bù9UVVz½/MkçŸxâ ,X°@c…ù‰àˆ#4›Á‡~XË2dÈDFFB®‹Z©¯/ýö· a(ï`¦ }PQeGuS¿fe˜`óc«ªŠzæ JŽœöJå ##ÕN‘ðX‰ð@=ªÊ N^ê©âgeØŒ@_†Æ±WP¾y…Eøì}x\Õ™ö;½j4õ^-ɲ-÷6Æ…jz 5 ÙM6u7›ü›Ýôì†MI6 -€)6ØcpÁ½É’-˶zoS4}æÏ<²%[&`L8çñx43çž{î{Ï=_ÿ>Èj©1±¼€õMðöwPèm'9-ÙŽÑãé¶ššÁ`w¿âÆ*¢µînî¿~¸hÉ œ¼l¤³†žfG°[Ñ»Â9؅ð»ák‘œQ€d›V}>&ž—o°:`àxÉ&5ë2)N7Ý(YïnÀ#ªpg]?–G°Z“‘f‡ÍsÞ!ºš°¨}Gk+†¬w(ðHBaQ!ÒS“‰Í(êCQ*h}½,DeKZÄÆ6Á’d‡=ÙÌt‡2¶È¢zjÞ"Læ7Àù).¼ÃóKu8xm–‘ùE87ŸÇ©à>ä§•UÍ{i2"+Ë®à®Ü¿­¹ OÉ,Pî_€5ÃÌô¦Ï©É– G±°Aº–vöô(÷/>NvŽîönZ ™¡Óh…‘ÖPQḎ¯¿gB×)ÜLC^':º{iQ&ž¬),EL€ÓCë1­”¬;iJJE ÇV±Žd0àCow'´ÀK92晨¨Œ5Óí0 ·[B¥Ï«Àüt÷3áÉæ±~N½ò‰€D@" 0·‚ÙL/j+­cRa ׋&Ü·B ò¢6÷ì‰cÈ–Ú6èR/AšÃŽ4›•Ù(]‹JY«IÇÂΔûøYDrhQŠüœtÚM"BfHï@~~:ËF˜Ðì$Ãóë™ðK2[2ádG‰€Dà]# I·ß~;cÎ|(**¢ë£W]u•2ž¨=w ÍËË;à7ÞéÕ°¤dò5Þï§ùž®:cÒ˜´ÅlIe&¸Òja²Xéb¥`3ƱÌ´0Óg€àEP²†®‚Zº(š³Š(¤¤Lhœ”ÌìQƒGÒlHóÓu•‰c@a'‰&¯Âw/µ6G [Y>™|Ë”dãyiy››cÔpÊc’¹¼–Ô4†¼&çbG^³™Â Ž×©ã5Æ›p!ÕtH˱R(OfýC RÜ5Õ°ì,¼8ÚDEÁ3ÙtQL§U0-}HÙ’£¬¨7™`4Ð"–0vü'ÞéºIÜN7?}y5 EíÕ´’šYRà¤åŒÔÄ]$¸›ˆKˆ¤!"®¸ˆkS±Hü)ØXR¢ÀbSî_â8¦üüSº,vd›,ºN¯_Ç…˜o²)sŒ0C¬ŠsÔ“ÐssO¬Ç¨g ËÌ<®?1Î9"2«©ôp8’x…‚ÆšŠñ™§à%~â9ǺŸÃ‡É7‰€D@" L1B³™ë$죒P T‘¸YÈdœ]c¦Ð5¡-ƒMt©e‚‡[4‰ÌŠ‚BAýTþ–…$ÆéX™e4ÖxNø5ZXƒÂEF`ø[ù&œßˆ¸@Qlþ¶Ûn™hKK <1æ[ƒâóÙD-8‹-Q¢Œï;ãÍJÄ¢é”ê£zТvvãœ8ZMK£‰Ûõbã7ޝ7˜•×øNú…‚¤Ñ"^'}?ŽVM$ 3‹ yPPˆÔRˆtpN£Û™ðKè}Vó‹á.*:ŒjÄÅLËä‰v¦ó“ÐpîÞ'ﯳºN1¶¶ÇìÔïÕ?9)°Gµq®á¬ð5 ü H$Ó"0¦@¸üŠ+ÑÖÚŒ­›Þb ‹NÑ‚˜†=55W2ÄÙ5=éY|·˜Æ½ƒ&dˆ;¾«úÑßÁG¡F5Õ3Æå ´y¯D¦·u±b};Ad§7•l‰À‡!ì « l‰€D@" H$ç/c „VKn¿ûÌ¿pÑH–Ñ .Z‚¼‚"Ƶ0®ï¬]{ôVT-ºÎuÂê×3ÐÚ?ËgZqdÝï°¦Ï ]žÊGÆlǪ'ßd†Sî¹m*|, ýÛu.'bÚòùúhêa'Ü„H/¢W]¡d“HÞoxà%»èDÏ3VüàD•ý$‰€D@" HÞ=c „¢x®Ýž‚©Óg²ÆTž2ºÃ‘J7æb±(gÕè”e°!cÁ½¸ûòcX·ùQì{C‹5LÒàécÒwÙô±1ã¾@èÛ_Dˆ×ð7­»;-½ó–.Š‹²ÎJÓdǦuÑ(,'êpÕ%ÈΉÀY!púd0g5”ì,H$‰€D@"ð>"0¦@¨œ‚Ÿˆ#™Eÿî&â*’‹Q=s.Ž Bk73ÒEÔš^†©vdU£(; s¦LŸ »¹™è|8ÒL{ž)Sù®²49ÌÍî`u°Ó4àë™%ÎÀ„ "_o¶#§b.¦x²ac¾|—H$‰€D@" 8ÿ8cÙ‰÷sÊ"}¶Èb'r• ×ÔS…AqvÖ¼b ®d‡II¾-R°GÃ^ „¨ÝõË i%ãHutO­Ìž‹<&žßü)Î!›D@" H$‰€D@" øh#pÎe&!Š6†8wÒ °˜ 0ª4),?‘„ëî)RjdÔ9öQ˜ 7Q­â2:fù¥D@" H$‰€D@" (œsðÝãÎÂÑ=,É,Z(›D@" H$‰€D@" üÝœE Ý¿û\r‰€D@" H$‰€D@" 8áyt3äT$‰€D@" H$‰À¹DàƒueæÐíOý]ÚI¦OÇe…5øÓÓ˜Ýräf"U3v.ÑP Àì1Z¥¬Ä‰2aô·5¢»¥M}>èL¤æä#+/i¦±Ç‰ ¹Ñ{äEüì&Üzß"”Nb-ÄÐŽïÞv'kšHJÉDeE¦’¨¦ííߣ¥Û‹íš•øÜ5%ñaä»D@" H$‰€D@" øP!ð „BH £cÓ›8d ÀSž‹KÒŽ`óÆÝÈZ8¶l „¦“°ŒR Õãÿ¾ñ+Ø/ý4 *&ãÂSèCíÚ_bý›»°½v¾s2ÞPgHEaå\\{ï=˜‘k€vL{¨^g6<ô8\%_‡F;ˆÁúl|ì·xeÿüA&·Ñè ·p¬yŸÂÜ,Dú`ýŸ¾…%‹ÿ„ŠdtcŽ}ÒüåG‰€D@" H$‰€D@"p!ð‹1A× ¼n7ܾ ‹Ñûárº…bÅç£Qð_B\8^³í=N øø™"ÞVÔÜ‹ÃG;ÐãÑ#5-vkö4¡¡æêÛ܈DF 42f4è‚ßÙŽÚÃÈ*N‡ýpuÁÞ]»Ñ´Ð2È즆0"žVìÙ± mN?ÔÉÙ0Ûì0D]“ÁPdd<ù‡D@" H$‰€D@" ø° 0¾…‚V8aør-:j–t˜@½ˆq¯= à ¦Ç^}QøTºeêYk0ª£Kf2Ô´FèÆéôs:LFhiÞÓ¨#òx5ç¦ †¡â+ "Ò¸«7·¡Ï´ o»߸k:†Wcõ«°fc+¶l=†k¦Û¡×žp0O2Ð×€žc[±ÉU‰ÿœ—CÇS¨?¼oµ'ã¦o?ˆ»/ÎA¸m êw®Á—¿ó¼Qs3®Ÿ= Ù9n\9I…UÏíÃE…saÒÿhâӑD@" H$‰€Dàœ!0®@øÑÒÔ„7×­U&³âÊ•ÈÊÉ… ßmë;´ =ø8­qÍh¢KçÌ«nF¨Ö¡2À«©FÃ^ø»6ãÛ?|(BòÜY¸m–ƒû_Àú¡!Ð#¥##†qt§ Y¸åŸAsì-¼²öw\‚ã æ¡zÉ ¼\·…ÕU(¯ž‚ÜT#¬–¸îŽj¤MB™m¸ä}´C.\n=Œ%™0$¹9U¨m-=8t˜~ß u´ FhµT© O¶ÁÆÀ@5U,ÐêÒ`Oýº¶F ³¦"sêdD~Žú–{a1Ùdÿ"ïi— IDATÀ=pG.Mþ!H$‰€D@" H΄À˜aÍÞÝŒ¡ÛCµ™ÉS…]4;ÛÛè®ÄÁ}{1gÁÂ3;ê÷HÀ‹®k8žK?¹ “«‹1³ÈŒžÌÔm{^Êp'"ül¨Z¸}¦aÙÒ\¨§f¡õÈ·Ðy´{#¥¸øöÈþóFäUU£¨ºyv=L¡*Ìš¯Ñj† ~ô5×a×ºÕØ¹åºué˜>¯”¡FîFOÌ ¯/§O‚œT¨)4š’3`´õóûmعo¦ä›<ŠŽC;QãŒw …)4jõÐÛÒhuìFwŸ®!ºÖr.²I$‰€D@" H$ cš´؇ΎvšÆT‰/€4 |Þ!:xð,¯MÄ!úÐ^³ZÍ,\0.^4ùye˜yù­˜l·ÀA±4©¥5.óV.ÄE×/@1]T g,ÃES“BWMlÅ…H×káÈÈCJf>“Çh KÊBf– FµmGQ¿g#^}æ5ìo„:¯³gåC¯ãRÃ!híóòŠtŒ/äåÂ’^‚¤L&—QïÆ†·÷¢þH-êönÆží뱇%(”HÃ<¦ÄG ŠƒÎ! 
Á³ÄEv—H$‰€D@" H$,cZŔԔŽ4j‘Þ%Ö„Û¨x}ó!èg<`L¥É0™µÌä)´Hòr´ ªSEaP€ ‘oµ!×:|®¨9e۵¥@Õ¤3á ‚4'F˜ â¥T¤ Ö{›ž} ë^x o¶Ó®ý®\¶ËNEÖ8uƒèp;QOiôÂa‘4¥êfÌΘ‹o¨uøÂ÷¾ˆ5<ƒ)#z»]L޼B˜FèXŠ"bÉÆ´ `ÿ¡6Tæ¦%¥üO" H$‰€D@" |S \~ù•èêìÀ®íÛ;§SËÁ;RÓpåõןåuéYÛÏŒŒb qõÆßÅC)ð©Ðׯe)î1˨žŸ[Ðæ]Lo>J‡¹Î¦ãð E¡NÌÚ"2†ŠW”î›î=øóŠÝÍZ„¦|?~ðF”f[a5`ÔÇÏwê´µÖt­·¢(D‡KOça ºà˽O½úYQ*Ý ÜÌFú›;¿­™®§t-KdDm8ÜE9°"Y 0”M" H$‰€D@" |ˆS Lq8°pñÖî‹àhýa%ް ¸e•°±þÞÙ5 …J3ò*¦ÂÏ:~M‡Ö¨Áä|=zëÞBÓ'‡LW¥Pˆ.ìÝyˆïd.ÈF°m/6p£GG«ae!taŠgß¡'<|ù6¸êßab—øM“QXV„$­Þ/|ƒ,MA·NµÎެìd¦G&4‘¥(t)r‚! –”‡¯xë·ºpcùPæˆÀÕÞ€þ#5è —à²Êt&±™ViVä¬|Á$;,\¯§p*›D@" H$‰€D@" ø!0¦@¨×0wþB—bÛæ·•Ë™5o¾Rð]3V,Þi/˜®§óg]‰¬ç=Û6¢»«ž*37­Å~—6Šx¹,ïk~&šÙß@7RPŽÀ‘µØÖ‘¢y•˜ya9tè@²U…Þ#ûpÜh@sÞ|ïE§Û…ˆÙM¤5››à§à¥õP¥M‚Ù>W¬œ …¶øY”si™ÔlDº%‚¾Îö7Ó‘ÕMAò(jÞ^[Õ\ f‡Ð]· GöîB°læLÉ€a4ìBÐ=OÔÂÏÖ!”ái—üñ-ÿ¾@,ø|»P#cŽ…U?JŽ×~Îñ|ÃLÎçÜ"`2hÅ«\£çwy¶‰! ’ ÿÄ÷óy¯ŸØÕ¼·½±†…`èÝ„V½·sú¨&è» ó¢ /öQÙ&†€ÑȺîÃâ˘¡FÍ™ÙÙ¸úÆ›'6êiz©õVØgÜŽû®ß‰ß¿ð2^Ýàalž*u.T3lÉ|ÙôüÛ‹µiÑ­ÔmÀÖDi)TaÒÊoàâå³pÉÜ4nH:Ìš‘Œ'7>‹ZîR*¦cnÄD+]ŽÞ‡¾6$ÌEkÌCV™ K/­„ù$P¥ÍA~¡U“—·ÔÀ¿4¶²%(U[q)×'ò5¼Ì±„M2)5 ·ý×=(µê c¼cÀ3ˆÞC;Ш™‚üL;R“Dd¤\„ ÐË??Ba°‘J•ó±òù4uÌ9oçx>â&çtî(Ëu(‰Íä=w˜Ë3M‘O¢47 „çó^?ñ+zïz&b3àö¡gpè½\Ž4!}t^´Ö§âñ7¡e'¥Œ NÇß[œh—‹š0ûö£ìÒ.øYêÁÕ!)-å „ðÉ8EÆ*Ô•øí_>Nñ+È̤~¸úá‹&!#3jPu"M4+¾ôÜ@„¡Ùnƒ~æðãk™ u,Å 70µšµMzqôè&ÖLžI·Ï~<ôí?bÿýK0BpFéå¸óÛ‹qyG3¼ºƒ2ÉÅ– ›ÕÄd2ÂÆ8„Áö&lyl Šnú ª²#I)[6‰€D@" H$‰€D@"ðaBà „1H´z3R2riÎeÆRŠ}ZÖòÖ§…î—ÂÒ&ú˜-v–w`VR-E9 v±Æ¢ñ,¯”ü£ ¦¢€ÆêàPïªÒ&#³RkÊ^Á«oÔ"÷ò $e˜a´$#«ÀÀŒ_C—3ZLã^­îhm<€5M…¸í3è2Jëf|zïjò ‰€D@" H$‰€D@"pî8§¡¸<5“¸ˆvúˆ; a*‘Œfœ^ÒèÁðž4“Õ˜’ó1mj>^î@ TJmáO«7Žy¿³O±rª³§¡"/‰®>Ò:8&PòK‰€DàT"jø½jô¶ià>£(ņ½ø^Q$;‚LTî,ö¹ˆˆ›>›FÿëÔÓ'~õ³ìOŸN§ †ô 2í¬°ªžØ±‰ãÈ¿%q¢¤ÏA·Ým¸è=ǰ*®)ÖúµYy˜ QzÚL|EZtq,•)‚œÜ Ô!®Ù>5Ôt>Š"77@³09sgŽ?+Ês±‚|[ê5ˆ$GaO ÃaêQtæ¡eÞW‡Ž5«Õð0\[ÙÛN¾)Üë"¼WÞËŒ¬àiyÁhD…°O‹¶f*ämaͤ&Ç‹¡%À-²Ô5#ëDͼbMˆSŸ|ú„£>„òjˆIo+1öñ9ãs'"lN¼Ôäê-©A*H?Îâ9û‚ñá˜r"½ß/± ñ¥Ð{3é}Ê»£÷‚më{¢ôþL€sðL:÷¿³¸¼5 ïú7t½âdû(K_0sèi&24¤ƒ5­7}a!Jx3e“H$F  GO½ÏþÄn4bVIÈ穃Ê(ôcÖ²äæùQ˜š˜PU“×ÁKF,ª #§†‚Ý™˜!»[ט°i‹E×âãW Àð.½-&|ý²ã?,‘šñõZÙbÇêǬØ[Ïà 5\SiÕÀMŸêĤR?r2ÈÜOP‘êHÆËYq¼] ý$?>÷•X{mعڄ7×Ó‹§8€Ï}µ™)¡3®wð9ʘNÖ4fÐ 2sù|ùLè?lÅ/¿`Fhn˯Äe— bluð?ì­{.L…ƒ/¤ãhûú…rkŒa}¬æÅ{•_éÃÇ?ÝEÿ®ñ…¶ˆ_WC ùo&ï›ïFI•×-sž:(ÂÄub›æUÖÄ?Ü=ì¦ßˆ7ÿ”Š£ :ôÒ¡.¾]‡)€GHCt¬€6yYÎBº#ÄÜcÅR ¡üæ}B Þ·“Þ “˜Š÷*Ê— ÷YÅ~̸øìé}{Ëô‘oPé¨$› ½?ÓJP ÄCCÚ,|ìg‚+ö{æ´+‘É?§L¬»ì%HN ÖÂ3¨Åþ]@'¿=…\¿mÀëg¢t¦üº“,¬Áz:ÉŽÌü<üM;Çd“ŒAvºûW˜«C¤Ï€Ã^~±Q‹SÕ¢¨”lï º¶Çx¯÷Ÿæx5Õ`Á~¾> E ·kP…@žåÊc–áô±½Æa„Äu’ÕIfù«€@:ÝÖ9îyÏçB:ã~}`7Ð3Î\1¸l ô¾}ŒYì'½_ûç³§÷ÞŸŠ&*ÉŒgAïÇ8ý¨¯¤@8 ùA" ¼ÏÐ AÍW\{]}SQ¤j#èÚiÀ2S^º)tõjñüj¾p­“)µÏà‰  ƒÕx`åÔ}Ì‚lb’+ßÏØ„•†s©ÓõÔ4*±Ðáaf^sЍzÆáNt ;]üÄ ´ÔPNôІ8i ¡*©Å>ʇö/¾©Ã†GNÜíŸq£°€Ì|‹ ¿úuŒ™¯yÛ·S‡ùÿÝ®0ìârÃtwö{Åï|.è¨NTdð¹H\@JЉò݉õ­hÝ'‚›& -_4¢@Ø™„5Üœ=ÄÚÂ!|áß-%ûQZî=EH9íü&rÞLŸ(Jo ü ŒtZ†u¼§¯®ÜZ²,Àì«ÃÈ0ó™7‘çS¬[§ØÄR{;‰Ð%"a ܽlx2¶¶wýŽOëYÏA^¿x‰çÌD…á„hį6Â}þ&x)cRùÓ‚˜6Ãa\MÄêø{Ø·ÎŽŽæQ¦RYC!tÆUngЭŽs 3`¥¿! 
›7ªÁGG‹¡ÞÈBõl,2ìÓs˜î|ï°/'#DJ-Çɪ còTŒÿÑ2pÂÓjÅöõ xTðrRZÆ•Îõ£ªÜ‡$Á¨(³’ÿ}˜õ˜ÐIkÎVŸa6”_æÂ,º­åg±Î]3/ßgǛ۩ì`ì__£n*¢N®¹×õèêÔ GDqU¸^ §ûP2Ç,kÓŸÆð3”ø•ø;8¤EïQ ë'ŸX£Â]5‡ktÚ4?:v™qh‡Aų°“Éê ŠÉVô1ö-ªŠ0³x@YÛBQá0bïéæGë}pЄ;õt‰GZ¼8pcçÂgDÄOºÈ‡©t Ó$ ¹'OüèsÖ’ ½È¤à¡¦Ëç®×,ó9×#¨^6æêƒšnífkˆô”ñpG“°w;ï?×M<:Pܯü’²…ä>¼„=: 6«°í%Ž4òK2¹“(îG.cŸãýN†Ò?``\£ïp¿qŠ5ǽÌÌþ3˜QåUótÑÛlTö-û(ö5cRY•aTOwÃÊýI¬Îó¢‰KçDÄKC¡¢z‰¥\w®W߀ÚÖd¬ÚB‹i/i_ÂëÃßg"n|Î\j¸èÖ­Î5ïâ |ÔW—‘Ñ`çºd¼ôˆÝ<‡È0ˆø—™]C"Ä ¥>8„ Û‡ /­€½:´sÖ¢}áÍ.L%Ãî ãâZ»þúÈœ:¹¶’ÓÂL<¢GÓÛ6¼ö'ŽW£‹÷ž:¢«Û%C˜Kæ}Ùb®‰ ¢¡À0ÐbÄÖWìøÛ“Ãk”ëJ09…Ë©1{Ðò ­Zô¹g­)Ú 3ƒØ½…­ bwøI (ÜQ™R›0¿nÎO²Åü¦^âÁ<1¿ |Pw[ñöKfo¦”Pß‚Cô—â)5ü°ÐçFþ]A$›Ã§M 2ÁË<Ï»Eà(¢â‡³ÌÚ¦iÌ.?@ñ%JðJf;QFëåÆL3ž³ß€kíxå9-:(ŒDľÂýBܯKܘ3ƒûÅð{ÛuhséðÚ6v_±.&-öbQÄë"¯pt8¥E†ôh«³ f‹/p¿¤þÖÏ=ËšÁô./’)èä™5h®µb÷†$eß2dÑË¢Iî}‹©N `Va†±NpÊÏí”ÏW1„"îÕZ È"™“¦š1·Û–ôü 2$Ìu|h£ /?lBë Ÿ?b_Ç=ÄlòœfW…pð%;¶4¨Px™3¨¸ËÍb¬â3viSÃQᇾzvï¬NFM=³ÿ3¸tA5ìØõ–kù¬ñÆ{‘ÉXÜ‚jCì¦2ql'8™ÞG[lhêÒŒ¢÷uoP)LLé}Îü §†Fè}§P¦rØDzïÞM—ý½z¬}å½O) #Ÿ÷XÐû)ÌI ò¤Œ×Ή@Hõ3"þ.>r =>j,Æ›Nâ÷>šŸ°öáÐ=ù*TOÇ”"{båo‘UIqs:å—søEø0¶¼ø$Ö<÷fþðEÜ=ÃÊŒWýð8Ûq˜U0À ÷ Æ{óy ŒÄ¹N 8 ŽâÛ„Nw'\Gq ¦®@XÉ`¥ü~Òµœ2ØyòE4܇wž]‡£Ýa¤\wµ#n¼ýÔ`ݶvl­gR½†D)ÈŒ±)´´ÌÄÿù\2É… ÿ÷U¼µ¿{Ûõt£#¤Ph½å3æâ‹ßý8r«ç¢õñ_ÂpQ“V…Yçæ1:O`•Óx‘qŒqq‹´(Ì Âü’Ñ6ºà 0&çPž{Nƒ£õtë$Ã9åÂ(z^§°H â&ZXŽÒ-«ì(]š¥ÕìáŽ<‡R±‡–ºkȨRƒ®éZÞÂÚ£R¬×á¾cÔh5éÓဨCEû,ÑÂßhïçG‹Qµòb~û×Zp|¿y·¡tЀfZ®Q‘"š‰ÏPAÅEºi5|ã×I¸àê~L£dî¥w?×Ìû¢;&%nVQD˜œoð@}œ1ÕT e—r?ȉímGh•í° ËÂü^4O… @";i-‹nêß2±^µIô’˜}Ê:á^wÐ7^0ào/i £‚jRUÝÇTðð˜MYÐOWášNŠ×µxæ¹Ø¾uÑå@ÇÜÛ:xO×ë.¶¢ò–~&<ÿîH"ÓTg„ÝNËœ†:Ʀ×p‹ï­Õ!(H ŽÛð“o™á"›b ‚"•áb!~¯ýÆŽC3ü°ý VÊm]]Txc¢ÅÃ&,‰Z°—Xuu.‹ µÇ ¨ÔXqè(iQ“¡h50OÅ“¥UžÏYz6ÝW/cü賜Ó*]øªÐ‚©×QÒ—m…Þ E¬"˜EQ¹ôþ5&ƒK ÷?Iü¨”H¤÷Í[thὡ÷ä8 oð z¿Š|B#ii"½ï&Ÿ°ƒŠaAïÿëî>î³<`œvN8YóM[&}>ÿ 膉¥HBdÈ‹Vғŷ܇¹•y˜–®\þÈ¥¢"³®â“®ì1#¿œã?ÈÙh¨ª1òöX´Üuøn̽³².ÅŸ—3±ƒ™ó‹ñ]# ýœ7™":³s{Ÿ70¦3±­=•Fˆ"‡ ŠšÚËëc}2Wø±âb–ÑúV&|hy‰®¯,+à§@(vÒ³mBœVž‡†æ.¸—)Û9C`L¢—ÖñK:¤PkÁp’q]cC?™PÃtº–1Ò—îé¥5Oƒÿ9Ûhp„–î'h¡øúÌÑt¼ñÚìj\q• ~°‰5:ø*­ê‡ÍàÔ­ËÕ?¢y²ƒy?oG¿ºý¿»hýÑ!t˜Z‰á&Ön ‡ŒãwÇ›ßίžó{æÅd|qª ³±ƒÍ|†¾ùb3Šô:ì}$ o=LkþTsLb»ÖóP¨¾ìsþ¡P£—\F0ɇrZ”ŠRÂxóu‡’,%J¦Y‰1N˜Y½îÿYJ‰ïS7dbO‡­Œµ^»&ÕsN^'T2=B+ôA–0£2 è:&麫ž½8´Ç€Gžê—Œc÷PȤÐ#޼Xz±åyF¥IûPÅ3X*EŸ0‰óèÏ»Gÿ9}¤F¶0<ƒì®¼©%ÒŽ¯ÉT(%QÑxõ•N\µÌ­ÔØ!IÉ¥Xö‰[°tFšr F!êÇîWŸÄêXÖû IDAT[ó3DW0J݃ۯ›J×.úxÝê<€5ý»¹IøÒaµ•pœ±xZÔ®hkjÇkk[‘Ïëª{ã´uùMʲ;oFZÿ›Ø·ÿãüz¡3äà²{À‚ªt¤ÐÚ 1&i½‚}ó€}QQŸ2\3/˜„ô³’Æw4Ü1µû…§4σ£`:Ê©ó5÷Âçw ²¼ËWË$ LKiy^ÕáÞwýó\ŒýÈG^a9æ,žÄ>,XˆÒ'Ö£eÀ‡î.CKÒP9oj× Ò¥y;º)¦òŒg¾/£g)?IÆD@,_ñbФìüÚVGS Eµ²u¨ú©á&]ég`G 56 KDFK¥c 9Ý¡¢ýôl¥¦[c‘z¸ŽjÁ\±ãΣ¦ÖØq±iØç/Ð`ÆJ`ÝËÜ'˜(@mCg¡µe™[•õoë¡"­t_7E³f3î„îvåH¢  l~Ñ·5]DƒÜ_g©ÄcârZßvº¶°öÛvjüýˆ$G±êõ“Éé ñ+â÷gÓtĦØaÁÁ;®CΗÿƒ–ÍJ4vPM-Û9AÀJ’a°ž8•àFXta½a¶½Ö£Fø#Q2­Œ™áÂK0ƒÙFsÊÜkÃìAåœ(HFPœî}º&ÚôÉLÂÀ~²,uÃk4ÜG+QÂ: Ÿj㣊X1êNïçÄè<2îüš¨k8Ü8<¿á‹céèŽï`‚'X+Mè"ñƒh"ŽKZ¬cXÄÿ×p2Ð5ѵ¨¥ûð·u01aŠpçjƒ˜*:Þ›®¸³ÈœãÇ$ FÆcN¿œI9¶R‰ÔKfß3cGˆ›ÂXeÖê p…y¿ZY¢ä3˜ßA!eóäyx¢P¹Ž=Š˜¦å8zõE`J¹(ç“B}/—ìyÙ„¾~Êe´¬3VUíçõ’.¼µ>6Õæ6=è‹‚áuœM·Ït†&ÄŸ³Éó¢ØB yÚ M«vÃf U–4¢»_ƒÍͼn®é’tÒ!æÆ8¾ÑŠw(;ù@å³Æã´¹dóÙ)£«O®¨úÒo-´þÇöûÔ"5²gž—°_“J Ãqzä1ZZ©èH¤÷‚ÖÆ½=½{˜XåÑzïkˆ¢ãã9è½bŸŸã ëxLéN$GikkÁþÝ»°gçv<=j— br-|\%gÓX`+<ÔÄñêP°|>’ÒhÞv5aßîèq9è¯Ýóýè÷öâÀž˜½ ™úµƒJª—I@†\fÞcå _'jöî¤s:{]'[àë¯Gݾ݌ç²ÂfÍCòP jöí‚ÚL´Å¶öN8ZQwð0n2{’Ñçr¢›ç…™·7Ò<èìgÀ{+kyy8·y˜7yÄÑÞÚÀÏ»q¨¥A !o ™/öìʬYóàî„Û냛¾0úA ! 
{œç`O#ëÀœÀUt• Pt‰imcB©IœK#thß³«•%Ëà óu{c-vïl…3ÁÀ]&>q÷ÃírÁmØ…‹—N¡Í#‰ÚéZx¨ƒõØ·w7ƒ„½°1FÂÛzCÎñƒì 3¡ãu9{›x®¨mèF(ÍDXÜ=NìÞQ‰Ù“/Þ#tñqãÈžz õÓ vh€~ËÄsÇ6\t1&Þ§Ð`;ê¸Nj·¢›>û~/“² Ò±#EÙK‘ꂳ»Ÿ÷£7…>ú ô0Þ2@ÿ„Ý; ‘å;Ц–Nôõ÷"J öî¬EY.…5“™ŸEŸm¨£ð.ýGØgŠK2èbBºéd<ÄNîBc] Œ)W!57Ÿ–[úÎk’QP2%Åù¬ï&8®Cƒ‰©ùÕă±È 1“Øx}$p1ÙŒA/a/V« dpélÕ!%·I4ö!ÜÓÀk¦æ›sB¶l³E ¾W+¾˜\gíG¹'Q(RqwÖÚø+‰¼Ð`;™¤%Hņ†E“Ò™|†ñ "†@iclô^=õwÑµŠ®'Aj,Ô<Βʽ›®knÁqÔ¡‡4 k2BFÆÆ[€îE2õÝœ‡ŸcÙ˜áOG¯k&]Méf$š†`2Äy…¬åª¦'½„+ª“ž›nvÍþ²˜PDh°•þ dÅÄch´Ñ¥°˜''q³9øüžeÓSQÉP „÷¼ƒMo"mé%NÉš–Ësq±g9¾ì~fÌ©T$ÐB&Ø”×JÏ 62ª"ñ…(ïn2¢–1±C*,Ø‹ò›ÒTü]y)ŸØŸL§ˆR| R£ýØ1cýïãÚru2©É:õ~Ê5ØèŠE†·ëVi#ÛX#œøNt£#ŒÒÆœŸàœ”±FÏO\—^¨¯ÅsR:Û‡bÖÃVQ¨suSpÛe@¥‡Þ-y?Æ&®cö8Šõ©åõ(ÙV…²†Í@šÚL:Ãë§ko±¾ùÌ #\P[[è*JÔÇíÔf #;=y¦œ‚|t)í#¶j& sÑýÄÃý>L¾ªå­ïÓb÷-våTéÿĶ¡4;é}ký©ô¾WÐ{÷Dz?Ø€àÈ ñÁèAzßËg©Ÿ`0…ku˜Þ‹à n¹jc „ÎV=ó4-z€^OŸ¾¼ UÓªñÝÆäÙ|¤3D¼Í8ÔP‡ ÜC,¦ÈEBFÿ`ý[8Ò² /[€+î˜sû:üôÁ§ðÖŽ¨˜”Ë*+`|®ù V`꬙˜^l‚ûèsøŸŸ> û¼›0ùŠ©¸~Q>º¶<ƸÇñŠ»ÍîqGq;ö}ƒd ò(\ý±{15£o²Òñc Ìô¬Ä½ÿï[pnxßÙ†Wþ¸% ?…O~ù6¸÷þGk[ñʯÁŠç£,T‡w¶nÆÓ/î䛾Šû–çÃÓðj÷ïÇïþú J—¯f¦¦kÐâ[#=‡1çcW¡"× Ó ]ŽÓßš–BÚïÝ=k±fÕ<½ºÕ·~Ÿ¼r2‚‡×àðþ½øëßb ¥ÜXÖŽÎZì¥xSÇrÜõõÏ#MÓ‚Ö}oãñ'~‹µ+®Âs3#žòÓ4÷ñ¨§õü[Çð‰ïýóÓдé5ì[¿Ïüo%ι†ÞM8¸c'¾÷Óµ˜vË¿ã«óákÜ€#Û×áÑßÿ \µ±>â$ CÂ߉àSßü ³L5aèøvüσ?ÅúåË2¥ ƒõ»ñ³ïþú‹ïÇMŸœ‰ôp:j7âç¿ý"¶¿âúòn8©`‚—Öž«pËçîBz´{ßÀCýy³> -ħ¯ ¢}Ó#øãïAå´èìÙ0uïÁ/^«`ÉôbÌ¡å¢é¿àÛÿû¤L*‡“QÌ+J¹³%6¥âçqì?Aö§J¹a–Òz¢…%g >ùÏË‘–á@¶1D!Ý gç>4·zÑ;dE)™~èþÄìŽñŒ› ;–¡eëÓØÛ‚1'S&¥R!ÂzX…³˜eô™š-Ø×À5U¡cºå„6q>òo‰Àiè'ï¦PDmD“ðÔ¯´tã!fâƒôÅ>´ÿq­¤%ÌÆ$2÷ßÚƒÃçbýfÆßOÑÈT M¡Â£’‰ 1ØæàsV*€¨-äoÓ¯ñã3·õ í… Ô’axj;ËuxÉF©IÜ`Bãýä$ÑúØNâß îâÔÚ“)Ú×b@ µÍS. àŽëû‘Êø º'2ñë—bý|nư«ëÉ$˜üÈ`*é 3ŸxU·jŸøŒUôÍVýç^ìÅ2&ƒ.£ÎcfÔ2-Ììƒ%U~d…uPOË`ñKênÁŽ[®R&sèëÿÛœ¨zø¯¨§°y½¥‰&v£Þ§ÿÌkÏVÁL¥u‚xuc.e–ÑLƬ„˜1wïŸÓñèkdþyÒQܱB¬V*xßÛ(Löõ1C"™¤úƒ&ÉØfÕ"’ Mp[=úº‰4͈õrxͨ= ¯QE¢à¸qþ(ÌL˜!&‡arÄÑçVöÄùõs~šøü:‡çGkúÈüÄK4GŽQ~: 3ËîÚŠº,ä-áRfSžË„Rbo¹_ ÇxvéÑËÕt«–|-( B¸ÿ‰>f«zÊ:aœ3…òÅÌEÙÕ®QËü|=ã²™mRÌwæð:î Ûr'3·öÄÖñ¡̸NYM¥£xÎTvf&'ÓÀÂì»Äm÷Q&øºÉìf&ÒÓà­¿û8žÂg&³´Q99@wÜœå,ãB­‹–.£iF&Byž‰²Fb‹o=’Œk®¤ÏÈ·ç=GãÃ1 Bïé= < ½â§tòH¤÷uꌓGÓû?>¯õ»±¦ÐûØ>&2„ÐûV ú‹]•@ï_]cÀÖF&Ž?<ÌÉocJ«_xŽÖ´|И©KØÔÙ„`èÀºW^ÁŸºçäqÎðY¸KPÝÀ&N˜xÒ[ð¯˜W‘‡ jô *AÃ#ë±çx;šSí¸zÉ,L6>ŠâêE(aŸ|Kê^y ßeøäM×búœ<Å%£¬ð«uÂÚDZíÙ-¸á+<‡›~é¸êΰ¼Œ‹ú?Ð)Ò+‹qßÿüçÒÍÀ¶mÎ t6ᮯ}‹˜ÙmØ“Vƒ—·‹¹´èrpá’ëP<ér”.¦u3@K` ˆZÈ' Q<{*énØlnB…؇!º%DiIMüß¶ýex“IïÄüËJ¤p‚êI÷ ¨|ÔÍ[ðÌSoaѧhY䳦¦àÞßý—Rë­s£5ÙŽÞ§6£¶¶î*ªÅlº5~ss—/e¹˜A-½ón*ÃÂkïD#Ó'ˆÖƒëÑs°6ã-øÿ¶’7ÍÉxæ,¤µöY¦‰±‚E±qß/ÿ—”Û`ÔMaë2¬üå h8ÜŒÚîW0ÔÚ‚ní\ü¿Ï Õ__÷Ô*ônØ€u¯ïÀbjüÅuÅÇYTʲ#îT´ÑÅì¡çâæº³Ê³Q¢o€£½´“>ð öÅ,žÿúµï*Ø[è.«ósC/,&³ 9 ÁèÓ|œ¾Ž±¾TT¥Ò*13éVâ8;Wá—_ù1jÜ)pLžO]? 
–ôNÆhÑÕ¸»øf$´˜²¼,$3sâ3%â){˜’1"R8*ÜÆ8“‘_KÆAàáû³••#Öw ‘VÄXº;ïéûgùüŽ›>ã8œÌ¢¸ãù|<û”°Àðk&eQjø1•x€Ì*âôÐào?ÉAµ‹û7Ÿ‹3Ä´ë»^ÌÃú§Uhib'n!‰µÿÄ“î¢Uò;_ÈG5-€jÇ·à—l‹–:‘Ò`a  ˜âÅß«GíYr‹YóœPÅÆò4ëYÀ‡ŸâÞÆ]oåg½È¢ûœÐªÉ hŠÈDÌSa&…†—~ 4ÅNÛªJsÑòàw±ãGÿ5ªŸsÇVìX<K°çPÃÑÕU~xÐäôaFE ÂUÉx|· ¯ÿ Û³S`!óžÄµr˜kJ´’ˆ0ýJ/¦¥Ð½ìiœ›õØ_«Ç÷%¡ˆ·ýÍÔv“íHÎŒàŠ« ëʈ8ÁÿæÎ”û±5Ú²Øõvì@e½ó7a­ž Þz:Ñ XæBÀ„f"Í*YÎ ¢ óûçWpÒüV\>]wz‘ÔÎóç²–KhÚé@›À³Ü?„\-2ÑÇ„òØ@‚Ò×2à[Ÿ)À<Òñø:eo.¾„÷¡?ñ>ˆûÆìOGÑJ¾g= ¹ÿΆ§œIp®¥ ÙÎõÈui.‰àyjìªÖs­¨é6z×w<°qüT†e«(é*iÖ²Ø4γÿé%Be^”:á]!”ŠŽ<þÉkcŠ Øø •Tqµ?jÂÀ:^=jÀžu©Ê:^G×jÁ>åSѸâr>gTÆUÝÌЄuéŠ+Z^……©L#ÌöÃüMæå,)"â*yÞ&fÃü ï—ÈJK,WÞçE÷sÃ%å†IÄŽï‰<?~¤› ÷ñJ¤Hg¢÷í¤÷Úz/h»8ÞŸ@ïÅ3a<"½?ÖÄ/O¢÷üfÌ6îWÓÆ.ôÅ DVBñzï™{L§ÙÙLù†¢2]øx"‰Ì‚†î}Qá÷L'~aꌆ¸ £Æ¡¸ ÜËÔtXltß‹O&Êl_¥p4p¼ Rø½eÈHÉFAŽèEê¦A!š<¤¦ˆ[%ƒ@V&’ÎŒh¬3—Êø®Úµ…A¾Dà¨UëìÚ‡šÍoâGÿýMúG ÷?“°6h¸qž!ÚÔ•êEL„£ ÿMˆ¦*Žl‡GkGʬ,PÁ2ü€èaNJFÙÔRDh±RÀ¦UL“ÂÚM:úm‹Aøn4–+p .¸Y³®Á Z/oþ#þô_ŸÁSÔëTÙô/Å¥÷ÝJ·I3šŽu£¥Õ sÂËG •ƒÂ¨ Ÿøfô6 ‚M"&‰1zª âCGŠáùh´&¤åGÉ€jè?®!Ä.Ï&|ÿ“×ÐC¸N ŒB¢«š¹œ1‡Z„ÆU˜~”Uyu¬o˜¢… ˆàä@0&Ì‹SiˆuÈ;€–Ú¿à7ÿ÷C Qí¬îÇ\^Á1Œ×x/‚½-8Ä8Ítö§þw¤§¿k¶¿ö 6½¶›š¼È_r/>¿r9J‹¸VÈåÛØØ—‹â•ŸÀM7.ǬLµÛ5xþ»¿FSïf<ö—ÅøÎ}3i5ICfJŠ÷r`™ÍéUTé(œKþ!˜(‰5spáÆ/õ Š‰6ª¨¾v^ËdõF–±a¼ŸI*z‘_@ƳèdºêvZÞ:>ÖŠjÆç‚Z¼Jk\ÝZ æ9ÀýŽî>L€±F&—Ø­r>»Ô|·Q ë(ÁÍçXìxT2#©™1‚m|¹'—¢ù8±˜} iA´1„f³o¨Ññ6­7f2 Óùìà<^° p³é+ûqѦlë^ÌHÂ-–[%·,¿a ù” Œ4gãÏ/rÔR7“£§ôyLòñåÌŸ=4¼?ž=á&ZU’ƒ}W_ O-'1F 34`Suª_Ü€~j¿;)zŒŽò«÷Z‡W8‘:•ôúW™xz=]…»¨ŒåÈ=à ü¢Ï:±p¾—õß|0F‡ð¯äàá?jÑØBÁ&f¤åº.i“— aÎÕnfpfì`Œ` º:96M;EöD!¤jêHÅ‚)ºˆa& QÑ’_£©©¬A8•±®Ãk´cE2)7,oÃ3;˜ qKi¶Qq!šKÅu¯¦å(¹¢{Ôüê¹æ±˜_årZNX£s.SïÃ9z~œÚ)ÍHáæ ‘§óþ…™Bx-úítì «ï^ Ò4iÌbL!ï—k¿íÌHÜy_,ù ®ŠÅï(…ÁÆØ}È_èÇ×räÎ{îƒXli‹Úq1ÝbSö›·þFwfÁ¢ñ<)ÅQü˃­¨`JFõ* ½"X«ûÖÓrc#"ö,Qóºû{`g†Ùó•º+|ªrµüOlä‚åeØJK(“®ï·¿ž‹ŸþZ:jÁÙ[Ç‚ß\|ï >ekéò™7Ë Ç!#Ô[u G>);Àðzõ9ÃÌ0aWX—у‚B?cÊyÿnéÅ…;Sè‘G÷Ð&&úÃ|¸ßû¹dN ⪻{¥@¨ÜÑÿ‰í«Ui"“è _Mï [Y¢ç$zÏ$ÌKr‚Þ_0ÇÆ$mÀ^îqz¯k¢{ó1zY$Ðûäl0LŒÞwÒ³ÇÈó ýÁXM¬¥SZ9ãëÕ1v,¬X …p(â -+¦TWŸÒÿÝ~Aý…. 
#p–ñ ´øŠ'>Ð2Cõ‡™1‘nÚ›h½:qM!ú-»¸QS11s§¢³ŽeþÔŽÚ…ÅÙ„‡x¬ «X äs>­ÒAŒß×° Gö5 îˆšuqV"‡1|Ô— ·­•©Š©Ê£ñ ‘ãFO ÃQ¢Z¦¢f2”A Õ”‚NÌ›.Z`:™ š¦vµF/â…02i¬”ã…¤*¨Œø8Q4ç`ò¢›a£êk€Bm?1êb†¿MXpA­j%.LHRC³@iâêë™1ãØ?1#bÉÿGæ-æ6LõÂ~>Æéª°xå|¤p\--pô77!’[Á4·mès‹‰Ÿ'†—¸¼g cÇ/Qœ+äaüfí;ؾˇҙKaO¦ò€±za Ö>¾š£‰^#³âßñÆ1uT.P8w\é#$é@+ÞüÛ*ì?܉6M9]=Us¡šVccQô÷ yúÈ sΩŘ\V€¬$-ÝyI¤˜‰ÎåwáàÁ6ÆœNg½,/|T 19Bj:…âSî{|.ò]"pZÖf+ˆàò"JÝ@±Ç%6¡ŒA"AfØ(\CJݸì·fоŸŒŠ–îÎùùdˆèfâ¦[Ô ´#(¿ˆ–¼RÆ4Çž‰ foLu¨QT¤aŒ3ŸK&†ÉΡ™?Çcªr¨ÕË,¤©¹±çÐBëÉÝè„@(ây*zaõÓ4~dNUá¶û}pÑR©ÖSÅ#\‡29×Åܧy嬖QHmý'¨-îbáqj…“š™ ¬téªbæÐ¹7óY™¶¢7§«Y“Žç 8úuÙ9·tuùŠ]A"*£ÿ6ЊŸéHfY¾_{BK/Ýá¤O¦”¤&°œOG/U­²½DIwB°ñ>Î^ÉÊsE±û+ª¨Èl“‚˜|¡…Ll‘D÷2a¹.ž3ˆ«üôÈ`ü‹‡kT< ¢_^¥EUThP f¸pédž0³—Ù»™¥ÓÂõ®KscúRôY¤Ýd€ÃnÐ{Q¤ áÖO‹õ[£ŽTº“šyQìró³yn 3oèçÂ¥Õ',žEVQ·?ÀÄLkò,é/8>/ãÍ/·2€b®U‘åäùq•qýrLºËÎ!-²ÉÔ ,.>rMçÇ’`c™)™¼w…¯á#ÏR$©“ݸé/™u6¢æ=äº{Ûô cÖ+G3Zr¯üاiœ Â˜Û!¥0VŠñr̈ôR¦Ÿ€Y܇Ìë$‰Ê]q„cV…³ø{8“®ð<.Ê{¢c­”\ÖÑËæ~ÄŒ3¥ )ðYé.Ê}Ë«deçZ$„É`qx×qé¼AÜèÑ`€®Û>^ŸxÎL|Î*/¤=ãÄs&š!ÃKEÍ:Ëg!›1–ÒiE¼þ ’Ȭ’™^3„‹)o>Õ‡…7 Ÿ¦;i©òó‰ýÞDÜSˆmU>«(#„ÿK ÷J‚—“ ‹Þ_w·Ÿ±€¬Ó™@ïE,~€Ê¨8½¯¢U×ä$ƒÇ¢«Ä1êëA{=6¼´ó çÇŸVà3|]âMÌG¼GiÊRà5è+°â–O¡"ÍÈÚc>2«ý¬%µYåH±öÅ2"Š¡ƉÀä¤5 º‰d›ÁºmØU£ÃW¾pÓÉg2~2ŒÁ¶õØõìZå{µÆˆÿÏõª6%³Ð·ôS*&K î@-^~ :M“a™~®¹ý2 \0ÈBmr.ŠF" &µÉ$8Z#Z+3Ö²+ã@Ÿ‹Ì2™t&"bÔù@@ƒò|‰Ç µF|ò]"0&f:ÎâJºTN¤™È謸‰›¼‡B¹Ñëm~XÉ(ë6a€fÈÍçó5hz t•åT ,Q3a]§mdœøÈd–Øò&ѸŒßˆšÌ–8`”U:Ö±1R¢‘ I%óuëg¨É§;žÎÂ=ŒÌ‡y &7õvªJr¡S¬ƒ…ëÕ âÛXþ‚~äÏdºy2î*òTÓRèÀ>i"£¨Pâ7ÐÌ{ÿ錇ˆ«  #Â3Âõwt °Æû]¾´¥óŭËìÚ$¯j®[K‰p›úMÑTdBmѸ4—1ž~2«t¤˜¨ôÓ“%áÒKë?vÌÈÿ©n*h)Óñ5Ò¬føTáÖÉúbkÔh Ã0—“zE%â9árIoDr£IYGÎ IDAT|FµSæ§Q|MÄu!þ9?ºÌ¹t­ãkΨA?B„àÇR K>NŽtŒ¦Ö‡aËwâÆ{©„ÐS€aR> ‰bo;¹Ý|ïð7ü)ÂDn7ÙZÃa¸ +žhã݇ä< Ïõ¡lUù<ÇÙí‚Þ³‘aÏ¥ dv5ÝÙ¹o 2©Š†î“: RÉìsÒv;æƒúÓyשPâeÌ¡²í¤E§/ÀµùÜo¹·{x}äz‘œÅç‘ë>‘ލ’‡0 >ñ:Ñ(Tùqõg…­ÿ¤fðcúu~TyYŽÈ£§"†<Ÿ›ž‚a|¿?é¨ÖÇwAï¯ùÄèý’AR™*b ESèý$5e6&¡ã:ÞŸü1¶J*rh ¼ãî{°ô’Ëðúê—•ã/Z~ cAry“O'_žîTbÒ‰ËãäÏ Ç&ü”ð'%cJf1cç‹Ïaõú4öÎ`ÐzãóžÀ³{{¡/›‡ùWÌ¡ÿþ‹áüÊŸ±~ $gtUÔ0;ž-ÇðÎó¿£À|Mäv;Û(f2RB…Õº¯}åó`’íÈ9iaš²I ѳŸnœÏOÆË ׸öîÓǒpÕ× #}Û°ô~ò• )„tì_b¿Çÿï­û=öî¨Åcfá_~•q}v$ÙldÊLÜäÈÒ"=å"f’Ú÷º‡ñ‡ç¦ãc&c v-nzkÞNÇâ¥Ní}â5ÄÇ¿ žQ¹^{-Âk^ƪçã†å“a AÓ-øæžÁÜÏU¡`šØ°ãxÄŽ¿uEñï(x ïh„¥-<ý´1^qõ#?ÁÑ!j­D¬$3“†£y'¹ãðx½.Õ³¿zÙ7Ò£aã³8èö0ŹÓ(Õ}àU4Q£(&è6¢5ä ×ZF—¡°·~êÕF¬¼ ‹Is6âí´VúRQ|Ë,Øè†§æ²ˆˆ{L5™ÍLën|ÊñË’ï÷*'ÌB v…oháFg µp¤ K¯tfÑ;ckxøPĺ£Ò ¯36Z›õ|e$Qýý÷6žWEfþŒk”kÝ@æyBmd~ê-; bO¡àm§¥iB÷MÍûkc鉳jb]˜˜@†¯ñšPX™S¼|×ãCþ}|¿u¼÷ס£ /^4ÆËö^ 0Az/”Yâ5ÒxœŽ ÙÒy/ÞMS ‰"å¹y¸íî˜jFÔEÔß]œëV)¡Öb ñJü¹š°cw;Ò raÒ“²_¸ )ühعúTºç™íCù–]Ðg ……L—fÁ«Ë•žöºR¦ ÌLaá •¤BƒÔ¬l¤$Ç1–€JŒ‹l¤»ì°ÄÆ!!%žyåè?Îm{ʘTœ¹äÒó2B9«œµ$ep …AáÙCsIæB}ÁE×S&ƒ/ÛçFî°|)akm-ªkQÙÜɤèÄ¥¦0fŽ‹åÌ÷WÕªCáè¡P8«×XòúR‡aź<:ÙŽ“1 y0ëpÛa'½Uii%ý¸ÂE2£ÙJv¦l¤ÑÕUéiF]];šÛŒŸÈ•dÊ•ÇцæòRt˜S èà1Ð)øPë®fºú çw]*ö5K$;JSaP!Ì4©±F) _$}oo¬ÄÎÝûèW.ró0'Ÿ9iÙHf¬$©Úº°è:‘ƒþ÷~æèç©3:©ŒŸÓ2CµƒùwíÞƒv~èn©50CjãE¬ÐøZÑ)ajÇ(Úñ¶3ép¯Îб2öS$YOg·uH׉٬¥ÇI*¶í”°ÞÏ&NMJãõá¨D=ÍâS4ò‹ƒ—T÷Âë¤zÙoðàÓõðGåàÇý©®(¯‰çVÞtGÖ’W`EgMšÐØL¹”šoËALl l©V¯ _ÿó§XºÇˆ}13ñðÝsØ?¯Ãƒuæî2 Àoi‰qH端âp8¤Xâ¾~ow¸¼(gnÉX2“¬0ò†õ2Š~7ï¹È 4òl±n°ò5:ÐÎŒ,@@ æÚb¤÷<Ög+›†ÖN²?3¸[.'ñ|ÏyQÄ3^Œ£réC²S"ÀV…°rQ-_æ%Kh¹WUd匊¦²@vÒS˜G‘î --!…ÐD › ’´“º&*3‚&Hâ—S˜l©Èݵa‚‚”Ä%íT•T5-¢¢„%î0EÄÙê±åë­Œ²3!uo­³ ê: ã‹ fþmo$I 7«©jhµ³zXyÙ®ÇÙ7BB5½‡Œ#Q÷ŸÙŽßÝJrÞˆTºÕZ½„K÷JGóMØðHPCì…•W­ÑÃBüÙœ’»§’Ìd½ãç¬x¿à54b1çgâÜ<ºÌŠÓâczŸÛÁóë¦jmB YW5t4¯w^ÿõ¯±[9† WáÇåô£ÅYåx)„bA¡®¥c@dRŒz’øß»¯±+ºz@J* u¦"`‹â3…ñðò5z¦^ú¸…g’ äâ} õ߈‘Ø´v¸ƒØO÷×oBØÓ´Oñ|ÏyQÄ3^Œ£réÙ¶DÎÕ%/ÉU½Ï¤û×Î7ZKc¤å‰ ø$ÁÊd¨ñݳ )ÈËnÞ ]éRk8Œ@R>Ç¥1¹o“4¯¥[ß…öv&¸4“¸„é úÉíÑ?tá 'êGS‚LYáó8aïp2™y ÌF­Ü4m¯}Dœš»£ ^5cûèß« gÀ=^ÈíÈgHV̼Œ3[I†<LJ,ãq>rs§ò5zа3L\1~†_ò8Úýä‡qïâ(ð‘ËÉE@`/ÆPQΔsp¼ž‘×ëÉQ© zê–ãßÞ‚ñ7^ÎÔéH×›†D+~?:ÚZ¡dÆf­V¤¯8Å0ˆ–²µØ»~1žY° çý7ž?„ÄûVNîU-zSx© ÖãóÝòÜïcdaÆæËA²'ÿDÈ= >æ°lo˜ žQQ‚ÍW#=(Z[fjŒ#ÁZ®{ú!`µZÉ–­’¯ÑÓïÔžG$&ŸÑÑÑ’R8ÇúoìHl\.œÎ˜Âí›8ÀS Oñ|ÏyQÄ<ÄO½àt/Ç뙡ד@‘F:QŽ5¤ª_˜ý̛׸ËV®AM3S&øúµÛá+ípÙ—ã·×\ŠgÞÿ %õ‡ÊÎ×Ksª½Øº| 
[binary PNG image data omitted]
trove-5.0.0/apidocs/src/images/Choose_Image_CCP.tiff0000664000567000056710000006320412701410316023353 0ustar jenkinsjenkins00000000000000
[binary TIFF image data omitted]
PhT:%G¤RiTºe6O¨TjU:¥V­W¬VkTUÅtì° 0:Í~Z@»`o·\÷0ÌÞaÛà°Q”ƘC™¸AY÷>r@îTi~C™°<Í]%oM§ÔjuZ½f·]¯Ølv[=¦×m·Ünw[½æ÷}¿àpx\>'º¸;y@O4 èBÝ0§¬ön Œî~íx½AËÞ0?Ìzq“,<Çë¨ãß`¯ÔÆü°¿ü†À€;€<Ð39Í€ŠqÕ ²Œ°3 §G¼<Aç0Ä€%£ÌÀsÅ€9¼ D¶h@+£L‘òqèH ”uGÒI­QaÏF ÌdâÊŒ¥)Ê’¬­+Ë̵-¶ïÙÐÓ <ÏÒ`8—óH‚€7§Gtäþ¿àÜìí£N™ýBü bŸNGtèw€°7<#§…OQ4P’›”¨Sìʌд=;­îä¹QÔ•-MS Î®gf Ñ¡/qÀ5¹D ^\ÛUW…u%VÊÚa0v‚€ Ѐ* 5C€¨ˆ91âàlh­Ž€ò`§$dà$¨(–ÈŽ"f"M€ ‰È]<§ò'…ÁNÐxLŠˆ¾¢qZ]J§TªÕªõŠÍj·\®×«ö ŠÇd²Ù¬ö‹Mª×l¶Û­÷ Ê¥9\÷€.ö_€,ý„:ðà^(Æ€ f:¦^Ê€ ˆü„gœ¹€úýÍ›¤gyë¿VGë€]ˆ£´¾ßìnÈo¼97à§ ‘ÔÙ\\€K–Hs€NAõ€žÍPõÜÀ`¤0—Æ,y¸<4¯¨§ö÷€@ïÈ=ú€¿€ö>ÿ€~Ø3àpü‚€B ¢T á5€ô…˜6†•DPœ ˜€0"0÷‰€") ° "õP€Œ€¢5 ˆá"'c°¢ˆ©Ù@VE)!"3$°É“€T^ ©Pi Ц––‰,Ì“dùFS•eyf[TÎÉ¢@"¸¶/ #Î5:#x寧yâyž§¹ò}Ÿ§ú ¨:…¡¨z"‰¢¨º2£¨úB)0ù¥€n™Y#ô%é𡨜@"%‰Úp„ª€V­HƒŠÀ«0Æ­ ˜,ø®™vdw¯€ÁWCø;¬p²¬ Sy…€´€FÕ+£â²­&¢ÝHœâ@,.0¼¹€é[,KȲ€û0U,ûFÓµI¨¾’”¬´¿•€óÀ0tˆyÂ’d¢Ø‘ä”ð®O”2ì±Îë&˰o+9ç´{RÖ¤r<“%ɲ|£)ʲµqu]×™†€ç½"nN–%‹c@j‘’ÏFY˜ }¦gW6…¢\¦J¢Š;tð2 %X-Õ³å’=K] Í€!ö0 Ù¨òSi/âÐ-v÷éüw4AŽÉ×mÝ.·¼Á8+²gʘSE|§w…œ„ŒåQCQÁ‡i¯> ö›Ã»\ãC*o U»'i-™©c´6¶×Ë r68ŽD0_ãÀqúD1ý"C…H]P–«•y;c,L1®7Ç2$†U25@=e`½„´ˆ‘œ`¬bsÅ:ÈW2n4—xàCc¬›³~pÎ,š¹ÆSµ†¨Œl4Q¼¿Gèóý•*QØÌdžŒjEˆ.-2Ũr„< øGÏv$˜ªXí•¢²ÜÛºþ¢ñøuž®ƒb°6õ89Š``«€©ÏYÜyˆÅL+€U}ƒÚ¿p4ÞH¸°ä\«˜XÍøä­ ¸y¦ ¦ª9G0Qm@ ¶ºpjr,{·ž-ëœÉ&÷N›àô'få¾›œ©m–€Ô[[l;¶öîßÎ[ë}ïÍû¿·ÿà< –<¥î`Áö’”DÊå“ qLID(€Á ª!q† ,†òcXëØ¹>0*J¨B£ËŸ,*uæ%„Õ´€'F{‘È6§É¹A>M»€µ~E‘T¯8ŠPÊ0Ó³R¬lP«sÎJ‘¹Ï)àk­õηJì,exg‰1s´«XÓÖA4 at=‡¶%,d5 èöD£üÒŒ³Ò¶œ€aÁ‰…p¼PõV Ãà4#F4£N5 9üEqfÖt Ì[I»Ì\î9!ö!Ä1IaQÐ5µÀ©)¡UÞ¡ÃW  ;­ß LV}IUñð–@šÉ­á+Bå4@ŽBž‡ãmoW]vEn=£áí¡^? ÄC ÐÇÐ ˜Q¨…xÇáÆm`ðÄ™˜C$N@$V>¤`ÙÔ–5dÎM$Ôj]~/“Ù¢D)áDc£ª:– ŠÎ…¶)bÀ£`E„aÂî,›8tIP5å]ªG%gãÌßΣ•h£™ÇE}€ú1ã¾Aq±L» ,—íö wYá¯ÍA°baL^½±…ißXÏV@˜¥J]•VªäTýäÈW“”ò8V—Î Å\MžI\­:A¹…a¦¿æ$l¦.F¤Úi&–i¦žj&¦j›ö5ßÈX‹±–VÐÞí“ÒîÖ˜IÏèÀ+æü.:ܽ}$B•ˆ˜WÀvs&úp1Þà"@DÕæn"*šäH„\1ÄdFÛ”nqç$X'0g8+ĉàX͉á¼7MaIP\ý]ä™XMù™]LB§yg–sfþzf®€¨j}ñÙðÍK±ÙÌèc¨dݱ`ݹZ0[å\.RŒ’ƒeàáŦÅNGgjy€¨•¾Òa¦œSø‰ƒÜ´«aù›è¶‹È^ŒhŽ(推î(ö”»¾Þ$WÕåìÜP %Lá®Åm#êB€Œ¢äNm&Òéf–©lZa,vŒÎ$öƒ òƒŒþ„%hRäì[è`Êa&ä»NÇìÌÖérž©îŸ)öŸ©þ * Ÿ–zÏHáéæ ê&¢ª,žéxÙÝÕåžÅý"i–¨F¼g¨Z›e&£*~¨*†¨ªŽ©*–©ª‚©À|Ý®ª*¶«ªž£§Ò_ZaÚ*Tdj°Hš¦hN†hV›º›ª¾±+±«²+&²«.³+6³«>Ÿ*ƤJ¤†ê‚!JjèC*òQ)ªQ«[k´+޹+–¹«žº+¦º«®»+¶»•ÈNžÎNK½.IÙ†"O©’®hĺ¬a6­!<Î+ä% % a*ú¦ë€[+ŠÄlŽÉ,–ɬžÊ,¦Ê¬®Ë,¶N‰†©²˜”>¶,fÆéžÇay¢, …êzËlþÐ-ЭÑ-Ñ­Ò®…ò‚!®‚«â˜Ñο*îëvÀ+~Ï*vÁ-^Òmv×­~Ø-†Ø­ŽÙ-–م¬i‚¢$ò-F¥¬r„¬éÜM*ÀíÊÙíÞÞ-æÞ­îß-öß­þ±kHƒ™âµLâ‚*á`-N¶íV¿ì~Ök Ï®ä®Nå.Vå®^æ.fæ˜2ÄçÔBÕæ­†.â¿n2¦¬îÝ,öÖînë.¶ë®¾ì.Æì®ÎíÎà‘¢á¶¤ÙöOÚ¶„.¿®žÜÑöêîÖñ¯ò/&ò¯.ó/6Ë«Ä^k̲ݔ|è.Í(6¾éšéjbÕ®:êmjݯ:ø¯Žù/–ù¯žú/¦Žîv¬î~î­¶õÌòÆ¡VÜ*öøkãë†äoªÿ/öÿ¯ÿ0° Bl½öìÆÖlÌÖ/Î¥é¢ð…Ƨ.Bñp0W°_0g°ní¯A è_í8aoYÚoglæQE³oëðs °¿ 0Ç °Ï /ÚHnÚѺü0–âojÕ/rãn¢ñ/ß q±1'±.ÃîÞ½Y箥 ÛìâÜp¤Zð®Èoï1w±1‡±Žž¯²•é_*ß0šýow¡w1DZÏ1×±ÝÀ—9ìÑšàêFûñJï/Êï®*ð.šÇ±¾Ýqc2/#27#²?$2Eœðz\ëÒõ w Óoo²ðáw"²K(²)2—)²Ÿ*TËšlã Ñ­ËæÒ:²hqqZý²„Z1hZì‹*rÿ030³10ÔˤO+bÉ^nÙMž:®o'…Ä@@€?àOð „BaP¸;6Äa‘8¤V-ŒFcQ¸äv=HdR9$–M'”JeR¹d¶]/˜LfS9¤Öm7œNgS¹äö}? 
PhT:%G¤RiTºe6O¨TjU:¥V­W¬VkTUÅtê° 60>Ì€¶ºý„oî@5Ô¼].ÐrõôQÀØ8„Jc‚L!ÌÜ(ηÈdrY<¦W-—ÌfsY¼æw=ŸÐhtZ=&—M§ÔjuZ½f·]«®® ‡·wP‡ÞöÚê[ÁOä^€Ü  œ1t@þ 9×ÆÌ±_g_ßðx|^?'—Íçôz}^¿g·Ýïø|~_?§×í÷“.¿@ôÀI¹®yõ€d¸®nî¼€p‹‚¸ð¨=C— €¼<ï&Û‡¢,sñÅLUÅ‘l]ÆŒeÆ‘¬mÇÌu¢…Ü|2È€Y#¸Ž1ó%€ä ¸k«—)ˆà»-€L¼“L‘_%îìKM“lÝ7ÎŒå9Γ¬í;ÏÌõ=Ï“ê¬_ÐÉAÀ Ç2´L’Ê\±BÁà+™pLSDÖí1MÏÕ EQÔ•-MSÕMUUÕ•m]WÖ i…Y€l5ÈW”8v ¹K6$« 9 nY€§g€5¤ö¨x[2]4%ÓS XÜ ÅqÜ—-ÍsÝMÕuÝ—mܘ—ŠvÞ€å{×uí‚€UýF¯”© œ ˃Èr(Q…€žAP•ÏNÍ1%¿wãÎ5ã˜î=äE‘ä™**ce<®´ÖǘwfEÒ –|€¦t7§Ø h  ".d¶nÛIm¸–ÛÑ6M§êŽ¥©êš®­«ëε­¥9Ašà9Vxß(²Ÿ}+XÌá:-ülGöì{Î’–iif›®pÁðœ/ ÃñOųzó™cXÖKŽä€<¯'er\”©É û±ý&Éæ×E½¥{êW¿ñOUÕõo]×öeÙÕœt©c8´e*snO3ÊrÎu µŸ½ùãµÒ%]2UÔvž¥éúž¯­ëûϵí§]²íÜ8Ü‚ç*l<–ÙÊ€ е-’w÷y=%mâ–î-§{ŸÏõýÿŸïüÿà€J¹ï·Àî‹›».Ï”ä¾w,úÞ#È~)æ—œJ^Sg¬Ù&AàBÜÑvrM†ÂXM áD)…P®B×]T{¹Qð(åÀÅ•ŸL}©> ¿&.ýÜ~åJ XŒq´wÄÒ"€ñLجµ³œ1ïÀkÅР`ÚÊeˆ=°ÿ½¾‰½|@”Çô^½>›RývÈ /×!uðCl¯Sü j@€ÿ€ Í3'Ìpˆ#B€ã €4„¸€LD‚´ n±HCÅ€!£%dQª"RGQ¢ªò>È‘!´ š€<”Rkó'ÊŒ¥)Ê’¬­+Ë̵-Ë’ì½/Ì Å1Ì’Ù3ÈR"(Í€O7¤¡Ää¨(O ñÓ=€Üü FÄͰ+ÑáEÐh)ŸG€úTªÎ¦„ËèûHóÔù?t©P‘=+ÑTeH‚œ•hBV =àW[€muBH{_€½„Ö*à¹!ÙÓ>Ïô gUÔÔM8Q³-­kÛ͵mÛ“c>ðc²ÜLÅÂϲ( Fı-kî»x¶±:NÝ·œ¦}߀«€6˜-õ‚ ë?Äðqò؈›÷ö.cIÚ²‰ñ€nD¹(4eä ¦&]M ±™ä9´XgÍ&BeèÎ6eS˜ß£€•JRƜڈ'ꓤì„zȬ“tŒƒ•¹à“±€fÌ[HOmˆÌ> QÐ ªo½¢£6ü<¡©\(JñèœU»Çò%Éòœ¯-ËóÏ5Íóœï<…h|Xˆ‹8 õŒç:¼ò”=ˆG™ú·[…wÝ€}TÚ˜À¸cçÀ&ùSåÀø}ÀÐA×逭 Ãi'bDöt‡YGÙwÞ÷ö˜áP­Ä”3û;~#ù€¯íô€ïô†–¿ìV‹^*ÄXÅÄ—¶÷]«ßwéÞ;áð;sðN AX-àšß\+˜Ê®E̹ŒâèqÊ­všƒT¼ y±6kДsp½ÍÜ+JcÊG ÀÑìÉšP0ãˆâ qÄP-"Cò~¡s%!Þ«×1L&ÀJCEYZ.Kâªm! È=1ÖÒZ[{„%ç€éâPÑÁŠ;Ñr0 Œ8 ÈüœÚB!Ôk3 ÈÐ'$í!¢âJ<Öà¨Q“Dº†pN›0Œ!±|R¡äA!±È9DP*ÊÔ&…C,±$£JZÅ-Àª—@KÒ Ç S ùŒɃ.fLÙ3æ„ÑšSNjMY­5È3¡pÌ&MÀ¬á9pü†6¾'@-Ãp½çˆÁû2z>÷â§Á SìõÉ 'Ø PEá‰z ŠQ”-Û8¶rLÀá!žSÐ&O`ì'ÀNŸSðõ©$O@Çf¡´ ÚLý) §Pi€ÃKÀ8ÜGÌÙðD§@J ˆ+–< ”Nz¿ /FhØjŸ´zÍŠSê…QšphÎAÃ-ÌÙ1„F…uÂB²»¡9­^0¨Þ’x\naeJúSøÃäÐQoMðŒ, -ëÀU£JüØR”вÂIb"‡c0ud‚×e$u‘\`>L`¾M™} `Ù×Éì±ý?ê­²€Ð%©ÆÖ3.ZjåŸ,$5ÛPA„½¤T.ÛÐîelä=\P3.CùqÜ•kKဠúê81L‰¢òWnìQ0&owŽ#Ä™¼SÇ0S pÕ+Ý{ï…ñ¾WÎú_[í}ïÁ&›H>n…knS-†/§H' ­€!`ÐìOA`¬8DD @.ò4°¥ÒB,ÀàHâWˆÈ0hB{2vRA†1¡Î6…‹È€GðΑØ\ƒâƒ‰Dˆ˜£˜-k@*dÒ r….¦[†*PH.9(8sœ}ˆòùÌY2fRMU ‰“ƒ«–¬—Z¶hªñ¦¬Âå ¡jö$Ëá}%&J;O!æÅxÝ {H¼Þ–ƒJ>ÇôX!ì™ùˆ@ iP´À :l·àÌ! òz!ý1ÉP"x‹¯µÿ ÙË<õ€šÏU‡L°D”¹—yUlfΙå8 ²èUL)ˆq…Ñj-„S²á‰O$©N‘·VòÁHûoYkHelHÂÑ»Cà\ñV†ªÔ†D’’È ƒÞE8¨LG®¶.VLÍ¿øà\‚p^ Áø¡¼.bÌr!«H&ĸR‹y0øÃº¢(0‹D%0Zs¶-¼oØøÏ«1Ÿu(-rÒK°'š2Ö_©E*C£Ÿž[&Àbù‚hæ\Ð$«•ví2-3!”Qã¼—ÖCt\¶—  @B3ÐZ'Cæ¼#®uÞ½5³AuªÐxÉ ´gíŽq’K¾±B“±ž+6z$¹ó¸% ÿ DÓÕ¿.‘‚Z=¨ ”ÿHf³ò7¼@ Þ.×GhØ Š½X΀BdLjɹ8YÑ‚5€sdPX; A'\<‘u5¼ ~ÎÄCŠ ’IÅèÁ°ˆÆaûÝ"@÷M⼞ôa€£PˆµÚÌX5ŒD€åzSKO*›ÅG»ö çîÝ÷À@kêÚ†¶Ã€>&BÈ”¤õÿÝûÿ‡ñþ_Ïú_ìBâÒ †"U~¢ j€žï‰B!#† jÊî¬l ”âbÑèßBb!¬úIøl` ¥j†áF£ê0@ÄçŒò( pL1)Ê•x Œ "ð1bñ`¬m0!Ž¢Ê°Y'/§bþððKŽÂ\LÖ«2„.ÐÎH«èLÎŽÚÎÊÈ…ŽâPÈ`_.êIîî<®ò· ºyG˜3‚0ÒeþãjL¯÷ã|C {l¤ .ôoâ8§« ½ln Éä›ÏÒiÁ„ô bÙ ”B ±.ˆpçÛ`nõí‚MB¡jÔ`Æ«f"©†L1çºF@ÏŒqHé.ø$ç)^Κ  ¸ ô"%„àÿí‹ÔSBR¡Ô•ë0³@Çp“±ñ‘1•q˜1Köaëú"j:w¥xùNséòXÍJ®ì¬Ú˲dPˆJ=ÊÃ&îÝ'ÒvîBHî’t?( 29Îøº" Œ‘¸ÒäѪpúIädO´¡‘¬!ŒæØÅÂ}<õB×&Tí\óïBÉ,À'©ssÓ¸0ß." ݬ4(/ðÂ;Áe/²ÿ.Í,®LÕ²˜!®þ‹Jqö²è"-p¤ªNׂyF¢ˆ¡Æ“)6´ËPµB 4ÐY…C:Œ ¨ÈéŽq"€—>´éN´íNôñO4ô0“S"-LœJš!H`Mr·Qhã6“ÝçØ!ж/H"±(¶GÌå ´"0 Fu.! 
¼òÔTt"IRîqS.\"Æ ü­~¢ 8á¢`&Õq<í#³Ô¨BTÇÊQ•5Ou‰X® >ã3 ³õ%ìÝ *»&n×@2l:в^°¸ÏjÓ D C¡³[³“`\o÷(ÓäÃ)¼µÔxàáaËEPù>D¤DêŒü&ƒQæÀ­•.é“È L•àŽ æ`ÊB¡±`jf«7tcFt»FÈÏGÛ‘ a´²»`U-CL­Üµ\iåF+!f=Ñ6ÆÐmAÅf`ölª•lÑ"2!Ôå ÒŒ§© n4Š!_2æDASaÐc`HÝcáÀ§P,æD:Ì*Ygv{XÖ½köÁl6ÅlqO³V"ðÛ ë3h`¥m´‚+©1M³ †À\ÍR8ýO³ep6!g É ¨ï•†!ŒÖlH“·ÉÉb  !çeµú¦S$Þô'm6©+Ùoò­pW U"päæ Œ¾egíJÆÒK\"{=.«=vqs¬œŠÀp·B2Îl—yw§?Y ' ÅÏYÅÙ ” Ô s[T Zîç[$®¶3 ¸' (Ô¶¢3‘¦÷¸fK'6šD¿J1ú¸ë’^DNßgÑqMÀ bï —WJT©T‚b cbRÎ rÒè´rÛ‚îït'UÕe-fÁ>lFÉ{ìºØ!eÀ•iÂ"zÖ“ˆˆˆÄN\Ï[+;èúD²âü8#úã9P˜é¦ƒ0± uÇ‘ì}È …!È’,#ÉL•%ª‘Âú>Q:É­ñ[ò࿱{þ¿ÆP$r—ÆÉ{¥É“,Í3ÍLÕ5Í“lÝ7ÎŒå9ÎlœÙÊ2Ó)­²«~ý/ÓK®lÉÁ»CδmGÒ%IÒ”­-KÓÍ5M¥ó¼JÛÊK\¨ÞÊÑl²½Kp /@ÔBÑrý9YÖ•­m[×Íu]וí}_Ø¥=<ÔÝE>Ô“ü±@Õ¹UÐÕ’[0¥Î•ƒkÛ͵mÛ–í½oÜ Åq¥ÇÍÌøXœøÝÏÑeþY±¡LLÅU·%õ}ß—íýàà˜+/a¬’Œúݲ¼]xЗ—Z‰lÇi`ØÎ5ã˜î=äE‘ÍxCk=-qEûáÕ;‡b.Lg‰Úwµ«|d™Îuç™î}Ÿè…¡§™2Ë”79]Kw¸™…Ÿ™UŽ~m‹g&­«ëε­ëšî½¯Îš6”Ý`Ueaù}RåfxÂWŠ¥˜½ó°n›®í»ïÎõ½ï›ê§±iUÙdÝÖ]áµYÕV¡hîy­_E^›÷%Éòœ¯-ËóÏ5ð.ÉcðyeMfq—È¥¨líf<fD%f`=RSf| ÖŒ#Ó5LKÒbX ζGy††5trove-5.0.0/apidocs/src/images/Cloud_DB_Infographic-1.png0000664000567000056710000020144012701410316024256 0ustar jenkinsjenkins00000000000000‰PNG  IHDR±XìsØfgAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<²IDATxÚì½kŒ$×u&x)RIQ¬¤LQy¤ JöÚzØ•-< Û˜ŠÞ5„µþøãÇz·f–E¼wóìGE}JEAìl‰í\WìW×Üž(>':EhD ¶£ßäÞß‘“â¥m÷½*@lp†Ak“î%îƒTß‚ÔQζ¸®ýßÛ;Û]@8ÌQv´,¶ûé\J@`ûÇ%ôEõ#ß~"îSfµæóÄT2“:Ä,¶|°ÀÜž(¾é(?j?²Ëctúô°Æ¤Ì^]üìå–+«|˜µõìwωÛ#€ÚslCß~_ó¦ãŸi/7rްïx8c3á¢û‘=ï#íM b«j¡‘øâj˜àMÐ^£»D,ëc„ףɶP˜*ÕÀkÃdA¶á,fAbR¶½žµ×)Ó¶Ù§·˜]ˆó9>À© ÒsÛ`ÖÇâÖ°ÕaBLœ¶Êö£–ö#1•é:Þ(¿D˜5Ëóб†E¶Äú.– Ñ)ÐózgKX·F§ŒÔ Œö®Ûcܨ b9`¶e?7E»*åúõ—ÈéGmüj?RS/UG“]|†b”È#ÆHtÈH˜<<Ña¹ÃµÄ(tZñÍêç®›`›¯Ëu·íMRC{‡ö|iðÒ·ßÝ3f^¬Ú‚Šý(0™9‘û‘/ö™¥µÍêçRU;ÃÀUÕU¸ ³†E&è}'à“æ öOa_6 ñoFeQ›«ó{?òÐÃà‰>Óï»ÐÌg:n™Ì+Í« ɬHÏ›m™dKÓ¼‡!úLOô#ž£,êG[؇û‘WS?jÕ1ˆQQ[”0M%«B1+Î(~f,€÷VÇdU¬4§Ér0·ø \‡ÈÒy'ûm„RI?výqð%yëj`a;󰤹˜Ÿd.×´ `Q˜µ&›_'ô£Cô£ÈdæúÆõ#±Æ ²v·Ñy1z œQiQLL“’v@°´Z’‘PØRN€ç›<Í]€ñ5xSÚå(¾ÝîHîk×Ñò^]`g‰ÖªÆcȈí]±ÿ_oÊyÙç´½l–-úQ༇’ýhàô£výHe~¢i§æd±`³ä¶}t´¡5C%%=£úf4¶‡ŽÕ”Z§E Ê~® Õ¦›ØQ!>=ÞÞ*¹—Á2ó„Y»ÝSös× Z¾SÓs队§ûäeÖ¦ý\²Ÿx^/›šÌ¦ýhkŠ~Ô¯¡…5÷#eb—–c¡T&ðÑ3™90™4jœ` Jh¦sWöd›r„3Ú>ÈSr‚mù¼Ê(:´ØYŒýCÛ.)”®=‡ý†µé's1>|=ôŒù%ºOÑŠÎ/õ °÷j§ ¼‡¾½ÇçuL2à>ÇÀn¿—ÃÜ¥ø¢_Œ¼¯dnÎyÿd?jUè‘èž©6'Uw?Ò´S b+%ƒ)ÀG޹ó…&óˆ*Û äè±…Ñ XÄf1ŸÀ$H›]¡ù¼ÊŽÚƒåÖ·mÅPœ´(ž*!›“yƒR@]ÀÖâ˜+¸ØÓ1®¯ÊKFfDqÿˆµ…ÒíOë¹öJÀLxï;`øc¶KÄmà›ßÕ艦ó¼Ó²ºxÆ~äÕÜÚZ’EAled–,œ³ùÛÚ‚Å¥&Éil:PÚÜY;"OÜDYD%]°'ú\ÅH=È´Ä”›Ø'åBóe!Lœdb»Jù O•Æû”`Xm³BÞc°„çÂÈAE2=ùà›Û)®öÄrRè¹i´T=ñÁzË(ÿ¶9íMÈŽ;©cEÓšW,êGIÉ~ÌÒTÄTn+ߌƵK;7€®8fÏT÷Zh¢R(µ«´  õ„2 ư¶4¿ Fñ)+³JšR6ñhÜ7ùAÆC‘Ÿ”ýþ ½í"g6×åo˜÷qNL¾‘Þ†‚ÉTµ$Ð}çX­ûQ¦bûŠ_47†~dféGpIT)ˆ­´«AG #”ÜïŠÉ†‚™Ð¤òS9쉤}“Ť$y>µ £HfhPª|ýOF c—pí ¬Œ€lÊü¢Uâ— ”ý*§o¢¹¼máYÙ*¸FÊßèô.ç°/6¶J€ER0èóga‚9ŦBê.Qý¡ëö#j‹\éçÜÄÄVZb“%/­Ê†|°¡PÑ)"˜+ÙDâ™lŽGœ¾q1«tã0¿E Å Xž² ráÆuù9£nR<4WF®øW¡È÷Öíå¢k¢4V4¯g²\€§©®"7Œ €}1™ŸÐy{ó.§Ñ1§ÉßAfSI…~Äæì•ìG b*Ë”J£…®g²ÔRl®i‰ÑrbF=°“¹³‡ß4#à­eŽ&¡÷r–“B>f&+_7}wíú4ÞnÎJv™@v™YfP°¯Kx'ZXDÈ …°î-jnkFé‰~Ä}&ýjR?â÷£hZ&i4ðYAì, g `S‡ýÿs$L=‰MVê ÀI&3dÞntªÁÌvpÍ=¡l¸ÜF óâZ¨Va™p›çyƱà•ã&Oâ9^ «Ø:úQPS?òÌ.,»hÑ`çzEŽðÚ; T”ÛÌCºSsÌ;Dļþ,”ŽÀœ{ uŒ}÷ª ;Óª0ÆÀ"3è2x{ß‘†û{H÷xIæšÔýûQ\¡õÏR?R&¦â É>u$;’›5ر-:¡g²U!Ös¢Ó®èœÓÄ¥ø«8j„‚½Çp³¿9îŽÙ\<gÆs)ž¾PÂìv>(S-º&ÛÊ{¤Â4Χ‡@å¤6_Mf½1ý¨%X<;MÛV*dC™˜Ê¼…;¥Ì¹.‘r0&ëÛ7YŒX¶›qôÛh¡¹ ¸Ç»`¶ï²€Hê=‡ù :Ž¿s¸i?Ï Ö—š«0•À’•¦/X•R¹†ÏÜX"®÷„}CÅœb™FƒlÑý¨Wc?Ò¬õÊÄT„I„ñŦ¡³ôÄvg¢h¼óˆ Çe§€cÇed÷ˆ(ù°k2«^›‚í²Ã@€ßTå\ιrAË>oCîñ&Ë®Þv“k°Ç8™dïN ø"ò‘ö£³ÓÄTÊŒÆòþ¯£C^]Àùû¦Á¦EˆL“sš©qÛp!K~\Èx±9ž'e9º@¦´s¾}‘]ƒb¸8À;˜‘K|ÖÀk`'¡â>8×&Ï¡iâÖ[Å~¤¦ÅŠšë“ØÌVЯ ÀÛèó†òå|ˆü6€ìÏ%(ëìO`!ß2 Öš’õã8^‹Mˆ+biC˜!=Ä€M•L¸Ðw\s®k&YÏ÷bC“ŸÌ‰­p?RÓ¢2±õµ·½ím·VÄìruEî©Ïì§JM0rAŠ*š#ë¹ßDzÊëPø}y,Äiõ«±00®ËøMûžÇüX$S­°^•D¾"vÎV íGgº)ˆ©”z¡ÓÔIz'j•©Gåàe ; ‹M6g0p"KÚ†È ?²vyÑ)xE·äµ‰ß”^Ê/d2-W‰ošÿºˆ¹¹–ö#1•\ED «*^ÃÍ 3)_d=0²c‡ù¨žLà¸N0_&EÎ`cò3¦lÌG#æ…cwqìÀŒsº€ù``Y!³IäV¤`ñ¦&?–Õɵ©(ˆ-B®©g²Ô5«*톟ÿÌó#²®ÃÀ$ˆlÌ.ç1­IÎ“ΟŽ'M‘8&Ç)íâw 
´äÁ˜g¶ÄÜ×å5ìGØðª÷£žQQ[‘ŽÇ“êጱZMQ"4á­³}ßa.)`š,8Ø8`7«‚>U‘^ŒÎRï›`©gä´VÓuÌãÝ»€û³Ný¨¥ódóõNœíeÝ ¸ã‘Ä~n Sòv×°íI‡Å6[Î6WÄï+Øf¿7±Í%y|ÚFüæmÜcËm¶sŽM¿w/ãÓGî³®/Ä“ŠÈ˜/Š œ68ØÔ‚ã´ ÖXζCœÓÈ(œbI–7°EN?Ú*ѶKô£K%úÑ•ýh§b?js?ZDáY•ªn/çÓò%^³ë{ ŸÆt@xómã{ŸKS¶µA¹ìƸc—hs§Ä6ãŽqÉ-w×y/Í3cˆö£æ÷£u5'Nñbš,]›…b„Hf¸Ë<šƒi*át7ÙÓ‰0Z;)§Îû sD§ä)õ‘½›KÑãu°Íþ˜ã]“û!g–yª˜à’lÈZOÌ–.Ód½˜wa¶€K!ïí<ïC®fFs÷‘“Ç¥i3{°è¾ p}í&Ô[ýÈ8ýhäýD?ê¢]wÞë^Ýý(¯?ÀÉ$M%&úÑH/êGöëõ£€¡¢²¬ŽG&Žgçijƒ)ãf…íoÌs‹öŸ]æH™Y3±qlªd{›.kBÛcŽ_†eí”<ö•1ë¯À¹¤6ÆAÔ`°ôÿ-íGKéG×–ÝÖUtN¬üKx £Ù®3úÚqíÞ¶ûÜù,¶Õ‹Ñ)±ƒ Â©¥åÕãÙý¯åØðóæN”,®µ‹‘ä…%Ý~v¾ ¹ .€€î[ßLçÅšï±1̵Í#ÁmÞÇÌiÇ’x œãƒ‹}mÉý(Ó6Çô£­9÷£°D?º4e?º*úÑ%Õ¨õÉz Ê¢Ð!Ȭv0çãDUMèxd9?Çscï1R> 3GA·9O"Ø=‹–õ*¬ÒæM3šµ‚@ŠKÙç‰_ÄêÚ.ω¤5{?bÓ.3Û„rïP÷´îG8¿ãuêG bg¼6ðÂù&KÙ3/鈑ø4µ¢9ž#Ç¿Dr=g»Q³5m{4 Ê„»¤ÜÇ„Hà;±í2ÀPâxì5yTvŸqÇÂ;Ar`cÝEäOÔ~´ü~´Î¢Ž“GÌ\X°cÊOÏú‚Ï"áœÏDÇ*¦Þ¼'©a2¬[ᵫ‚A«ÂlJ€÷@:Ó$t?¦(Šw"t–ŸŠC›#°h?ZR?R;Û`´¤´td}Ë,&ÿÞ̱Y%¥Q™Ò RCÅTíUl‹˜ÖÕœåTRfQ9}íGKíGk-êØQnD¬rZ)-"ºWgÑF0»ÄY¶mš™ç®å€ÎáÞÅÁ‚bÆíG…l1ÑÛ  6oÊïëm8-óšøž7(€ïª(’ºcß,Æ´§ý¨` ²ìøËu5'Nfb”Qûe½§”Ò**mR¤½˘¥m•DAF{º­<¶/Ê6ƒfž˜a«àîM£qîæ,òD¤|œÚ–ÖÄÎ8ÛxPïÄâf¾ÚM–y`2ÆC/²Ÿs´†˜¯:É"ova—^…>@¬…ï]l“æÛÄ1´QvbŒq¸ ¾¤ýHEALåÌIí 6'«†,dÙwXÑPxr©ó¨>²Ïû‚U…`KWM 6X12øía› ‡áµM~M• ë4A©cŠ‚˜ŠÊªˆë.?¡eÀ1 y«ŽÉ\°9›†€ b“å‰ÜZ”u„Ú>eŠDüÚ~ÞyaYÝs(C}+TVYÔ±CE¥X8TL,b¶%oÓÅÿØž4…F/ 5D{•ÚZLEEALE¥y¬í€Càsˆ`f߈ y€Rš[ó\=h {&s éÔúSœSÝ^¡jNTYiQsâäÝï~÷µO|âWÅo2QùvÙžýŸ“—r\Ml—ïb»msÚU9²ëì:J#äaY‚å+|›ƒ:¯a˜—õ‚ØÍpè8ŠÞ7§Mp^?_έ!°¸ÍËàâÍÁ48Õ=Y³~Աˮ£uE9«ýH™˜Ê\¤£HüOßCÛqÎÓGtN©@cþPÇÃ:O쓎úÅ~«È‚æ‡60§ÝÔc3&à$ ¹çæ.+¨v˜Ãòª2¤y°&o]úúBGô#S¶ 0_«~¤LLe!b;̱í(#£E’¾ªT—&øÒ[å`ʺ•v%wà Ú53¾U’d‹³ô#»þÐî¿ýH™˜Êâ”3™BL–ñ›>¡íH²¸_dßÂç‚ÓaÓå`ÔI5±èiöãå0¤…¦Dr‹^ÈÐÙgÃÔlú›W,Þ ô£¤L?²Ÿ§µ)ˆ©((Øì ÌÛ¬ `ºHí÷ø\Š—Ì›;ìgßY’ „ *FkpŸ•Ûo8AÙï”ie^ÊH0¸øsœŽYí¬Óö£äŒõ£µ5'.GÈÖ>°-Jmùi­!±Îw ›H8D_Ža¡öÈóíÜŠß'®Ü|µÆ6㜢˜>Ò/Y^àqÑv^ð˜Ðžó” ‘œEj/zºˆzb+Ü:kÒÄTêùÙ" z4/V°.¶üM¦ ÅÛíÈVZàéWw›ûðÜ̇î£WT ÁÎýIΧKoÏEy~¦f dh?Ûס©¨¨,QÈD…[g›;î¼* m¿]æ*˜Çk9Çžo ÷à†¾]*ë :'¦Òt6Fì¨îr!=9â–RpäÅV‹óæøÆ1 :¯Èaa­šëªÑÜ]_ß.1•ÅHßuš å>-CÁ<ÐØ‚1»Õåîï›g N)åÌSÉLUìŽëƒûÅé¯TTÄTTÀÆHáú`q.Ãî4Œ˜s pÓiËMXSÞÂq™:º°ÜqUÆfMq[âüÃ<ª¢¢ ¦¢2_I3Ä B¹Ï¢”{&ËFŸ÷[ ffÍVðpÌ#ü&ài9›L:†Ø/Ðo2«[qD1••dcÇÌT¤g–ÇS²1r©°©’8fF¹íLóbæ¢N×ÒãR.SÈйo'`VT©ZEeUå½*«$`bÄŠRÌpÃ?©º<¸´LæpÑ+`wìtQO˜ñÁ±#æB!œ@8Æy¤å_¦eLð ¬°¹<‡{¶S&†MEeÕDƒUVQ‹¼ û`N1ØTá\ö%säE°±.$3¯Ey ¶Íjg«WQ)uìPYI!™Á}šã‘Wd€Æ¦Ìÿ÷Læ˜Aå™,£zßdžñ´”«t¯TTÄTT&+æÆqºXÂ9l™ÌÝÇ÷Ðd®úÉ<Í„À7TSYWQs¢ÊªJ¦³·¬pÌ ½OÑd¿* b*M—7ÿüŸmB©{`IÁ¦Ìh}ÿ³úÞ£ŽÃÌ#O<¬›Çqzè{i¾*\&ˆ¹òÞ<´ùÄŸ9jÊù€…‘SÉu{?·ÌhÚ,MÏç¸âóáöZcÞ.4ÚZàqbúT=ŽÊꊚW¼¶M0• mtú½ ÛSL“/”Ñ4Ç¡ó;(qœ”ÑØãˆtQ2 b,ˆ5&;<OÆ:$üì{>vÛ lÛÏç¸â{J`Ó÷\qœ.0UsÁd¦Ý^ÕÁ“Š‚˜Jyð‘mXÀ–H鎇° •8¨ž{<(¹nÍÇ ]å‚ã„8‡Ò¦¹&Ì Û± Öˆ¬ìUY6K‡ŽžÏqÎûÚ)3©á8vÝYhÒqœw²Ó%óëõC±³^¬È©cìOè„|F»À°.³ ŽG 2`°|ljùúù8öweç8WMp\° vË‚ØùÝã¾½'ç§x>œí>dÅm—ÝÀ{q½Æ~Àƒ£ž8Î5ùþÕ|œ®|‡íòUö9€«(ˆ­:xMÅ4°ï˜VB€šoÛ¸<§ó”Å'[u*.ç87Œˆ]šEq5ŬhAìi b4Àz³°S1Ј À£y0§™÷qøZ</š¢îàߞι)ˆ5›™9{©^óùÞ€]\Àq†u\æË2bb4Ȱ@¶',œÕµŠ€wvoŽïÀ3á8ËD@–ÔÐO1;•剦š?€¥Ž ¤¨gyé1BMLAQÅš…½½qœZ®æD_$Õ]–´—qP˜U#0°£šžÍ`׳¨ã°°a ý05ƒUÄÖÀÒê¼u˜þàÝE#ÖÿÏ“5’‰§ï˜çî½:¯9=Ês8lã3­‚jOyÌkâs¡âþdnŒ“%Fa_kÎʺ‹Ï¼šÑl+S @PLAlílf‹ºLÄæö`ÚéÌñÔù8ûs¸Ô}=ðÆ£jÍ7«&¾0; E¦ù¹ŽA^„WùS¶ €ßMsÛŒzµnM(ëޜ߷ŽÏë8 48N„Z]÷¦«ÚNAl]e\ñÄid¸ ó.á^ÕzLdŒ'Ñåìóeª¹ßp`MÁê¼ FL=‘nð`aƒ1ûlÉàå!~­s°  ¬«\ÓÖ…>޳?¯ã`P SËàLxUn•¥‰fì˜ßÈÏÓØ’å XÉUÄKQé”ÄÜü7_að±ë[Y‰+\¼Û`€tŽYÙl’”»/Î…Üççét34‹™{2p-À,ךÇqlß…ƒ’ºÞ+ˆ) +ŽW èæÉ–†b.l®¬ Ç™«²%UÒÌ!ö»ëŠÁ‚ @ÅÃïÊÏñ½yhë‰>sXâÜP/   \;Î9ÀõÐMAÆöÙøö“‚*À}Ç9´Çèâ8Äœ{s:Î=ÆMûš¬bw2 ø:ͲQûù(çóˆã‚™§5ï€K6,è8Ãe0V04Ïœ6a \6T¼vDªé§`òŒæÌ°ªZð“y*çu8ÄbeªýTÖÀ6DP¤Ê æœJ{fZðÚ`³Ÿ—fUy sd*«Ùï¯é]XލcGý¢UtW\0탭•‘ˆLˆâ·_ÀÒì% ¬ô¬¢¢ ¦¢²æ@FæÄ ##ï7ö*ôJ‚×\ìûð¨TY]ê-PSQi"µ¤£E;¸"161íÓ!9D `**Ó‹z'ª¨Œ²ë8ˆË"ÐñM±;µô´l;Û¥ûÁ±‹‘{#JȨ¨(ˆ©¸f…¶Ñ¸‘u2š«ºÓ"¹h»^hIÎnî;àaî‹¶íÕ”ßP¥9ÒÒ[  ¶‚Ø—Àþ»§wcíÀlÏ‘—³ªÄF7M¨y¦¢²n¢sb**ó/‡¡«¬¡ c¬wBAld®àUš%O|ð™<Ó±¯wæÌH0ÏZh* b 
dºðµLÃZJé·;¦#õõdaÛúl—+:'6?I+ìÚÏU¼ìhäà™Û¦¥H+ÉŸ˜Ì„È%:h^ìpÂ~*ͪ4­~˜u¡Owç]]E™Ø²Ø)/J z)iBtòl£ÀØHïÒê‹H1•8€šù•™›ÌP tŒžes#RnT¤–‹ŒÖS&¶æÂY³‡2!0²„'%;Ñ–).(G…4×wÙ:uÀcŒ}œ—Ÿ×ŽJeéX sŸeë‰>³k—¯â¼( °Îëc=ˆœXM¨ŸØ~4À}JŒ:ì(ˆ­ñŽ˜O¦"Ô˜,»CY‘ñ' ¥œDÐ./›¥äC¤%#j•`à;‡™ï1²{f´dL—#ï"Ëç̨ ˜¬fU·`Ÿ ¢}v' šÎ¼œ.ŠêÍ¡,Ä„ö¨.𙂨:I˜7Ù‹:Fš©a½$žcÛ ^QcˆP’Õ¨‰)ôÅö'ûq– j㎩æ±âÁd.ÈYë™ÓÁUÄV–…í˜9U¨-£P™­a dþÅ2š/ÓLÓË |jczyÓ`Œw-¶¿¹òsêkâ\úš-d®GYz:ZÝYAl]Ä›wEä1â»Ç¶ë–b±ÝF'îk’'>øÌñ{?ò±—Öœ@ŒÊÁtÀ1Õ€r2a˜\å<àݤy2†Ã1çëSJFBjTÄV•…Ñ|Dއ òõN7’Mõ\0ïåav´Øípwö!/Ó6ÖÇøDÄÆÐFž«kKÆH@W’$ÒÇ9;ž`QQ[ ñçlšK¦åó>< ÷`òdi=jÀæô» FÙ:,K;tÀg€â µð|RG·, ý.˜¿ê`ûë¢íÀÜvÖ $Åì>ŵsJ¾t •Ø„z+NùÐ@VÃUÄTÆøƒ‚uA=>„2K'§1 -M_lŠR™mp!ŸQÛ{ùõ+–a‰ ÚpïÄ‹àv}\£ìáè,;R›‰í#±‚Øå½õÜç;öE¾®wãl r$²¹® öX&¦Êììé aY¬hÚ©úÄ3ùžj*k.Òlˆÿ;æt=1•³!‘Þ‚ÅŠšëŽèW9›’š1vbÚS9[¢.ö b«,íy™ó00£ó,©gZ^–d u(dðMær$öã #òØÎ0‚}0Ïûä´Í2r Ñf»(¦Ù6>nû[&¾i°äŒ “æÂTTTæ jN\ !O27 pH…ï@š£1g݆ý<“G m¼ðÛ=NŸA “Ö}“å ¤6úœ’ß}G‘wpžIJöÛɰK>vœX·žÉbrägÙæ»D_Seb+-sd±Ãôv‘T˜X‘\˜Ì.O@pդ̨РVu˜Ãþ캢=PMÞX‘£Àn˜ŽÑÅöÌ*}Yï@7Kãó¹Šëõ%”>íÍ‘·nz—›— úDu”9Ûí²ý‚¶vä>ø-­ ]»îPl¿öÇœ›‡Ÿ­‚X ϱ < ÿº³mWœw:еÛìå´™7(ÌÝ^Al½$ƃ_ˆrxæ´3 ³¶,+D &›èF˜V^"b03“!@ëX\c×®÷p%%€»m²ùB:¿>Üý9ãÄ®XדæLœã*¸}«SO³$4YZ.~÷|î3VY{V!‹ß¸Tq>ÚbPᾸ&¶7îÀPŒó mž¼C“L*'  ìµna›¸Nê“]»ŽîSG\ëˆ çšB»=]ï9±õ•y·øVéË9ÎÂ9Œ'a–bÇ$XÎÉo|zEóU+ ÎÛhóà¤|®n9Ÿ"ðLãaÄj–9;w0H c÷õãf±çå»`?Ì\aºƒõG`Q-qœcyžäâûûÜI&ùäŒ&Ô¾sˆÁT`r©™T¯-”›‰º8¯Ô€œu‰©7ƒ½ÊlÒq83å€ÌÂIa•ÍÀ!浓ÍSÓõTɅ겫¶mó–-Æš0#s‰u±¯W¢&T‡¸ì…‹ôI³æ39ÛøÏdi‘øÅ” gà¶ÀI LPŽìz`n—Å‘¼GfÔIÄehlwÙdókÿ¹Áþhý-6Á‘Ë;9,l·a?Ïœ&—oƒ­T>ö¸s©x…çàœÿËîùç\ßvQ›Øæ)ûiä»­LllÌÙ½ gV4õX³ÁmqZ>³w˜‡vùMÁΤkz¼ªsh0ý‘¥#Æu»÷aÆz0¡ º_ÙFSDçÄjx)ZªüÌŠš›óú˜/Ê[×qLc¤¤SSµÝ§%$=×…¬%Æÿ/£-9KàØsœJdxŒç{8Ãàgâ~Òö\Ò’Kö;«èB'€yXd¸Ú§QÏZŠe½tŽÊÉFÑ/r¯ªç14·'[©£¸^|Ûã\ÏŒ©=Ý ,†'`ß=7¸Çà²eNÏ}E Ú©9qƒbÆ´œyÆÇ»O€móIĨšæh¡ ¶>@FŠ>Ä'0Yðð-¬â2Žöb“•¤g¯=Odä ypÌþ}“yy&›Ðí°¥‘e+Änx÷E0sÌ#K´͹²u­òè£^2Íö&<þøãZ“LEeŒ¨9qN7øÀÑ“Å2d¥Í"–«+]Ù&ĪŽÍäø˜—Ì›Èñcdb¸(Î/odÇÀÉJu$—#ªcÄx­Š«r°½½í?üðÃ;±/|á æàà 6Z%XEEAlILìá2ÅÁkqh²¬ðUlà°¬=(wKžKm·?ïì ›Ù M¬“Ú[™xß÷Í;Þñޯק>õ)1íHsrܨÓa&ÄaYóÞ¤ã#¯™Ô^ÕãN{mMu¯W›€m4®BÁ§¹Ò¸Ž–©>ä™Ù²A²4Ù)æ7a¾.1ùijf>qr*gh®™œøG ¼È‚Âéà&‡yñ^4õÚãÊéräQäöFœ8ЦgгÌï °9)Yds‰Ðæ±h³ôñqݘíl€ý)ØfÇdÉŠ‹2õ?=n=]‹Ý¦'×ãØ¡Í¥:À½9tÚöŠÚ¥ø3±fXZÊÀ‚ÀÅ<À˜¥LJi•˜â Æ4»F8¹5ÇLVÜñ çøCq¬"°s~]lG÷jÌ}‰p®y×1 À3r®û Îç§mo^LNãÄê~aê”ô%Å+OŸý< €<*:Wlœr‹2r@¬›³]`Æä`ù¹m­Ð\Õ×¾öµÑÞÇ)½îu¯3÷ÝwŸùøÇ?~jú¨Ô'Pˆn±Ød†öÍh½-^~0Áä˜fÓA6‹c±ß>2gÄeúDÑñ¹Ï‚‘ºµëïŽi:RÔK&Äy¥€ÈfÌ E]u®ûzžþ›§)R™Xýâ¹n볊˜o’y sëþäTKöåsÌ#B¦ÿÇG®ÒÌž‡òºÚ¨ôÌEýÚ` {ëøP ¸~õW5(ªøÃ†BÀhn>ÄÈžþù–öØc¥ÿooo› R¶¦RK‹klÏ7Õ燃 ˆ*sÎEÇï€ÑÅŽ¹/eW0§–TGø¯H9£=õûóE ‚…å%çvªÄjޱšGÛHŽÛ7Y¬YšyÚ™[ë˜œŠ¬b›sˆ÷âÎæ >·o·'°Œ¸˜'X˜'­+ņ’bPAÐ3<_4é –˜_Qùœç Š#IAz¬NÎ( b5 ç'0º>fýH±½i·)³ÜèÏŒû±* Ç<]F ‹Ø­#`#&F¦E¯ßùß9Ù. 
ÃÔ¸•Jƒí°ršl.¦ï(ÁÈœv`°§¡E®i×/¡TcSÝlé9û”=~Âl‡€ØA_°¤`*«®»sÙ |2ðº?@2ó€7c óŽ¥LLEeQTÀ1)bdd$°"bVì¢OËØ¤¸³³“22Ú†—“)’~ÏCl»›4×5cJT¢z±1å«#wy;Ìÿ¤§°K—”ý…1^Ɔ[+{üÄŒÎswq¼[hso óÙ4Ù4A×óÖ¸êÌ`~iÙaw'0¯ g®ln^uì¨Iàå—èX¡90b^,Ä´°\Pb!³"3±ŸüÉŸLYØ¥K—Òo^NâΗ¹N"%Àê2œRôvùl³c?ÏbÛ‘íiÞÎi÷)ñÿMûyÚ~^í¬BRk¯ê"Ançô ™ózyÎX6ÖÙiÒñ ¶õ'³0”iyP”i9oJ8Ÿtz`rÎ:µ!<óì’Y`Qebõv Mþº&F&À£££tŒÙƒ31$M&D7bkd.#£vià§?ýi³¹¹™2¼ ’Žªm‰=—€Ï_vìÿ× äØdEïe×.÷íö—͘P »Í6ÚLçVíÿ‡ÄðÐ6äÐ.kªãhîÌHÒõzðš+R؇HnK,'¬ ÇŽ›óÚþ ’ëÆ&›ãê”a‘cŽŸ .˜›ÄºSàvsîccÌBÇ À–íÇÌØLælæ›Ñ"¶ÌV¯äèGŒr¦¬4 b b*ŽÐ|Íw‘pðs"p¢õ̮صž²kàH¸1Ѿ?öc?vâèAsb €´9…Ð:Ú¦ŒXHôí7;ñD&sø!kÙíØ”s`·KsfÚïÞ˜¦‡îqðMÊòmxKx}39F’·ë0ŒX2Ö<‹ €¤“ÓæÄ>Mîõ0fH±Z@íp̺‰ËTN å]¥êæ b*ÊÄÖHÈù‚̇ä˜AÃÎF3­ùÚd×zöz$§4bj»»»)3«ZešM|®× <9/_êò-âÊ8ö‹×ëÕóp6ÛP¶ºxQsâ|^d•ÎÒÁ&?0šYLÄÞÀˆÉÉôTÔ.1½iÎ)Éù*8jœi4Y°2gåpã©Èô˜ hZezáÀ_eb++‘¹í’|]oÅê ±0fH2h™ÖËTTU„Ú$ c÷zfeÄÀÈ”H:E‰z×bŒþéÿ]a fuѽ§ÁÐÒ haŽÜ…#™šÃ§]K)ˆAݯ®Þ‰Õ:f bJô›™Ü4 L•†4'ÆLŒÚ¦e^´¼ª) Ól¬—çaÆF 癜l`n]Som®³fÙ6š±g)¢æÄ9°1ûB_ÑÛ°º@F ‰]èÙäGŒŒ—U)+ä^OfD0ª3FmQŒoй°müËÎC“¥" ò‘À÷e¬#SWó^ô;ëû°P߀©%0k^Õ\™ØÙacä¥èÛÏVÝžU'lÞ#GðL1blZ,ãäAæGÞÚdS"×ýÑMkF4‚YõJ\]°±#0Ç¢´rìÈ¡iÒêaa4hí¯sQX±³'iy2-ò‹M f²t¤p"§*³JC„€‡3ËÓgÝàìõld ¤™nÞE)Rœ­ƒÝë‰áx‘‹=µSÅëN)ˆIB.Spª¤¶M–±d  ýÚ£Êëz§ÄÖ‰cnŒ€¬@/ë6`z¸¨w«yBà {Ù;ÑÍ^O@F@Gßô‘©£¼(5ÍŸQ~$Òë‘s0òÜ[!ÇŒ0Ç%¾+”m4f»)`éà“c¿ð›úõI¦z S[g £—<ÄÈíº³.Ñ»ÔL!æÄÉ ¨h¾J21fMôMä"ÿK¿ôK'û“·!}èxq»0MsbÄÀèwÕ܉EÊr9²rí¯ˆÕ ±è»ì¼aÀ 9%ÄÖvG.ËCûÒŸ#sEò‹Q°¦ði°Ð|™ù˜X±Û=™ ÈH@E`Ä™î¥'#×c“#-ãJÎlt ’i²Ø«,L`^~{‚õRíºHç¿ÄÖ ÀȆîq}1rö0Hs„N7XˆyQ (®ùEB Œ€ŠÌƒd>d ’æANOåΟÑ>DM@Gm˜N“ÆJe±ûuÙöÛMü>rú:tÈ#9Ð>­ ¶.–ÆäØúrA§ÐÛŠ™t{ç92bMHÛ%³oÈDÁ¼œÍ\Š…ö/› _¥`vTr0/˜éÜØDãÄê~¡UÖHœ(!01.úŸ‹M‰4ßEL‹¾e†0Y’Ee­nÈlMEAlÕ¥¥®óë'd $@’¦@b^”O‘æËdP4;rpÉfn´Ï4?TVBz& 6WQ[M3‡&]C!¦EæDò&¤¹/žÛ"F2ÒÿÄÄè›~ÓrúŸœ>ˆ½¸Q9v·WY;6Æqcš|Á¢sbõJ[€6W$š¬‰Wv_&2R g¤g¡ù.°S.ó´ÿ_õ}Bsb?%ìEžCrë&ϸ>ÇŠay«¨Æ˜ÊB„cBU(ˆ©¨ÔZT|Ç&«4ÐÂÿ•jÀåÅtÑ\™ ú§:u¹ç˜Ȧ”¡Ý¾½ž×á;´Ë¸ä ]•S¡å=_ûxšÉc¾lŒRÎ)ˆ)ˆ­¦`R7Ñ;ÑðÚÀ¨˜¸gÓ“Š\€ÂLBÀV5`¹¤$`VÄÀbšÏ™ê)O"€™²w\ŒŒÁ™Ê­ì£N15õ ›¯hü§‚ØÊ øUZ—õ¡ÄéYPÖöc€ZϬp–qd㸘³ìÁÖécý1¾÷‚ 'Ö·fn¢ÕÝÄÄT*1®ØŒšé÷P¦]‚2ÏÛ›¶Èå¼¥ìy°Î.33y`z$s—&«U&¦ ¦rÄÔ©cî µ%͆tHV0¶Û„FÃ1y6Xâ’Û¸¬Ó+ÂFû ì6‘[<fÈ$gù6@ð@þÆ@áÐÓ–Ëò¨]ÞN°E ®êˆ¢¢ ¦r&ìØFˆÑ.ÿßv‡ý*•þóc»+vvÌhIŸVë,Ê ÓþyåZ Î9ÀD 7ÄüaŸæ¶çäàyh²)·Ý ,VÇZáQlµ•W‘ZEEAlÁB1"š?m.Š{ V*;rv K… “y†Ì8û胥¤L`UGÿp·oá¥ÃŠ»ÝÖÅpÂ<#‡»î),P ÁÎöpìL»ŒŠAÆðˆ,#¸çs½i´šŠ‚X#$+PóHýB 9Ê­sý¬J½c²`óŒ-Ðf/öÉè:ìzbY J{ieKxó¹{ïålàü|»÷l8á>'pá{×ø·]¢y7»nêr$¼ë+®¢ Ö QϤúù «p×å˜÷ €˜›ÙµË®Q\ʳ/ØÆP£%La]¡àìBüÏlo –±y3™Å æÓŽ8Rö½œ9«M3Z ³À9T¹@¦æãzR£vŽIA{ƒW•Mò}ê¹N(ªTÄ– t¤ù -8X?Ãe–$;Ž2vAîH(竎²¦uçœíÉ<9ø°#B(”ü6€B2·@(yžóiðeFKÛw¡ô}¬ëÃ\·aF3‰¤ó^´vsÀ=ƶ}Ζw`bí:Îp|¿èfÓöˆ?;)ž‰vSF–ŽÌ„½Fæ*õPìçM–­¢ Ö(ñôÔÊÂ(þ«%½¡øúŽrl9ʰ“!ËÐŒ:$³àßCì»Aàq0UèŽ2ßǹ2Èĸ†cžC’ ln{ †&s˜`VØ-áìÐÅ1Óù>ÁÞ0œÃÀÉ[îJS£ó¥¬!€n‹÷dæzÅdñx]3:9à{ˆAà Üsëð¾j±u´ƒf±¯uPå,TqÎæCidö›lì(1‡f„òa}b=ä@‚蜀×9èXÔu˜;ûÂ}ÓLˆ7Âþ—ÀºÚ¸F?¤„Yvªù+8~ìàÚpû¸ÒÙÝwXŸ;x çI7üßèüÞÂ彵Įè¬23Û‹¹*–ݛدØÖ-rLcb0+•OPÄF1ëë …ìáCàqƼ]ÆtÁêúò˜`JCÌi±«ü)ï@•ZûÿMÛ÷/êP&¶²B ̾ȞމڄFµ}Ä‹eÄ:¦{¤¶@–e!€Ðñc˜ÑvŒ0#0°—­þN'O›qS§qÄ‚<'ip ¯ÔÜŒçJUÄV^"rðP66»$H1 È•»&“Sâ0«YØÐ%*—Fa3߃9¬In<çwËîД(u_`˜k"`¼ƒ®µ¢é•óLú&KãÀ\˜–)b¯˜ ±ßJ½'æë‰H AÅ“™! 
ö—âPQºÊÂÄÖ…¥åìgËþ¨wdf »,Mæú~9ÌFÎa ð  h}3{BàÙ±3‡–6pÆJä(ó2Àó¬Éæ¨x~¯“Ç"K²ßfºWŒÜÂ}Û` >¶±µwλž{œ²  µ‹öÈ“2‚#I£ ^É=í­ bë$!Y *¾nAÑa{êRZ`<<’¿î°)—Ùt%éA1¾ì\ÇŸX2¶q^8}84”:‡®0@ø7{?Vhï ;îÁÔìExƒÊdÉ—2™˜Ïd¹44Û9À¿‡º®¥‚˜í»Ìhœ`Ÿû.ª¹§ƒWí¦ bëÄÆŽ À¨ÚïØd^K!ֱɿ²Þ­RB`‘f’ ˜*lt#É6àé¶;FQKðk †àºé·y­84“¹Ì—õúóͨ‡^ìœß–㙈4M§à¸~M¦V/–tR)JçÕ7S†žÈ–:Ï„Á'%÷¾Šß©ó–ý>É!i×i?V[[ c³Ì£·c±NK6”Wfì¾#ƒ'NÍ]ˆ³bP 8§¢NñûP¸±_1ù©¬ò€cÃa .ø¦ä¤¾«è(K‹Ç0¥¶8nªDk®6mƉaA0t[Y«8‡cîû²¥%pÞŠ0m;¬_e ¢.öóÁ±™ažK.2YŠ)ÙVb2g‘ƒ°kœÌ“{ Îyì.1YЮÉŠÁÒ8VW(ÝÈM¿5†íEBi·\»¸& ¢F/Æ~¨I e •Ç`À Åo#Ö·Š€ƒ°M}N *§û}¬^Š bëÀÂB‘-À¤¢g)òsb›NpÉÒ%±Í`Pµãaðì›lÞÍ7™ÉO§çO_&;?Hse„ìöLæåœW+‡uöò<4]wþ1÷õ’É< c×û3‡‰%È rj˜³;eFñaÆœö` éåµ)˜ Ølz¬}_ALeº9Íd®“¼s¬ÐŒš÷(ÐË®bJ5€b;€€NËaT±ÞÇÌm0¥àæQb²x®ÀaU %¸Åhã’ÉL”\©zƒA—ç»Ø£/O‘ÃÑ";.¸ÏO FË÷£Ë^”ˆs‹Ýz˜Çs×y&3­¦ÇAæ ®11£ålÄ.ÂÑEÎÁñ\Z[ "f™æ¶·æƒKêÿ×d:7¦ ¶² vMGbs1V®\¨‘kc3xãm9óP¾ö(íáx¬pûPØ— J†Ô2£uÆ8«Ç¾ DlOš‡©Åh›3ˆŸ«pÍ7ÁÆò˜Ú6®ïA‡é´Ä}z™çìjžíl9ާ±ŽçÈvK¶yÓdÕb¹_Õ4[5÷4µÕëXALAL%ϼ•&îÍag=Áž0›ÄŒzF˜NÌ_3²BO(ö-pÇ"ž‹“G&3]vSÛL¤g2³¨a)!3b æ8Hl¢ý(§ô¶)v“ß ·/Lt}¾_`[¾Ø-‘ëd&“¥öòM–ÌÃ=º€ûóˆs˜…å‚&§£Ï74Yð7ŸÏžê€³#ì\ßË«ñæ'RÁIáù&J]fgå–šñìº!i(€hvÀ`°có ï*D1ßÞx²vØ¡ó›ö‘Š4Š~Ë/ANÅ ñVœFŠÎ#õ»F „ÊX51÷˜)óB\<6Ëaq Æl.À2»ÑyIæ$Ö À”ØLzßòù90p^,h31¢ê³ù&›[ë"ßâ²JÎh±•V´½ µ³0fý¥;"ïàP0“ÔEBñ±CHb²¹ª@#‚2 !¤%åb<vftnËá!YŸëÜà:Yˆ¹­& Àf÷ünÎýa'çÜ:cœ:[î“H¾V¾žóc€ðqþ<—høEx3Z&“.LVÆîò`ïF"ï!'ËL¶´×*ˆ)ˆ©lb¡¸®™ÑP±3 Å;Dª©}—q§ê²¹T¸ï€Î.Ž—8Y<ú0¤ã‚1™—a„Z^-3§‘e^\[K€â ’`œsoä1Û`£›ÌFÌiG În2.Áp ŽÅyJïN“SÊæ”yÕnb@pQ 2øš]S§ç¶‹A@Œc 2vçè3â|Ü·®Ynueeb b« b:¡[»Æñ>4™“ÃÀ^'ñºI|g”>p”Nú›³Þ“u³\qÎó¦²0†Pà}€ÛâÜ{òͨ«|ÊÀÆ¹Šƒ™Èé Pæ7ȉë Ýqó>€ ÷DægŒÌ¨9‘Aiל6a9˺Nû| ßm@ËÁ¡(ÍãÔ$ûMͰ˜SY¼¨cGM¢…0çÂÄv ¸¤÷Ù-(⃊mÝ2Y°pÛaBRK³P;GYŸb}¬ˆËäSÌK’ 3Y\5YÄH±8) vV= LLe¢œ;4SGíÒ*¹l¢ VLš#cgfw`x‚a¸f£> ÇyAæ± IÁÇçàÔër=O<aeV¨¦¢ ¦’+(9±ú$¢Éz¡|c€Ð´Y䉉œc–WOª3sYÄ5ÁêìB>–St31£æLþ=&GòÄó&…€mÝ1à"ü’u}\51•©‚ÊXæÔÕU¦~ó]ËÌ8 Ï‹ëô²»óÛ»i™Ú9ïL¼\á<\eš©‰næªTÄVM(ñ'ÒÎìëݨÈvÁ&˜}¥¬Ž§æ¥0¹ëXص› fUXIEPæ81bRyózÑç5@\b²ŠÓ}{TÄTtÖ a¥¼ ³›ÞÜä°¾Ãr¤"÷EÐ,=0 ‚ß’€úTÀ1·Løn5]åã±wdI¶ƒ6C>÷ZŽh±u:wÌE,(sÅŽS^…wO(íƒIE'K€Ÿ/À¯…¸$ ¦Ìv8›—µK^KÛQt=çܶÆ)B=“¥¯¢óUÃfçÒTY ¨‹}ý#±@]lëáaÇ&<ßdYå#“óòæ»K ™‰}l¨W±ê²œÓr]ð[e*'—dVžÇë8cEßdõ΢¦W>>Cýÿ†íû—õN(ˆ­ü‹lnWyUÅ2?0KLäKd&5d3Y¦÷¼ªËm“™9ÛC`²y²®Hâ÷¡@×Ó//þkÊë{Ödõú‚í8WYYÚUÆö{zW4á‚ØZ¼Ì¤d;ÊÆ €#ñ[¢œJËdåQÉâ°L†ÎºÐd%W€›ÌÎÑŲ#€q%¸”ˆ`Ìúú`Ž `Íì÷7mŸ¿¨wBAl]^hòRŒ´8æÚ€×ó †aÚ;´/Ý÷–Á}/}©wï7Ùi„Ñ7"¡°XÞ3£‰};² 2¶©+¬@e¾ýLÀ1y(ëÝP[§;MsÄ@f_0Y]%R†=¹õ‘‡~öIRdák¾þ÷Ñ?ù›¨çÔc°k @cïÂÛyFÌá©,½/sß5è¿;o¡š{Ëþ¾®wJAlÝ^|2qéô^ü}±^'×ÄèóÌ¿}×T¦daFcú1 B|)ôë®ÉªV´tê`¹¢.ösrì°/|l²IúXïŠÊ8Ñù®FÊH@;úu$ØtOo‘‚غŽàh^l@£4¸Þ÷ì·;_¢¢¢ÒìÁhš‰Ç~|“M¤ªh´Nç¿UÖÀ.ØÏ•U:çw¿ûݳ¬oúùÏ[Èœh?/ì¨r¶úûM˜U”‰­Å ºQ/b¾Ë*n²×³9ƒFˆá'>ñ‰=(t6w0óëÚuØ'²ßþ>@Ë.â÷M´%G—4¿wëo Í“4[v[^GJ¼c²8¬À®;Â>>ÌHvÝU¬£}zö÷.Ú¹`².ǧs:Æö1]«8¿È.§kÔl)*‹–ïªÎq/A^¡· vq«ÖΓyôçÍhFôqëR€°ûlŠm"@Çf4ïc—„íóØŽg÷¹Hmóë/a·ÇíÚß—±, Ò~WyΗŒË™´±Ý#3 Ø]2 —ÂQQY¨ ±ÁÏ*ÊÄVž…µ”;1s9@Än¾Ä²Î "&bŸàçkbà(hŸ€Œ[  <ÐÈ;À6Iwí²icœ˜•‹¶®Ûc>åœ_$ÎÏe¨Äì–[a°f&¨qX*uÙudëQ籕–:Ê{”•™œCˆý(Ø™årÖ“é1c&ñ$YÝÜ…LöüzF˜9å:ûuX‚Éúöû3SÇ|ª¢RU¨ºû†¦œ[¬¨9±^i/ÐK‰¬ë(æK`!˜ö¤Â¡ð] гœƒëØÿŠqỄ{\Ì_U•4Õ“tÖ@»çöp­•)蜉…Á¤I÷¦Cs® UE¥‚DFë¸)S)­„‰)“xÚÜž/r“Ý’)¾Ù±£ö²%¹ýÍ%EŠX]ëZ<‡°ºf²dº-“¥O¢ãØÎ3§ç‡9¬ˆ@²MmÒ1`þL+:Ûo>ºÆÀmÃq ‰¦¼—»MOƒûuÌfÓ<¶ª¢"…°–‰Ñ;ª™V(š±£&Adû¬d±sbɺyèÓR-&sL budìPY =pÍꀫz'”‰­¢T*U¿Lp¯í,–Yd¬¯·JIê-P[UñŒ3g£²òÏ“K³Ðe¿€¥q&stñí2Rd±edê(¢¢¢ ¶:JO â­‡ К™u˜7ºx‘¹1pVù hvÚ?°`¦ói** b**‹‘I±cœ¸ÞWëýïz­yÿ»_k~è-¯9YÿÙg¿n~÷SÇæÃ¿ÿyï+/~+¶ÛéGEe¢.öõɉ~UÖXÀÀúÜsg«ÿ¯¿Ëüò£›#Fòæ_e~â‡6O>övóÎt/-Šì~ô(ˆ5Y4íÑÙ´–Ô/ÿ‹7Ÿ/W6î½Ëн©u7™&‰UQQk´(ˆûÁGî7ï{G«ÔÆd¿ðÏ¿“þåX:•õ––Þ±•ª;dnþª¬©`.¬Es`UDž¾** b**Ëe¿ùÁ»+ïHìMAl½sâ‰Þ‰ÅŠz'Ö+ ½È Êb¿tùÍx#G1û9›ÆÎïÁ¿|ÏcÇ3C–aI³ÒO{œ~[νðL¾YÛ}‰½7G޳]â Ìh8B¥c¬p 7•Цª$FeË×*íX¯@M# < ¾[B ×u ƒöå'.¸÷~ä¡MçZO>_}ñûÍ_üÃ/¥Î“œ:\yëî›»îü]ã=ôKòºG®ÿ‰>s8áþËûR'«‹A@˹þº¯ãƒ*<Û¾Q5¡‚ت¿È”7\år 
VinbTÙÉaV®Â˲ưµN"–Š=)Ë"pÎRáJEÏŠß½–ÁŸûÛø?ÿõÿ=üÚK_ic}+ç\Òïg¾ú>ï³_îä6ÿÀ=wVº§øôs¦ußÄoyÝ/Ä.8æÜßø½5o}èûü¼sÆùÄ@Ÿœ³½7‡Øì$ÀJL– z8á±µ¢ Zäžó ö{ºvMxÐD{ë¦ÇÉP=ѱûu”ìÙç‚è4 ¶?³=½ƒ¢mìzꄞ]Xò|[9Š,=wÛÆugû+fÔkŒ:VÏnw4å˼²l @š, +°TÙÌjíË‚•¬È2:§s¾ðŒÿ7ÿì%_þ³Ö³/|A*ëç339—‡~öIzOã_ø±ï4ï|ã½ÕìL¿þ—ô•›ز¿m¡ä}~g_yç«Ì[¿ãæM~wø†×¼¹¿Eï°n_ô{zzöflC´-2tïÃYÛW¦ –Fœ?ŽG~~0]Í>u§"ì>× 4»þ\ÁyP†ð–{¼‚mo™Ñоr¤:bv[®Šíî\3o ÅU°“,ævI“^JÓ¶OõÀ8+|Œö^UÙÆ“åCLpN} XŸ5ƒeéxSEçŽ_üýÏ‚XÎ9r>Æ@01:ÏÐÖ½]1ð çvµÆö7q]´ØöWŠÍÀú-°– ŠIŽÔq:V™Kes`•ßa/Ì–hyd·—u@EÛ_ÏÙ‡;jßþ¾4†áUÉݵíL»´xcpòõM-ÄÂhdf?C~±íÿ—æ\«i/}ÈƒŽºGÃP„NwY£m × ,Óó°€ÐÈA€ŠúÁæèz8oê'çy.È\¶Ï°‹gصÿ“Ér·¦öið°kÛŒ˜í™†Õã ´•×wѯ `ͱVxÀìç¾h̦vs¶/¹°‘ñÐGEº¨¸UŒhO+t?"û’ǯHÚËír*äx¹Aï‚4ïÕ-K™+tÁºf2ûüèÛ7J;“|ô¯¿j™ØÔ€FJþ"dÏ~_²Ë®äñÜ.Z°y `º[sûG¶íÄ4, u‡<=`§,Ù~wÙÊ£ó`ͱ´ª¯˜ØdæÄ“‰wÇܘÓU|M‰ö{Ž=22aÎ"t~•œhvçÜhäØmžÆµ%¼¹mvœi”NŽöE—2™Y.Jè¾%4êž“I§·DàöðÝŸñÝ2—þϧGJGÏ=û’ùÜð¥yœ„ûç-ëå€I¸mj®¯†y¸žifÝH@ѯùÒòKM1K߈Éf|§sFv]O0¯J/7æÏ<Ç|Øf1z©"Û~dFçÃ"Éòhçp¾ÉœARÛ¼]žz³Í0/vìëÎ]ûm„I¢‰/ÿɽ²Š¥…Ÿz¦ÕÄ Û.ÝëpV|ôÑGÓñã?^µb3CËf*¿cT졟}’û ûäß¿ ç±Ùô=œ¶$‹=_6ÇV½GC{f2udB“8K›ìë‰ûÖ( Cÿݱyíd>¼ÌýÓZngI2µ‹=”ÛC€`? ¥XrÿxaûfÔ‹ÐG;bÛ*Ž;©™Ì¶›`›•'±a'7«df°JåÌ:ç1âf#1{îñà`XÆùö·c2¯ÇP(­X€déÁ,†‚>_¶Mæ&~â\2 ˜-J^]Ü7>orîØ-yŸv°¿_ÈàAè›ÌƒŸW¿é¹Ÿ/ч=H7þ¶Tt„s¥ÎJ…þ`Àž•$+ÅĨƀRâüfg¶ÈÓ‘LzG¡#£Wþ¿ƒÿe'‰MuSå$Ú0•¼¾²/tªx¤¶Š–´'F̾`ãGQÉŸEsjmÙ®Ýç‚ÉÚ ”qP#n÷À"3!¡®Œúó/‚=aðÞµ¡0ûÄÊL—ñRœ#hå[èýO6üÿ7?þߣ21hß¾½oÈD¶Gç™Q“%{'“‹ÿ1˜´ÏƒGÜ}]ágÜãûíÄ®j—a§u±oˆÑËk•=½x]9Ÿ„¹&z±c±œ~hû‚X¬PŒÀH‘ôóØ’Ý6éÓ1f—š ÔÙh¾¯ã°¾ 1ú&ëx¯nÀ]2 Ñ½¡ÏnŽòs•V‘Jó\ëésmùb¤ÞÍ£ôû¥¾™üþ¯ÿ±i½áÕƒ·ü“×›?ûÏÛÎQ¦¹çðÀ=¯•çB’“„Ò@"c×Næë7€• 0–BäÀ­Ç …Žo÷3÷>äK³ž£øcùãO~ïÈ»ãwô¿ñâ7£ÏÿÅ0¾~óCÉkÿñkÆ™ëÁŒâ1^¤Eó¹¼ï©Aͪƕ̓õ)àY͊ͱ/åÐ*÷/e/{„Nf`¾#†E+‚§á@˜ft~©#÷•Ó$—}?¡°\°@”‚.Î%÷šè<àÙÅùzP<"MYeU 3ënV&ÄR÷G˜‹Ú:œ`†2/|å¥ÖÇþãŸw_·ù@ì{¸÷üñK~IEš*öO|î?×ÀcOEšd#¡<²ý¡£øóX{^j¨¢TZ‰èSôM”Çö˜;ø>Ùðÿî£áÖ(Îi/¼_zþ›Þ›¿ÿ¡ÞÝ÷¾²ý±ÿðþ»ü­ý‡Ù8ÇVŒ ”1'ž%¡©Ä‹)ˆ5 ÄÀ².²iæ·ƒ1ÛSG g ™Èõb¤íÎO8î®Û.@©U°ýøÿ:Ø\QÛGâÿÔ¼öÕÎ;׊RÎ*•@’ž?‡}aB ÿ_ù]8,Ð}öË*S̉eæ˜kºn·½@se˜“â:`Ã`ê˜â±ó»Ððh)V‡óÜ™Ð,XÍX…‰9±øo>ý%óøã_´¿/ý—ÇÿœÀ9œÕÙC¥p »©scÍbb.@•ÝV*¡Z¤âñ*¶}\Óª­1#õа4–Ì*àYí‚x²sˆe¾ëL‘¯E@éPLî  :pÚ¼LÚö~]Çý:ɦ“7G¦2³D`òª(ZOLeDáaŽªiáC fÀ@6§Ã¦NF4˜»*dn9Ëüœe-³Üj¿ØÀdqM]Ycž5œpVºò5²vh͸¦21•‰f9XeIãרWwÂYRTeãÍ‹ÀxB1·í÷­2í}ãŸi=ÿý2q;ÌA¥®Þt<ûdä¨s÷ç¾3xôÑó~ÉÍ=ܳHüoÀ4ƒ)Ya¦›À«k²Ü‰**ó±ªYâÅ~“2Õ—Ê–íä¼S?Dzгûãâ¿ÃU4KŽDö¯¨°‹;)8 L–æëxŽm›Iná9–%<Œ¢#L‹GQ·Ä~É7ür`ÊÏWÌ.[ãsð¸ì|‹ßÚøJ,úIRpÍí’÷…îIl·›G°á¬ñŒÀ%Ã6|ñî%«ÎÆL33ñ(ˆ9Ò3ÅYâ‹@á$S½ÉI`êdËÄ(’\üÛà$8Î$’wlrof: %ç¼dÃÛŽÉqõž µ§ãYF´Cnï¡ÉÜÎy”œËå>ûI¥TRç™qLϰ¢< sæÈ9Û¤ßVù'âJ ÍyYðJÄ»ãwé€ãº„çÖ¾}÷KÚ,1YÜ_!#ËÉ2?  Š¹¾9íAÉ}9­'/ÖŽQQ™3ˆ‘t˲”’™êKgË0íÛuf‚§äqÑrx:d‡Î¹¦æ£qõÏŠ˜Øºd²S"v{Ý©ÿÄ.ÝÝ1JlЇ‚éÉ@ãÁúðÇ“ÖÊx KÁù/ÞÓúÖ_åwÚ.BðìBmGvYËz (ÒrHuoÃsdì=Cñ¼}B“¥…£`ç—§`ù‰œ f­O§¢2 ˆU‘2™ê«dËŸYp.›çl'E&ѳ(`Vð\vÔíJªÁ¼.btïhü¿ixø^sïkîŽÞôއ†ÿõ?ý•4Îd©<üÚ7xk¾Èà[¢,¾éœW‰@fVè¿È×›¾Ïû±Ä|-=<›Æ<(óÄÅÀ10&a°­ËlNtV§ÌrÖy3IJÀúâ~%è'@öŽóo øº7¾ohïM‹®›®ÿáG6’¯|ñ…€¯ûùá×Oî×kÿñý­ÏÿåÐн±¿;¯~í=ž< ^qç+R¦gïwïëÏ‚ïùáï wþçÿmÜ@.©«ž˜ŠÊ2@¬T–x˜çJeª¯˜-¿N »LÕ‘ $US˜å}!Ũ&’ Bs`ø[ŸIÍrϽ÷Åä+̾\¦žüñGEŒld›Îó$sÇןû†ùƒßøt8;òr,Ì,Ÿüýφ¶½XãÈTÖÙœ–•ÔôÀYè@ù®!Ïu¹£yΖ + 04¿Ur{áú3æjä¹5ŸTŽ£ê»f®ö?µiñÛ÷¼˜ç56„ €i,Ì7Ù”Šê†ý.ã½U޾Ø. 
rÆ(¿c2—ö@lO_1èþ—Çÿ<¦àÛOþþg dwß{Wëø_ιÊÔ3™ù’M[ÃÏõ³þÿû™?91€O¬,5…SaK3~>ä„•U±s怈ùÞêy[ß3,GòH»{Ÿ ¾ùõoµ¿6üz"ìŸý«··þà7>ÿ{7=¯ýpðã½§—3‰“­C<›¡fâx±µ²]*UN`F®÷©1£àQrÿ,\èäg„räjà>уo¼øƒïÿ^ïnþEï¿}ô¿é?üõ!²MÊ6a­ŒY+]ÿüK_eV›,qtš­cJS_1c Ä~‰}8°:,^Ü6'ÞMÏûø…gb ã’(ÉúøU÷½ÒCÖ“Ý¿ýÇ?ï¶ôóæï{(÷ˆqPw ž5`Z;.³æ¾ 3â`]ò¦*ˆ©H »Ê@ðêɪÏv9•{Y:ˆA´¿òâ³Þ÷<¸´2µ£¼{BÅ-#µ˜‘Yí™Óf­až†9Ž„•`”²|Ǧ|Àj¯x±I/•¢I)îæœ_jÆ[óÀ ‘¹ãå£g?ç¥|ƒ€ùü_ƒãxž³‹p¶j/ø_ÿ§7±úpKÜÔþ’+O0z.Cé°AÀe?l9ˆµš»‚غ‹gJ”«Y0pq!Bꈩwد¸3õ¬´Ê‚Eć••"g—^øæ´f-…«H50Y"âPÜ£ŽÉò¶±-?ÃtDÛå›& b.RÌm‰Lì»c2sד¶€ôpž½öÄ @‚¿oF_ÀVVà™Ø5Y˜Gʲ‘â+,‘¤yž’äü®µ8¨Š‚XM”—/æÄÀ~{Ò$±$Û‚Bí#[D*O˜_Ø·Š¤e2^d‹ ¦ öUë)ˆ­€]1ÂNwû¥ÇÀúP‡9£ùË/V,;Y˜¬¾W²¬Iý «U0Ú'÷ù¶,«"ò*h¾Éæ’ ôÒüÚƒöû’9šŠƒ÷}Üÿ}Ù>+h0ÀºYÄÒ €å™,TÃ`O÷°—?ˆzn>X™Y]³î2@np–*X(ˆ­7€¥sã^ü%ØÆ8@–ºÒ#õ”/XO(ÿ+û`ŒyÅÇ~—Ч²dQ÷Ï)¯r…º›Ãzö¡h÷À"ºp™m |ô4(éÖ`¢6˜ú8–è;÷¤U¬è|4áÞ Î#ÊŒB}ƒÌ¿êb¯ ¶Ò5%¼Ï– ©2+›³sb#UGÔŠ©-”s™Àçîöá,Á®È‚&¿`²z2;”P†{Ÿ³ÕÏQ’: ÁîÒTÚšðŽ'b€Ñ¿é3˜f¾U9àÄËvøpØØ±¼Æg-¬FAl=YØ¿Ô caÄ[³Ž`…±r;P„œ/“Ì”ý²#ñ" 3«PÞ†¾,æÍÒ{@‰€lsq˜¼ð¼˜‰w$‹\– >€¬‡{ب¢œä¡Øc1•Y$0Í zì.û¼0ú¾Œ”S]¡9kÇ@|j¦ýž‡ßÕþÌž\63£{ÀY>|ÜöŽ,%¯½ïõÞo~ü±:½GE¶ߌÎ[“•–‰àL÷.nˆ1=kÕÝÄÖOšš”LW`-`]t.W¡<}“¥8êÅÊÿ–M2Ìâ™Ó;Ìs_?6±®Y~¦ñói'óf0‘1§Î"æ{~3Ù¼ûO8®ëΟ7_ʼnC°äF1 °1º›Kv»Ïv8Ò$À b++C=¯J€Ff©C“eíàK D®Ò›¡ÂQÂü?)csÿ«6÷¾òÕ¾ñ5.õMPÊl·¦à—ü·>ô}²ö–!Ï”›‹¥›ˆ{¶*‰yyž±QçŠp-ì© ¶šb_Þ­"E¤RÔ&&žQö9xÊœœ†fÁžŽLù@°.™¦ŒÏ+M,-UÄj•–inU×–>žæssRÄÈx®°ððÍ餺fÏÕmos!ÇOEB!¦ç×@әЂ˜‚ØšIÛ4·ªë°¡sË2>¿Žpßd&;7ÎÆqbfƒ'¡1Ù]‚ù¬DÜדç/²Ïs>Àmñ»%ò#zÎéÅ®¾›¦’IaÕx1•ØNƒ½’úPλú˜NÚȼ@Í3™‡' .d³˜Ó:õÿ‰ÁÈ"|' Z“s„I“âÄTÄTæ/¤(È?§¿¨íç(Д]=÷ÜsÁ·¿ýíö<Ð5އßóÏ?Þ}÷ÝÑ]wÝ•¨Ì /¼Ð~饗zž²á™¥§,LEAl>2ljŒÜ’Cs;SDš­žÙ£¹íT Šµä}´_ïúùÛñT`N#ìÉ® -EO~([n—ñþzŸ«±®b@±!î£2V•y…Þ‚Ú„Ý~›*^>F±§tÎŒ/!¢¢²LKSœÑ{Šj ea* bsfbM1˜Áb#jˆÙe·L3UTòX/±-r€¹e?Oã=MQcÞ²©¢žÀ 5'Ö$t$€hb•Wb\‘È¡µTVÈÎç ÈhàÅ ”›8¿« ¦ ¦R3 #o®vSÒNÍ*ïúyó”©© æŒOqî/Ÿ±×¯ýä‡L­É‚ÉñFÌë6ÊÛV(ˆ­ƒ èÜA,læ¸òò²•ERvCrò€3HÒÃ=ˆúœâ9²’žif`zÆjž_&ˆ=þøã2 S¥²ü ý 윶´Tý£>Zçèuu«>XÞ« uŸ¯Å4ຩ÷=2ç!\E›æÆÊÖÇ+#Vÿqà{kIýÿ¬Kš\Àê¿Jïô]xx0ºñô>._ìóHƒ“íÃœÉæS¢š7TÖQˆ‰‘Çâa ýmŒ¶£·µú/±_]«ÿJÍÝß%àÐÜö\Kª"¡JmoÏ „YâüŒMzu‚*;˪έ J†¿+Uó© <“Í¥Ç{æ™gÌÆÆFüÞ¼Ò˜,§‡¢O¥ª¿‹º&{œM3šdܼ^bF+!žõçTpêšåðêw‘ÕÏ·ý·m²j ‘ýí•ÈßÓQMYäS™ûÃÜÁƒlÏbZDRYе™º¾‘¨ÈÜq”+Ù"a³ŒeCïX8®>•=߀¸'Ú¥b·Ê1¾çž{º÷ßÿÀd%I<3ZÈ‘eaQpñ<¯ Î>›ùì±.ð³d À1ã ƒ¾&¾Æ4FŠAƇB$b{œÝu}N%Þï[yŒû;c„¶¯iZ¶fè¿ <°(C¨äœ˜f_nŽô øf5—xf /ºœ‘z:ê­XkêÀ4&Íþïç1 «®àšÓQX‘b£¹–_|qð‡?óâ©l¢„Iˆó~d™×#Eݵë÷¦9Ž`V „ÏÐsZ„tDŸSi†TÂ" vn Ì:&DŽ‚§•¶0M%`ÌŠL[-#™p¬¶)0‘Â<Åû{ ¸¦¶`'³_Sk…ž“_ÃsÇÂ6ÚçT,êb¯#š2’*.;:ˆ?+¨‘jÀ` ¼›Žü –FÄŠWÆ&3É}ÛïýȈò–æ°²uÝuMÔndÛh™Ìœxr=Òl‡y3O\—gFÍ|gñ9íX»¸Š‚Øz W1žÉÖo•ßE˜š|¡ðØ Ó²ëÚ4åè›” Ù¶iŽåÐnwkÒùÒ‡UŒÌè|P8$XYG8¦7IÁ-ðšúv»]»tßî 0‘Žøº8&®=á>¬åsšb‰vq±5ÄÓPþ¹YSôÀÔ´o²Êž+g*ð,sM˜_â9¦Ê×eåÌ>§ñå`@åìŠÎ‰­¿DfÆøazš¥ 3yþÃClÛ¬â7èšÚØnViŸÅçT~©ÙUKÛ¨(;; S¾¹ØXó,drŒs¿vâ–ÉÌMc«#³r´Ÿ¹ñNæu&)+”íðpß”sXÔ5Ñv‰Ý/2Y,ÖQ‰ãl8Ç)3´ŽÏ)OB3Á´ª¢ ¦²&“")Pšˆ¿,G²ö3,™¶Çƒr£O æV܉|ÉØ‹•œo&»1Ç]Ú6`EeÏ×ÝÆËQ€|¬þrM=€ ]OÌL¶ãŠ „8×Ô=Kω+iË÷±L™k+¿ök¿¶a¦ æN>ðœ)†ª v6€ì:UɵŸ›b”MJ¦CŸ')̳ì§ÌÌóUnæ‡2ó:(³'Ù–8gVð. 
$ËĈß4åš0vÕÙo³€‰ bÎuMó~N0v!óe[€¬¿æàšÌ¡g(} Rf¼CKÛ¶Aûu-˜žýv×’Ø–Y½dÃûr¬bLIŒÎÁÞq±½O4må)»²æªØíˆeFà­]S«ph´\ëˆyÍž»à÷ñ޶À0ã†Ö« Àb—'ÖuùSPb»ý?°Ûí™5—e3±ž0g¬‚°ËòJå–Ä(¹+ÌGŒp¦\¦‚U€qÅl¬\/Àì4ií£âteuTñ:·qœNƒ®© óa4 ˜€±qv‹ÁYyNԦݮg²’3=ªö ‹Â•YR©5Xú$Ÿª:ó®&öØ~·ó̆b»>€l°îŒ¬ æDU¬(ØâÖŠ>gR6F°ô¹ŽOY ø¬¢csÆÀLÎØÀf¥„ÍBŽ“ Ítpc••4GÅƒŠ–{adž !סM:ŽgFƒ‹„¯‡¯¯{⟥ç$\øGô…É•ÖfîÇê– &›Ó,bVv[Ïúò˜íív!¶»¨ ¦²²‚‰ðÁ,u—÷o£u6OR¨'yLò¸­#ˆ¿Aâ™’UºÁ²Z%¶;&s¢ýl¯Šµk­@l™NköÀg ¥§kævJ¢©†mã’É<Öò„–·Á§ŽiÃè¾gÆ{p-êšèB»]<‹sæÅºFÕëøœÆ ×[WûÈ䘘­¼9£L;¶¼ç¬ƒ;~ã@ à›,Sö•>F9þªOˆBQÄ36ã³Y‚”±­SE2â¦-ÌXy‰eƒ1#9µçÎÇšT{+/±,m,ûšÈ¼ÇNp[?)É Fî+Ì|]ž¸®h£^Ççtf@lŒ!ç?åyѪ˜QÓuÛd±‹kajl"K‘Ú‡º4ï# b\3i&DgzQáåæ&–•Y<8,°ÄÎÿ1ØÁá)Ø«·&Žãã8á8">}°ƒ£&\Ž•:шÀž¼gþ) ÃÿÓ¶ñ$vµŽÏi’…ÀÔWÙ¹ÉàFž†±ÄUB|À´Ž ×vL梿 Þô|+ÅlŒù«782ëQ$Ï7“ݲ˂ÙBËBIΔX¶iׄcÉÀ+M‹|NÇ'ü–9BÓäR½Äñ_¥ÍÓvûMì»*&ÖY¥žï²AÌ«‹Ò é×Ùf塟}rK˜šòþà™û®#ÁÂÎDGWQ±}cÓd¦Ñ<æF}c%§À¾.ÂJÔ·ßäÉ8qð×ýŸpE5TÒYK±YF":݃9¤_×D%b,šÒ9eph«ÄöÔaûýÅ?ˆÞòº_Hæ}~ïúy3mž·u•¶½'•GŸO~Èhuáj¸ºè^‰¾A;2gFÐáÀ3_©w„¬Tl^´ßižÌ<'ÒW‘Þ æ¶OXÆ«Ò["àyU6^Y{Œ08¶b-#Ò?ö¥ûÛ¶ÓÝ2"öæ{î4ïüG÷šzËý#Û¿ø-óÉ¿{ÁüáÓÏñKÐ>ÿϺO==ô³&ìl.JÛh•Ý0Å>ôŒôÖ•g]OÝΦ/—ÿà#÷›w¾ñ^³aû‰”þõsæ“ÿ‚ùÊ‹ßj±b·mÄûÒ? ÿéw<·ìË M…¬üÐwç¬$oT6/î ݸe² {o 0 П—ò>>úè£?þøjƒÅ58£cgý<ÌËdï¥Ï8Æ¡'Ù‘›’ËÑv®ÿý3/Äñܽ®÷½}Ã|à‡^g¾ï÷MÜÿw>54ÿþ£_d@Kœm“€l®s–IÜ¡êu*&û²Þ…Jýƒ¯Z \?aûÆûÞ1¹ëþÉß=o~ÍößzòËéÀ!J^o>úÌÄè6lÿX.cÌÙç*ÒKõñÝE§ATè¶YÖ×AVFV¡(fàÂ7ø0@«wÀÆûÿ¥1À”¦ÇÁ¾Eî–™¾¾Q­#LYK_¤÷¿ëµæÉÇÞn~ùÑÍRFByÿßmúÿú»ÒŽÎÞ³mßP¨²âvƒû3½ÛôŽÓ»^ÀH¨Q_ú„íSÔ·HÐר4¹R‚¹‡O kOŠ«*M5'žÔÄAY´XŒüa›rtAÔšlÅ9ŒŒF$±Í€Ï}ÀìR/™š]¶ŒÊ1r-b_¿ñ/1?ô–×LÝí»ÿטÿÿ}ÁüÜoÿ­ %u‰žeÔ)Ên¤1F/¾ø¢÷ÜsÏÑò3šp0kF'v*÷]™&ÙmÁ±Æ2ñ:²Á‹Yß­gžyÆÜÿýÁ{?ro²˜«á,©ÂyM‹xNÄ”Ð7ÒcüÂ}§ù‰~xêó}óƒ¯JÁìýï~­ùW¿ù4™©Ý=Ž¿jΰT‡kU«l¬ˆ…¤=¤NI¹ÞAœ?턱'Ÿj_ŽZì²¼€Ñö²ÝúÑIé\[4çµgŒ:ZBæ Dgí™ yçZd¦ðsU|Çw°’ô“ÉaIùôQƒj’2ä¹Néeم㟓G“âªDvx>—þHÆìã‹óIØN€Çwåäxö²Çi(ö5âšz“âªuM‹|NNÿn×1¸sz·þÍ÷˜K¶o|òï_H“-Ó@o™¦ÅYÙBå÷~ï÷¶¡‹¹Iø#?ò# Ü5§ »f²Ô5‘½¨*1+溶aþK¸32€ v,à˜Ö†XÞe&;™E[d†Œj¼öM("×ßµ×_fšÖM"#ÉÆ½õ>ê¬Ôî÷ËŸaF–ØŽ:Ñä@¥/`œuœ^Ø‘Rôïúy“f±ËÎ;ûn›,6âÙ²Ò “l§c—•ùs-UØýŒÝï`³À雬òiÞh-„{|G²0—ÁLI ãçÕï™òÚ/ä“:Ź ”Ì™>2ÿÝ¿xóD#Ç>ý\ê(…ØÙù‹:9-§öÿÍÿõÙ”íÚãöK˜NHy*OHn™y‘ö:úí嵕޿¬B›ªˆ`bÊŽqÇ)RŽt®Á$åYpœ“ _Ôî˜âÌ\VÛ`Ví»l @„c]EUîÐ/êšùœØÄž²Szw'9nüî§SoÄQ ºÛüèÛ7 çΨϑõ£óëIÖ Ÿú伡ʈ# L=söbÖŽ”7ø÷ܬÞ^X1ÎZA ,$\»®W…fÚío`?Ÿ€ éW.±1Ïî 6a›x ½L–³­[ã-Ès­n{¨0#¦ô±ÿþ c;)×Ïý?éÜVjËìGÞp²îø…o¥ÀFØÜ/?úæ\säÿøîïH;9}p¾eFœ¡(»s5ÿ?{o,ÉUÝ ž–} †—‚Æ0z)ɬùðÐÕÂvl 6:{ìÕ®Á1]½;x=ƒ»úµÛ¸ëE-±Eç¼±ZቮÂfíù£ó{ì°<Óõb ÞeÀ/ 쎩æÓ )ŸB_¨ó }ƒ¬½'ß9U§î»ùYYUY¯ï‰È÷ª*?îGÞ{~÷|Üsî¾=aJ' ’ˆ”&:ÊA•ÓšdaQIB“&Ú0rÎ|Wêža›ÊÍÒD·cA®9ÀüW¢ÿ¶µþ;ž"³$˪HhH›fúžðŽÝ,ð¹nô87Ð[qéê‘›=‚žÇãwñ F0ù‡sìÇ¸È æ©V$Ç8Féв<£æjÅp/KI>]Ã>œ ]ó§½Ãe?íŸaå ¹)†/ ¸–ĤrÑ^ÊfÎ5œrV//«_’•mâ&œa¨þÀ][ðI<U«Ñ´Õ$þŽ“ÝëQ5‚“Õ4ññw1\q)°é“Õ£øß![MVbG*ËÂhãeÃU“ãIC­O]¡Âà€²m‘’F ¬X¯–*K¶'Ò$#OkW£À¬yåpn°/˜„­+-²LKÌ—HôŸß6Íä=Ñ&ÿDCc6þìKßO´ Tcíe&MªÓoûù×%×~@]‹ ¾ÿí|×u8Ñýžìc]˜CH-MBß•éYËÞRöæ5`GhÌyúBžøãIÚ0š!º EÐîºA,Ι̙×kC±4óM¢˜=´âKVŽiô;õd…‰®öyªF<“¥1œ¬¸éS=”ÐPíH{eº¿©1$ÕàI’®\ÈO오‘R›º7ï}Æ2±#­Ü¤0w…{ÌóM»'K­µ"îYí‘ KðÐÔ}y û`˜I›¤+È‘¬–ÚÄ}hÓŒÞS27pdš“k'þæ?YÈV†‹:œz8¯L@†`xâOî‡y‚yLÇÀÄ>ÖÄ‘òŠ%f4¡ÂwÛÕ¼­ˆm°@P$CtÅLÏl3;[+ˆ¡ºí_†ÕªMê¿k€¢Ï“®Ãüò¦á}£ž}¿zn]†HvÛÄ)ºc\i¢ Íy£sEªtõ¹û~þ¥' ˜.•í€ÚåÆóüùëÄÚe6z [ͬVÊr6¦XÎ0Ú7ÑTè §ŽY$ÆœI›¦ñžHÍÞæ±j¢íg”,Ô>ZÀV¦« Ñþ¥ÛÍäÜÁ9ù`ü‚ƒsTÍy¤{éÈ…9¹µYúaU Ùø#Z­“ ¥«£BõÈ’1î•E`<,øUTÄ>«øv$ø=–˜xý4©öÍÎäÀáÁ(g ‚ÑJŽ ‘½¸\zrಠà†e¹1Zü~,m3tIq83z4ÕÙƒ‘×¾\/ã¥&“ôÞ¶”¡F| Q¥TñVÄɘ抌+[\‘ É“ ’»xiB)‚¼óÊéªÕùi²›T"rýî¸îB‰-ãþe¬k6yØvrl©ÒwË”€2hP›fñž’:àM“ÂPmþî g,ÂgfÙØÄœœ—6hÊ€)‰ IÔƒqÕr_HÙ®¦õB„žÝ‡i±îPÈ>NÓ™xf ~¿®Žãtœ5€MCÈ ËLYN Å òX÷<”\²@^h'cGºÃÀE/oSªø ÜŸJ7ÞxcLn¥ý´—•×vÃ@M&¢‰ÐXÍ*iÐ;o<¨)©Y«ÍFI#ÙiäùHc~¬nô„Då©-©.1¹yó=iö7G¨®Ø$¢kežŠzÆ)ïJ&tÁàU˜ÒwX¯ÙÂBØmŸ a·zÖ¿qû݆´iï©Åc4ÄÐ5~„sÃSÁ[ õ KªÅmmL!G{Ÿú8*邸·/î3¯AÉ­Uó1jX[_I¥Lš4Š ¼fŒ«,f§¸ÈóÄÜ̺¿$ñk_µ7 …í7¥×_n_óÐlñjÓDè|!ý>pñùÒ 1ZÇ+¯¾Ü„·Üp€'j^ ‹ýÙe<±Ñß«l/orNœ(ÿÓ…œêâ~£UýA2¾vÊxá2±t1ß"iÄÏQa{WÉ+ÏÌ*Í[Uf/î±úŒ¼ô2A 
÷Ï‘-Ñey‚Éøà}yÑ÷aCÚ4‹÷äñM[ࡽ—ÕˆUæªêÑYÊdsr^ –HHBeSÀ‡Œ\îwImíþŠDŽ) %r ­™“ÃëqÁïV‘Ìîºë®Rá¿ê”Äz%ViÔ®Ðh|™'§9šÔËдúâœ#O-ãDÔ¾ïri—ªÂ›V¿œ8y sJIèZŒºŒ¶ñàÅ’èEU‹è‰…îÂù^·¤ÙËÒHìûš*‘kø¬ÊboV^5NÅO¶Ä™ì™U›fñžäý•ß—üGΠ܉„h7.ÉA"Ø$sC_äÕl …6'ïð  †Që…ª±KÒª àgúîŸÄE„ôFÄû¢aªÈñ.((¹MâÝé–¹¸›…ñ&|a`¶«y(^¹ö`·}¢hÛ=ÚKÇîÉYªGPMrç§¿—ìýÂÕ&îýÂÍšeŒ÷Íàåg V]\Tµ‡1©ûXÕ¶É£nRʵcÐFÛ‰HH<™²ˆgaNß-C}v3lÓÌÞ“$/\ÈáXÆÿ‰î][ðùûžJæFQÃy€a¦pNáüCOE”Êtâ¹I®gMXmáUˆÞˆ+¨u’iWˆš›<Þ¤f'é~é,׃)`@Ä®,ª¹"à+K¥²ÑﯱÃËV2$õš¾Š fµb`uÑg¯ÂdD±)n÷(-!áDEJ€èõW'ÁJ˨Ip•‰!8Ù‘xòWŒÉ˜l¬TÌ4„ÑŸ´ðSÄpe XVŸyWW‰þ^1H.8k3­p#o uV ùvtNèŠr"È ŸDê/©6s ¾ïl‰Üw!Ùê€ÅöbWô]ÑEÒ¬Ú4Ë÷4&ñÜÀ….Î0^Ï]”ÀÀqSôŸ~8ñ~Äù¢ïÁÄg uèL‰7tIØÕc»’ÀnŸ  -qï†v/Ç” Šz|Sð‰²ªÕªÒØl3;—”Âp°K×ùEK}ІÑ^°`Ô¥¥P ÷¤VxèÆk2œP¸W'*~.J‰ ‘¢ ŠäØ}k¨žDµ žûã÷ÝXuÅÜj fºy‹•FžªAN9]Ëeõ ŽM#ioÀÈfçç¨Ân ©ƒæ&v>õ[^{b!‘÷ÁÚ,eáã‰þs ô],¯#ÛUÚ4³÷”d7ßùµdîàþ°¢†Ø;Ô}˜—í`h/Fi ç γ|æá1{’@lÊ e³€lÜç´è;FyÃLÛ—NRªª>=#„q[vG“è²xüi(q'Më•4•R'Ö!‰•Gµ`¸Ø¹‹H1Ì'€‘a¾ù›~sEeœ\úDÄÉ´ö/o€7ªIVF?Rê÷Ñv€Ï@Ò Ø\|¡ècc’ÖS¤†12I˜63âm’Ä+KãEÝÌ…íhõ[î5´á;‰˜"±JŠL’‹ª$gÕ¦Y½'£·¤jÊìÃûûßÍ vàÀù& %¶&IJ7ú?Y@äÙ²È{•$/´ðUÄY8ȯ¯-F(®¯Š4•飉@¬[Ø"’#þÇ%U ‘¶"IÀ7]šöºTÙÿ¢«¥—Nä^¿ÎÉ`Î…ˆ˜vØ»5¿£,º\S2§bß±SDQj5¨M³xOÉÜct—¶b’hö¸pDÏÄ´Ô"¸vئS$œ^k7Þx#‚H‡œÖÚÚ;aé;Ä"1NªHcP”·N*‰uS*½×(Ö˜IFLË!ƒTæ¡+ý¤€e"Ô÷£Þ?þú«Û©Ò¡ D½DlÙå»ÐdJ dôMHîÝøy0a`ÙNŽö` î ¸MËÍŠ!‰“m‰÷ 5]áD¡†À½´iVïiÀcÔä?)¡M-k#5Å…’ó{!‰„ž§^ÊâýŽÓõElc¾˜ë½ï¢R»[F"Ûw×]wñþï½ï}ïF‰†/Ãy«T']Ì•˜û’Z%Œ©åoì™#:5Œr>Ë{ŠÞ/3JëA/9Ô{ÙüXöؾ ;ÕGø{O¿=uňæ*ûÃp¢¦y4¢”FÎ#ñãwÜ|m3=»“:FƒLÌ¡·‚,N NO Šþ¤”!÷©áõ½<‰„˜vF‰>e{†…‹#Ê&úÌK}B@Þ…Ý EéKŽÙ>õÝp~Þüa@ ï¾}¤¦™U›fõžÔü¸ˆ×gÅED/ܲÞƒyõ²ž‹[]È&æ]D[ŹwÔÎs×!$$ã ùíù*ÒÉb›ëN62?çDßÄû³H`’¯0)W 9‰$æjà”äÿš$¤#tÚç¸ÂýeêSv§û@À“ “SªÉâ¢C†iCòäU˜½^'|ÖŸy83¹&î«€œI”‘yM0eò7®wéÞ @·³RÞ{ÚÄâºGeÕh†`¹Ò¾çjã»'î+ÅÐȾµ¢IWNŽz0`ÊËæ<6Íð=%aëpC~šw.Î v\* `xOVn2œ?`Ñ„vN“4ý²€iËQÆ#°8ô\GŒÓB²+A‘x7˜˜ÝëûÚ"d—Zp‚Ü`Ò…¿Š‡g!Þ\—‹}S y¤Û¦³lÁP[Ùö¡ºÇŽÎ°ü|úá$Ô:8ÑÐÓÓG`ª•4`Bct’Sé¡g33àâD¦à¿%½ÃfXl¦ešfàÍY”#@}}ý·9£¾›Ö{±ÙÁ±Š…¦ñŒã|gŸ×S©9ôX£š ô@DK å†×á\Ô¾€3ðvÊ8Y¢>hÜä}yˆºÒÉCý ÁàÂyÊî»ï¾Þ¯îÎ|_!X Á¦ŠZvê 6-]qH›˜ùRB¡PQGqpð€ Q¼ ˆ…«•¦EÇIfZQâ„ÃðS8¹~âßü}ÅõùøJj襅:|üŒ«Òæd‡¦ÌÎ €ÎË}Ø’¥<±‰‰)q~à˜½ð[oÞu ï«äzœÁ™½Ñ1œøû—n{k¦jŸCÛ^¢ yP«€+ À8>¡1q%ñ¡“N yÕQ] ‘ì ÷nQLŒyBÏÓHá­üšfNͶXÄe#¨ß™£åw’×M˜½oð÷kØTû;ȼŠu[MT,««D&Säœx¨RAo*4v? &:n`Xœ°˜ ïÍsÇçÜd å”²d©¡”H"˜¤2-ÿçÐÃzâ¢îwb‚&sÁ µyve´ãd ˜gVgâ?=j‰êO0O‚/pœEñ› z~Lð3|æ:‚ Isx¬$Ɖ=Íh¡ß\L½V#ÇŠã PŽä‰]€ ^Õ‡’m,Yqbä€,U NĬ ÐY„º~ øË“ÔJa–ACüy¦©̪f|@;Î=¡¡(­îU|àŒ¼HÓâa ÕIO?ÂÈ6棆@F`Ô§k7ÉN:¨q–çñ/ÜüŒêÇ-ÚLH S>XàºXÛ\+M;1´Ó¬0å6“ùhPFÃ3§a©‹Ð[Q¨{j’®ÙWbiA€lx2†q1V'±³9s  øF^ `'è$šú IÄWÌ|Õ°p¿ïW‡Œ}é)Œi•¢løÀE°H^ã ék v{V‡P>œTÙ맺MaRIlê{(ÈHÑ˾V;Ç»É=måѧ—~X\Ën›}iÌ$#gÖ'%¥K]4È™¨¨VĪÉÔBàß}Ï*K]LœýVì{ÁUæŠe– ÈVÔüH¼òÈ0èoÕd±úâ3=ˆ9êUT#úFïpÍ@––ÎJÞGò¿–Pv4éÍÉX$'Q:È‘ëhõœvlÈ© ;³±*zVvå4cðAra”†ÝI)Ësü¹ŽXÕœœóDe ö¶qr¡Žÿ¶Ÿ¿®p„nIGñÉQ ST!ž­Z? $sG9.þ/»ñ–7I˲Ò(G©·¶M‹Ó&š'ÕüHb<¢Ç"Î ŽZ–Ðþ…6åÏßÿÿ”xW0’ÂÜ~ףȓk½HoÂuï%Ù LŽȧäÈ1Ð$±¶±=€Œ‚-KeÄC‘p°9eÖÖ\I õœê%Ä%ÄËÒ æD>œXû_l›Äî ñû:þnpÏïÁÈM^ÿ=†û¥2È«ÈÔ¿ãÿâÌgîùìã¯lá$CUÚÐÍ ÕYjvŸGç\8¢ ZÚõ™öõÄDà ¤y€=¹·(⹟µ'‰RÖû0JòÇtýŒr\:°~=Šû—Ø$Ò²mÓb´)e~œ% p%Jeèµû o[‚_¾ùU™Ÿq ºâ£mÃY]}ù‹ð®ƒOþlåç&±Ãû9ç±Í‚™–—ïe” uüŒ¶¯>¥aY#Û—#Ó®>00_ é¹+dGyÛfûò¡Ú6‚=¥NäCa Wech¥í+ó;½¼´ßÏBÃèøñ'Zï¸öéàŽo$ÞXœtlÓ„Ý®gFû§ùpÔýb™:øŒ(.!f;/Õw;êz©šÖav1ÿØSÚžW$©£mÓâ´)C]Îqn‘Â:´ªxa#ç}±¿(”bµ,˜«ÐeS|v¨ý—’Xæê6õᤵÿ‰Â¥Î¥\ŸzNgKN&¼7 ÁPƒ½®h¦äœgIŠÙ¢ë&¥–mÓÞhSνÑ<&™Nò¤°žtÔ ¾°E9¹]ÃÑ}èñèÕ°Ñy.ünZ fRe!Yæ¢ÙÇ*Ø”“„¦I„ÉŠMMòd)äþVUÓ“ P_#?±è‰é¾å”Uhœ²—éHÚ„ÒÑ“Ï=á*Æß2q&ؼöoL¦÷¼íÄØù›^ýSpÍ•KÆû@&%¬ÿ# XuzDûý©ç·á¾ï%ƒ?îÜäÝûøW`ëâ7…ôv¾Àwé*°sÜW½Å¡XHÀTD"žg–‹"Ò__ÍýÆäùslØðœú3º”F öUÚüÊý`TÆÊÓ÷Ìh¤ Ñxo«äâÝKáw›3æw³1ÑP."UT+¸³(÷ÿùWõ7ú\÷O¾x§óå‡>ÜWÌ!€Q Ò6Tðpb5‰—Í\ˆM€ÃgµQ]mz0þ–ó½'·Zª¯=!&LZHGCÆ÷ö׿3ù£¡ „\±7ü©Ú¨º˜ësKágÜûøßÃS/<™|þòCŸ#|ÐQ@è=õü“ðÍGï HªA~£m˜åš™X&cxÍÇ^ ÓGi®­ÛË ’T²=FIQÛ6¹Ñã¼îsd¡îìJã=°üm¦öèÚªáöôlõ¡™™“Ç´@Lߥ bª#Oï! 
V)‰­v:ôú[àÐ?[f‰zIQ~ícXÇ ` ˜>ö°d4 Îr›·é’ª-¨$±µibêh“ê³#3‹I~û3UÍ÷Z%¡ÈÏ7‘de©¯K À)ùÿõG¾_|ðo"ÿ}?}›®.I#6ÜmI/¯ÿp­ú¿"<ÙÜ,ð»ïû_u~ð|*ðéóÑ$0¬‡^v¯9ðWµçŒ¦ÖHÊÿøïSñqÒõvbªl{jšª/‹þîïþ®¹æxÛÛÞ6öûÓO? _øÂÀó²_^†!üÌÏü üØýÌ{ ïÜ-Fp£ñì¨ñ<\¤Áô½€{`Þ'¦ÌùÆ€ íßêAÆ÷®FPÜ<ãh|i Í§I¤ÊA¿C'˜Yl¨ÄZi¡h÷EÁãT‹¬< ò'y©dìŒîQ¨[;%6ŽŸTŒø'G¿=y à’I¢lì}È 0È’ÉFà÷À§ÝÇžún‡ìGcô©oþ™§¤"—Ïiåd§úÛÂ!¼ìºlƒBºQHKÒ1"MÍ5Kb€yì±ÌÛ ®;ŽÝwÝu|⟀gžÙI˜úÖ·¾>øÁ&€5\DüÁüAòùÎ;ï!<‡à”bX^‡õñýñ)Ðét†å#½üå/OÀò×ýׇõ‚ ¹nি»}ûöM}œ%™’^ÒÜïe,v :–He«ã<Œ’vvLR›Aë“«ŒNÄÒ©[PÝ=~`èiÃz,ˆÉTÔ›) nå!4Igs*_ÑV7H‘JÛâ¥nBƒˆ÷iŒ\z¨é^il»0݇×éžmdÄï¤]O€“*u½ÝPGÝKoÕz.Z>úhòc`A Kþöû¿ÿû èI“Þó¿ñ¥ÁŸÁåõ«_“èÀþâ/þbø뺶¶Ÿüä'á½ï}oò~N1a|>Ön‹ ´N&i¬tTP:aÕÜIñlŸæGGäcUc—ø'Ú¾Žàg‘ʪC‹Néˆ÷…eƒKÐJ©·tN- F)òThѬzƧ¼T™¼ˆA·Q4î¡v XÊ|ä#ðµ¯} Þýîw’H±¿ç=ƒ'¼¥.ÉèMÅôš×¼&9Ï)£DÂ2ðÀ:áótµ¤$ýÙX^!èâ³QÄÏ,y"/ˆ¥Icê·c9ž‡E¥|.îå Œ·˜2² ö‰{rŸß&Ä m‘~Ì!°ŠR˜¾3rî¨+ì”S qƒ=ο^¶ –NJBi¥T“áÿ<@ðAU~.J(eáeáÁÌ ¿›l\&CðB0Àë„òêkzƒé‘#G`ccc0±”† úK¿ôKIý°¤P͙՟`Øüeá‘§âl°4¦Û·òRÔ¶E`râK „–)¥ÊXðe¡Í éÞ#†{À(š}UI)*ÀïfòRëR'¶4ÉãR±(Mu Vgä8(Xš?!GŒ*3”¦!£Í'™ŠÉ"0 Ú+‹øzdÖø%3–2¤d% ¥5<ôXÍ(Ÿ›W¬;‚ËÖÖhaŽ QDÃû±ÍÖò~2)%ÊÏXG¬7ö—n?Ë*%U\<òÀ¹áÒGcaj§HL¥ù íåŠHªâÈÿÕ#õMê@Ü_F&‘>=#„q‡¬Žž‡¬&~'íb3áwuW6ÊÐ÷Æ{”÷99+^t`NQž‹2!d¸Hl«À•ø¼½Óª¶™«ÉÖ‚mEF¬ÿŽŽ X言Ì¥Œ2Ž x/J?ø¬Ó§OÃÉ“'ayy9¹?Yc³D2¦'Rå"ÃÇ{\tû”l ‚–‰Ä×a]ŠÚÆPe‰Ï’}†eb(i1Ë:°D…íÌS%2èñýØ6$²Eg°;³²›ã•WŠ’¤t©ÿ<â7‰0Ï–EžÕ«$yy …¯štÍ—q®¿È f¬Z6æ–dÊÔ*øR;Ðà[ÌtY%„Ìë7ó7á·û·ágög“U62IIÌœ‘á!Ãe2)”h$³CU~×¥dœÈ4ñ÷4÷od¢'NœH®a{Þ‹ Vgî $È(ÑBo+¶O:I$ƒX}–màÿR*É#ô D¦ŒuåÏY†„Ì_w†Àß°ŽÜxÛŽß±ÍØØ6/,ïÇE‡^ö ‚:^‡ ‚*@Iø;Jo(Åé@ŠÏÃ~‘ï†Õ–LX¬JmòœN² ½¬•ÆPÓjª3ü<ŒŽSÐg ÌdÚ›²÷Õ@q€›…Gv] æå¨¥š…ÎqÃ\ìËlÜ‹ ¾Ô#Zhª¹2K$ÉÙnÌA AN^Ã’þGF†Ò 2KüŒ ™-Kl+Ò™,2Gdúø_gnøLü™$>ûܹsɳؓnuuuWÝð¹È˜ÙŽ%Ÿ‰ íEº 2_–Æta ì ´‡aý YÊ’6ðZ¼û ¯ãöâ;Àzòûàúáó°ÎÒ–„fRW²}޽±?Ò¯3¾Wv«ÇÏX–Ú¹ V%â8ÂúI›ƒ)£²Î&M!J„ÉÉFY5çxÚˆíET0‘ÀÂ[‹¦¾pß_Ó‹-ºÊÈ1µb¸–>ž]À—º•qn[¬Ö\˜Ûi’Ì™2Ö¬Uµ iÛ@f… ö¶ÛnK$0f^y„Ï@Æÿ³ ý̬Ùy%/.„%+¬‡®ÃßQRÄr$1€ìR3Èæ1^VvÄ`Æ÷c_ 4ejK˜Xo.ß$¹áoi]VÝŠFšÍNÞõÏz7ø®q‘Áï€Ç?6ØØ7¸8`[!.8 ^¸?«ŒŸS…ù/±ë…ÔgÞ´ù]’«ÓúÒèb÷^¡"/µÍÈö¼{e¡2f.RU‡+oVá*^HãÁó¨^Ê1dbølv˜@@ÁzHPäkÒ82UÜ«4d´WŠ?> AN·ká}ì<¡«up•¶œ¬¶ ¨²ƒƒ$þ^Ħ¶`û¤2ß}Z_É1ƒ‹&}Ÿ[CÉbÁ¼yó¿oX¼‡Y|‚< ›By¶û¨ ¿kÏ¢²u€çV 6¬(Ó_ê|©Œgtm !p±»33–¦$•ÂÀ¶”"«–vð?'{»é^} (’áëLËd0A~FPE‰@‚"¶ %,+ÍFƒ÷HpÏ"“ƒ†¥t qÔŠjq~´-ÍÑÐh”½<ÃùÃ+ÂïT›×”ëB¾‰iþ FÒ×Ñ‚×V6N6x€Ÿ-ÑOÇ›ØdâÐ6ƒÒîÙW֚ݨ³¤üMw[ç°(énÛ&•ª ¨øyH„QC›>O=l/–õ{¿÷{Fz´7-¥åÿš„)fä¢R ‰)gÉï6J\;“8¸ûíÚô;ŸøåÖ{Þú+»¢€!ɼٖ„Œ_ª8°¬$°±ÃÞÏÎø|övCÂkÐ~„ž‡&‰†Ã/áõz @$:|†ÜO…R–eòÒc—n<¯!YÚ~ªYI ›”o éËâóN¯†ß1%¡œíÄ¡|¥»ëÇ’xÊaMˆMiiï’±=B_|ð3ž:’øƒÿò§o+Å8Ø9BiœG 1E à «ºë6ª'ÑU%!–þPbbÐ7ô"°ð^) †Txðæc6L¶%<Ïv¯4ÛÓ´ÀŠCrìI‘ ôœY)¿âJ'~ÙåW:O<óH¢Ž~öÙgÝË.»,ºòÊ+C ÿûO?|wûÞ`ÒúþÓ7ýsõœûz”`LÕµ|íOz®tZ_}ø¿hjñÏk*±¥º´,6•¥ª´O­–9‰§׆í’fz//©¾z'…Dò[?v9@—WÊÇßþ«ðN÷ÝcÍb7¸Ž‘žój˜¶ó*äAæhPÚv äØIà™¨îoþ0 #@x÷í;iG>þ…;_¢{zvhÊ\Fê~ÞL‹ãQÞ3%&U×q.+,£OåljcqFN®øÜºrÿÕîþË^ÖJë–ðä8©h€Û§¨ä<;MólŸå:á}¥0ÉJb{„(»íªb ItmµÒïüÛ ¿åþ[ø­D:C¦ðvCêŠyÐ"zᱪïË»þÞ(9!½þ•7¸¯y]ôøÓß [oøoZKW½Ú{õ5×90î܉ÏmÉðI² éú¤däÁ(#pKŽL•ieyô_&.eϺ¼rztm’E›²qKåëNkSüìãƒ<±~û|—êãP* Wõë®A‚ãAmteÁÍÒÞ% b{ÌÐpuÝQ ¡½Ã>2Æ nRŒàµiX.’Î LlƒÊ±=%LûMµ¾ÿÌÃý'žy$ þßI-·%!P0Ñçy¸OJ3ÒSDªáú0AèVÊ=F"‰hEÜ#%"™-á®K\ê¾<‹Õ=¸§amÒ69WŒ©Ï6 Ú…C 6ã泊ÁçÏÓ‹ÕStÔ9|agÅs‘á N~ŒÛ¦ ‡Ïº^8^ëQ¹8À6†SÙ÷ÀîêŒâ&Fb%~”ž™ìF7€NÈ}üz0²/ì*{FÔ”X”ij©Ê9rÓñð |º÷ÆkßÔÞû\4þä‹™I#®¹â•»ìW\~å.EÔó?zvð²Ë¯pÃoŸïˆÕ¼` ·½#Û)8ï}_³í€AEƱó\yL3äEbÜœJ#Ò¤=IŽV–#{•S ¬@بt)rÛd"~gkb,ëRg“ììØ¹oEÄ#· ¼y˺¯v©]õzŽ Þ²O+ûõ8q|¯:oí©ëo¨ÿÅûKÞ‘ú­‡üXð;.Çãs{Aèyü"ˆqw…ê ;E¿µ©Ãøšîݤk»øb° ùãõ~ôÛQñâ^Rß÷iTâ=Å¿øâËsÕoœö»¥Ý·L÷u<ÛTæQQŸd²«s­ŒU 3,kK”ÝÏ“è¦b¥SÀ0óßÞ¿së¿Wöþò`sÅóÒVì Óüîö}ñß|ë/‡LÍû‰ã½ï=¹ëöø¿ï§ok“ZjU•£ZuÁòG2Í„™¢ ì3¾ À„ú+YÅÚÔÍj1ðK;$ýdÑ ¥M-0‡å‰EYÉ"H¶)G¥·çÚ”26·Õ"k€ ­¬wÜ JÚmX<ã܈zô‚|“HO€æ¬C(lëÑ‘ߥvosÙêßá«“ÁX—¡›6Å+%U*@Œßè¼iðÑÿñS»Ï˜©¨¦‘>¢ö6•(Ƕ©^âX¤‹b®Pɱj¨ôÓVÇ õyMHž]¹øUŸk<‡íÝ$R©tŠ…ä >ߺ¬ À?F «gBÑn (…Úêz@@Ö§g™(‚âtljxùÛT–+Eu:ΉR  áù¬=c8ç‘Èê)QÆ¡¾ßTÌ_4®~»óT 
Ï8TàݵÉ.SÇƶi´)ƒÊð‚&G¼Ñín¤0×ÉyÜÏIÙ‘I›•£ò¼@ÇKTV—€°£`¾\¯ˆ4l /‰%€ à @j·-’²†¬Å‹‰µç¡‹_Øá nœ¡FkAºëq­¨GÝ”¶'zn’×´²±BrÄó=(k²A„†ü."K¡ƒ,£>©Š¤Š©È{K¢…«{CQV¬o¦ÕÊYª3.+†|»ŽmÓb´)M¥¸qëÇú 4‡t[9¹mÿôaä¼ÁïyÒI d¬u"É)€%5d&œ€„’žhGäZÄt j…Àb;Eí†*ÂûEñs{Â'2©ã„ËèߡϱÁDW˜PŒ›¥ÆÒUÈä9Éa~ÚtM¬8¯~c­ìÙ×øåóÆÍYQm*ag™DUU¤œmš¸k”sĶiï´iPŸxAW[¿Ê}óθnž{W"¨à^¯‚ø@1È€W”–3>?$÷î€ÊÙ,QÎŒËíØ6-|›ò€´1*-K bïxÇ;xŸ@ïK_úÒ–íÂÒý‡v±¾ê»©Ke¸T­X'Z5Ó~ ¾P+``Y©¾Ð7Òòž!và}H>ÅÛK+÷ ±ÁmÚ YŠÖ«ŒÈeò_7K"±mZŒ6å7•–š¿¸é8Vó÷¬åf¥ûî-’:ªÿj‹ú±¿d%8 …o¬©~[QýxN0 Cf¡€ì„´Ê¶ C`Y©v5õROÜ·Q²œ1;‹ØL+¦<Œ$P–Ú6-F›LDû=5¦gªæšåtò¾ Õ‡ì@TÛ»++‰¡ÖÕQTUŒÝgq…²©ã‰4vQÙ4äïôÜ6ݻƿéŠehÏæ2£,°U׃Qоl—hÓ.°I;—R·]íTßOªßÑð:‹ÉУUïZ]ÔÔGÓ `««‚§Y–mÓ´‰¨ 3öj$)"2ð‚%’~â!›ŸèÐ×á9æ†g‘<…¤>¤ù ñµHãSG4ÄÞ©1•¹•Ñ%c毆6á¹õ‚çŽÉï¦úaÝÕo:NÕ%Í^Vâ%.q%´ßOˆÕÂʉsçĪ Ïççà=´²ÑA%×½lÉ€4$µÅ3΋Ua‡ê—¦Àö¬Â¸§·‰W’œ‰:úÚ¹#im‚tÇŠ>?sÚ*EìSµz=­­f—Á’¥†ÅðÔ¿·'Ñ,T¤¶Ît‰$iöÏ`ðaFʼÀ=9ÿ8H¯A\|†ˆÚðö4¾åO Ó€žÊE•Þ*j†ðã°{}zò¸ š&ŽÏñ?æ×ñÇLþG æÎCÛµkœ*ߢŽHV`Ü ê”xñx®/DɈ:O¢7{“=c´úÐ7%ã ñÕïÝ@í äÏZº¬R¡’\µÊ6!­8§Á|ÒpnC€¬\eD)RgŸýÔÃæ`´o5ùïQoÂNVjê{²çCæÀ²´PÔ!/Þ24hRðÖ»@ãÔ…Q˜7&‹ôQ•â‰PÚw$_èh|â,.œÅ¼K|ŒïïoË{Ÿ!>‡ADçÑÌ·ˆ§ÌLÍ‘&=uˆÇmÓsÖou@øBà}¤Ä{Î °Ž„DêLûÅLvj°‰ªg°åsŸ'FÒËIpAÁÁÔKYe¸B?ÂG–jWG4¸Îˆf°2CnÚ9 ©Ñåš®‡: ›%T0U£»åAg±`!©Gó%,qLî­:S LÓã´°½Aƒ‚þÆóØ4ïCZ<ófo²%⇹‹IƒÄ3ä[Ä÷NÓqÈðŒM?mÁûNgð&®·£›‹Ð_]Êà¥Y¿OUKCUWJÓžš]Ã’£é|0£ ÚИ)×’¥†ÙŠaA† ŒàÖ•¡ |( » ˜=ç9KlGÄÛb>§K9´pS0\ÃA:ê $ëxZHݓƊmyZÝb)í’é'¢EC¦MOF{PÑwÜ dôÉÌÊ_š ˆ‰ Œ½Hõù(êKÕáQ…;BŸŠ^xǨcñœ´c… Mb8u¶£58÷­®6`é ŒÄdöž9™Ê+ZÝ|¡ë]ÀkçÖ´s€ά3ß2 "wÛ(²A\ÇÊ•ÂÉÜQyéjøٷ˰;Od¨mø(µñv/¶‰Ýϵ²’òžyæøá¨®Yz vçbòÂ2ªºY¾'lÛøYÚö:Žk̺+Ú-íEøÿ¨Ð,ùšzo×ï°;i¨‰¿¸ÚÂ^fîšÔYu£ÿÇÅ9Éߘ§³÷w_Ó„õ¤Ú“$E“¶)ÍT‰öÝu×]MÙ{ï{ß»‘`œ³¦«KF– / møSï%É>¬ÞÉD¹ Ô„OVŠU7hPO¬Öd&ß(ãVF±"9%ŒŸµ'I¤¬çëeÒÆ,5 £ ƒ ÈfܦÌq‰Z†YzÖj’—å}åÉ…š£uTR'ÒË´!Wªõ¾¼™õ1¥Iƒ¤¶`wÈ¢R„+m 꺖P;MBˆ|n—P›œ’d?g<ÈûÝ´)oqåÁŒí»4‡­M¹ZßM¥ßöÛ®ÝÓä@=AROFÅ´bÅ?Ür ¥‘^ƒm$¬>Š €>7±øl¤ÚN$YdÆê¸šïRlSδ0„ñü]Lñ­½=®hWÓÚ”U‡¶â–,ˆímjÕbŠ)ÑÇefÞ„R@X00ÌVÿh7Ú¤ÈçYÔ§@¶Qð1C”ŽR’b ¢K¿s¢ÆKªM°ãÆÌÖ…ìè!ŒòSu)cr“Úd$rÁwí·´_ °™‹æ–ÌD†M€É÷‘Õ%‰q|¼Ò{“*–%#¨”vl)’Ôq¯¶‰ÂŒÍDe=«6M™bžsEœ,Í„¼2üï2õâ6i5æ«yJ6¦Þ|€k '’:ÎЪ8qߟð±!Ô¡¡Ž ¼$õä­œ[t]è%×&¨a#3mŒoR›²êYG°€€%Qœ{4—ÀÒW'K¤®ó Ý}}&m" îˆþõHCµø™†Îÿ-d-ÍöÙ.¨U;£Vd+M«:vÀNŠŽ* œ¨ ãÁ^™É¹ž Õœ)X.¦ïXÏ(ëŒö q9!3ié¦-ÔX»ËTÖö¥Ò&²tOÅr¤¥Fµäê¼Û”1žQÝé7É.&41mÅl– bÄj0”Ü,¦TÌLƒvÛBBísQ½L&_XÖ…ñ}Nž&DÚ畵}©¶‰$1O§ÂÙªËdŸU›Rƒ·<À’¥úï!4ì6Q ##¹%K{†š:®ÄìÛ™-Y›X}T[Äø'zbó™$Z‡%K ¥>43D^l_±E¥ZrwM¡N¡}5–ö á¸vm7X² V£$öÀíonšÄãØ•¡¥=JØ`å–,ˆ]ÒáÀvƒ¥½FVEnÉ‚ØV†èÜѰ:ÅPO>1K–,Y² ¶×A¬€R˜k_¥½F´• ²=aɆªWêñ YA@§¦çæG#gÏÌðîÛG©8è¼§~[+𬢮zÖ¶v F“dÚ¹¾:·©>£WfF›yCꋾéÞ”úpdIX§”ëySï€ë-~¸+iØê/·@rïÞèý¥Ý{$ë:ßñÍÒ¼^kÏ.’õc£Í}—ÕÈm(ˆY͇±Å¤nó&ÅNk a@WµbJ–ZŤ΀!+¯ú½­o˜Åþð!#ž1»¾‰9ªs¾zÖª`²>1Wùôƃ0Äçž|fuÁà8 îOR§#Xj§ØåÏíÏËü‰þÁ{UFO=%¥N~ÚsÔ}-H‰w«Ï®aqpŽêÊ€ØæwGÏäþÎôv¥kqqˆ®s`d“õéš®º¦Žˆd§‹±t‰U'î}P„ƒ:ì41Å|SÚ£-eÓÍ3€!sÛGÏâpP>1`]º,Dô¼Ã¢~üÌ¢›ÒCQ'‡Ú†@Ÿ¤’1‰“Úßâç³öÄÊgÒ±&$Q–h»ôÌT)DÔËõê¦Ho©»»ÚùCt> çUǵ0J;ãd¼_S±åI~‘C|ÆÉ”ºlÉÏê¹aÆsùt¨Œ.IzÛšúkLÕHõN K7ÈQ º0RIÊþ;\ǤPj“ÂhÞoZ6hAl‘©O̧1±Ó(¿”‘ìËÄÅË ¶Çô ̵ìv…Äb"Ò[V%Šçož‰jÅCê›,Y[€X‡@}‹7@€.b Ҷѧgà£CªOYÿˆ®?G)¿ ܤ#Õ;¦¾ÁßX¥Ê|{T÷Ö&$ÖMJT‘ÖÖ̓˜Ô¯±¨X´_Rl‰žÝ²ìÆ’±úWbÛ˜¦£Y7lã32\T+J•o„î`¶ç2jÁüRZÖ$*iÒ¤#d €­(³î ©™Áƒ%§C¹Q–t“Þ$8Šë#)áA€ÔQÐ`^øtmK²Ž¡O×Я’tèÁ¸ú°CØJ±ùé€uF´/ EbÂ%Ǭ% b–JPŽÆdŸÅ ¯BG0±69}œÐVåe@ erØ>gyÊ Š™Ù¦HEž¸.¿M¢õt ª@®¸@$A ËAg”¤–é+ÆÀ«íʹÅHýÄ`ˆ’CŸ˜b‹V÷±ŒäÍh’fº0òŒãgñ~;¶óHÕƒ®,—mfƒ­Å'µVK¨Ê¢ 늺s"ÌDuˆe %õÒAÕuT~ž4Æ6«ÓǶëŽ,¡yZ_zÚ€ûÍ×Ë×ív$ý…ÔW!Œ'ÉÀHͧ”?\”D“$z?Շߩ§Õ-ÈÆ2×çð”î±”ïî½[Ñ6]+Õžú~ÀH]w]w‘îÇ1è¨ïìÑë«kÖ$¸à¢Fýv4¥nGhܧïè5P×§ÍkV)Ï(TC¹ÉØ1]S¶t};çU´Ôó6-ˆÍÈVxVÇy–NHJKHý~¦b”s Õ‡7ÔõLb|±X ˆi2C }oÓ ãY’Lzš'Þ:1Ï®V®ãŽ&‘`Æ>Œ’1ö@ÛÀ›#%ʺó3û,½ŽÉ×ÑÚìj×9Lݧzà5Ûtœr}¨©)^„Ôç²n-!¥ùòy²¿È5Ψ/Öo“lb11(™: çUV“¼ªpõyšó§E }U—^Ä{9*®íÃHÕŽÀeŽúýZXöÔO;)Þ]9Úg_=#VÏ(º¯h]ôñ˜¼GuMhhc©z¨kŽkå_ >ßKó'RK글jE2uÓΟŸ•F¶/K–ö¡L÷OY;G†Zì%É|‹¨ää=†ç]ägŠïBÆsÆÎSù§ä3²®/SMõx‘ΟÃÃpoézî/­Ú´6±éQŸ¤¯5u'à:CÒÙ€j{Ÿ*7å«%K3%òL çhs¡ÆG¤–ô¡| ] NÔí 
ôòÜ’Q—D*ç;†rj«G²êÄéHagÀ†º\´“©3õª"Ubd߈¥=H!ì¨?§¹¹ØÕ$¶¶5&Ÿ„6CŒo)@»Öz‰öLb@kdÛê“Jp›~ßRß’}Û•Òˆýšô¼v{­Ö]+‰Í Àpu7$j‡ 6 Ž¥½IL?Cƒ#¯NÞ:“l2פTˆ{Ê̽ZÒ&©z­Ð³zÓ:ëa%±9Ha‰úNz!ZIÌ’¥=CbæçªJäÝ8FÂu½GRLbóCæŸò¼Vºÿ#¨´„´MÒ#‡ŒC†Ål¡º°:U=sŸ8Ž’ûG“ƪÔÃJb £6˜ã×ÍS傃ʺ¬ZÚSDQLà 9~ð÷ÓÒÛ Ýò1h°ø~D§Ä÷Cxd*´©Z~?§•¹ë»V‡ZŽå”¹,Ÿ™Ræ¤í>TC»Ïä´û”¡'rê —©×ÉÔî%í{V»ëèûó5ôýé”ï­)kBÓó Œ<íç¾ÓÁÔƒQŒKŸ>'÷ꛇiƒ/kRx¿".’qÕav–uíêºé‘_ŠÖ%ÌÌ®è§*õÐû1²’ØüA£iõñÔÄ<ÄÂO½ÿñ-fx-…úÊ?†®ù'™yÀΆé± öi`‚ˆm×Ñ&IO0ùý”¡.o 2=Q&2¿ÆÒÓê 'E µóL íîŠïK0Ь_´ÝgäĦ2õ:€¨Ã¡uÚ]é{Æ#ø†~èsÐ\*s Ê›ÑʹO /YY©ß8ŸT˜áÖ>+ÿîZ=,"O¬–¦ …{ï8(½iùìÃhRgx襩ã–+µ ,Ê:›”u²©êz6°ŠÚîʪB£Uè®m Ø÷´ì-¼úíü r‰%Rœ¹JþÕ©Kê·5Ã3·ÄfëCUt¡/q,D܈ÌÒÓ®„²¤›†zœm³ V/ˆáàhbtŒ€WU딬Ií(K’¹`q<6V«8÷Þ±³á“JÖÎfSõ]g"¬i«{Ñ&±VP²ÔÁ¢¬*ôDé¦nÛ]ÀÊó -"au&T…æI–Æm ¬W¿¹šô5 µx؉,yG¨^£±‰:Æ“;r‚RHÇm’ò<ȉàOÒT× ÞëÂ(ÄFœÇüÇxIRkåíÓæ¬+¤EV“ÆÓ´…Y«™Ð©ãúÃm v„Vb­œM»E«²ZФ%ØQ_8BÝU€„`Ôæ¯¾'Àĕθn9ÆèDuªþ«ú$q=¨u°H¬, 69zÔ XÁ„Û¦ X¥$ËpÚ˜Ûœ#ûÓÉyÎ-ƒ‡i±å ‰'ÑrPÚ/%`p­Z-Ù¤#‡ŽSª«•ç:¯ €ØâyKuœú%:S€`¤©«-ˆí}êÊÿS6üí(4q8¤L(Voá4Vl"Æ]d$‘¥u.åÓr6™…W褀5ím úb¡1.öz 8‚õë¹Râq,ÕdIqmƒ´&¿w¨^‡SøADY«ì•=:hàE°ÛÛ1‹ÉD³#®ç~ØgÕ‰‹Oƒ:w´ˆÑVÝ´[u/¾âîSŽ¡Y¶'6zS©O…†vÓÚ%ÃAUðŽœ°æ¶Q¼ P›@²¬déàu\Ïy©Ýœ I+ªðØ>¡µ”2È…JÐÛ ¯Äsê÷“Z};ðbñ™ñ(ÙÞb Rô_JtÓŽ]iAlNÔ§°Ò„ÊÐäçiGê˜RXt³T CêÚ]ñYîIq´#'åëÞ{Lj'¤Aº©ÝCª‡ƒšÄ;²*`Mb»Ëj“*42lc(»AÞ§w¿>Ç)æ’´ßÖTvr!WÊÝœlT]’â:r®7d¢=(0‡XqZx.vh1¹j˜_’иº;ªcÛ°Q•ylA¬á„yÄ”$c^±†l|–EçaGIʼ÷Žl¸TžÛZê OÓçë ‹•ŸTEy62„ã¤Ü¨ØînAu\ÓÃAEU·1dHXݪP“£G%U( oÞjD’öY:âEUî;Ji|^ýŽwQ V»UîêLSª8qk–ïBÄ¢ƒ4Àzr°ßyìÿ¾Ê¡=lðäß?ÿß<,m­{ï8ÐãóOE†MÚ~ãî=ñÙ+Â/þéCÛä¤â<þŸ_lé .SÝÿT`¼Éûáþâ§¿±?Âó´NÞ߯ηÔù¾¸?Ù(NêìãøÞ;›¡”$S%¬$ Ä;ïF=ÓKTGw$ê£-*³^ž:€a´uýà¦=$KêèrF™Ž=Ëi,Y›)b9ï®o«ßú¤n\‚„¨ÂÕ³šô½:™G™ø…TfO1BF:ó¨"-Á¸ÃGK¨*])yöœI—]’åSßH¬-ŸóØ'®j¡]”i£6NvÔwW1nÜt½Š ^µ×70`=ûÀ彇þÃÕêããC ëâg¯Ð®¥l >K/´ð(€âS¸‘4Þ‰Tò8/FÀšªô8g-pè^g罤‡â"hNb“!ûe;ý0ÊãE›`“ ½7ÁP’¬kCÖ˜ëBÃ’ÐZ² ¶×,Y‰“ôÅ*E½‰¹ôfTˆÀd€eŠRŽŽ,ÅYàM ØÓVØÒC1Ô$LiLfÇ‘±À¤$A´D»v©Bé_•€‚4|h¨N.=·%Rep0V C°XRÒH¢"TÖ§¾Gub¨˜¿£x–皣áÅÆÙÕ÷´ýA‰— ò¶Áv·Ì‹Í©f•¢´ã}¸÷¨MÏÐ÷eçíŒí“lcHsê|{ZsÈWëKW×vPÓêÛ¡œä¹$dš—¡áÚ®ßÖ´ ]1¦d—c9)X–Å;Åöôuõ>ï1£rtOÄfpAŒ»Q¿ŽÉI‹ûIRútP4þ¤±é¯«Õç±Í‚3¤>Ô$™vó+3J9ÐþZ9»uMöt]û®‚W½ë…6Çy¤gžgïJ®ÃÃý«PZi#uéù]b¶ºk>×oSo÷HE8F®ÐIxC›©Ø|ª3§ºiïHž†Ò"~ÞÙûs€S_T•B]ÈH›‘E´÷¨C`3³íßDÊÙnë‰~•.å}’Î’ Ò ¸]˜|ûFá$¡‘¶±Nêd¨ú;°;ZFD‹Ÿ¬~Å6¬gK)#Ù¨/ Zµ 6?ê@óR§w…$ÔåÈ4˜½ 3 çyè1]2––îÑ€'H§Ì(’ 0våÅÕ¡PS™¦`à|Ÿ¾×'-ž]¸Q«6â3{$5µ ؤ>‘+o/gâúb<µXøÔ×›yì~VÇ)ëe¬îCÍ惖҆VÙzÀØ„9ÒûFÛÚ Id]“êÆ˜æŸ}!ºøÙ+œD ¤MÚ÷¾Ø* ‰8 ]ÅÙl_ví?úČЩ‚ûÄuÈÉ ›µÚ§ûѦ֕v.yZ¿J)·#ÎË~ègHM¿k"Vé L³ŸTŽŒÏûad÷ Ôo-hªn†wRÞCTò9¡~/oP6€fŸ~ŒçÔqCÅv@Š <‚Qø»5ñî“E+ŽaSýë’šj3{&#€Q˜0¿hŸÑäÈ[«—ÚÌÄH¬’é›Ï”2 ûHÛ±ƒ7:»‚9Eè@š¶™5ÓÙD§ÀGêï[Y ‰“ƒêLó‹ú® {i†Išc[3hÿÒ?ö®ÿµg$8ðj¹«I;¡Pá-S~´€ŠTº$%T%ÁŸ½¢­ÚjQÆ{(¹Ÿ<@YÚn‰´ &žpµ7F¡z©)ñº«®±m¨Lª#¿®Èø¢x¼ò»mữb› Qeubâ@¤•‘D¡É:W¢Øžà-‘A~˜{6“£ÑÒ*pH©¼;îx5tSD๊61R§æXËN·kV8# h@”óæˆ G…*‘Á$ÖÀ¡J†a2‚¾ÂÈs=­¼,†RÐÙ$xáñËúôLaåäH¤Ã¨:P+°ð Ãv£Î÷¥T(õ°Ýêš]ïY¹“8Q8äxŒ˜‚bÿš7'ö«ˆ&7ªÏ–o‚ƒGÔ=×JéæUïz¡«€öñÌ“ ˆž1`õEv¿V¾{é=;ªH⾿¢½ –°¤Dµ¬ê°ÏЛU+å}w„*Œ·6´aÿoš¹Ä\M½5¦â2œJ?äT àõiŒ¦K5KœãÙž;6¥MqawÔ>ŒÒ²l“¦@÷ŒXlQûPb {Ju$=KÈTµ£©O‹&å´ V/EM«`B­‚ž‰e+3è«4Ø31.Pß"Î&hÿaG‘A†ª( $ÛÌkw žaŠÎÎñæ†zÌ@t‡<êGÝ~ÔÍ‘n†êÊ”h* 2õ ‰º'ö§¥I»¢ŠˆÅÀ!˜n*—UMŒ¶9Û¦±º5*:q W&Èt`w޼¬sºä±*ãênꆲúúóHúhÎ0£³Kƒú¯1L4€‘ ØølC„žŽV/·*/µ V‘Wb7_vˆ)8Y’Æ€•C/&ÇŽvoL6Q²#ÄWù"Ct n¦.‘Fŧ½ÜŽ< vxK€XDúw á¿D™g¨IÄ”¸žø…pcõQɬZ>­XvZ6o¡ªk§¬®—ж@ìýâ¾¢¨"IREõn£º?>Ë@Ãlc™JÖà€ÉŒ˜sïf‰:lLROÓ3H6 ¿™Ú˜¦Ö+\,/F}» LwqXªÄx?X“$±{`äRËûl–R¤)Y0óHÛ¬z Æ]©ÙÑC®òñþ£ä•ÂÈ&×2EÔ¦½&G55U(ó ]¥Ç›q…èVì•F‘Øíl Û}îù‡/ë'xy[]LýÕ¥=Qm.&À6¶Õ5Ñc`XÍãŠýÚ·“¨ÆHJKê@¢}òDti¿YO,Ž•'­ÔÏAŠJ^ëºös݇ûWõ”´Šuê©g†zÛ†҃‘·^DõáT/ÉL½´7©Gõ? 
ãÙêÛ®4§2æx\kñ0-]‚t™í‚Ú¨Õ4u"1ŽXWKAù(ålGY3IB-µ’²êwa´_«hÝuw~.sX‡Ÿ=ùº30²µyy’0µ7oÏ[¨¬Cõuˆ‘sލJF´'*±Uàyº†7F÷ˆ±:|Nì‰6´sè[¨žÔ΀Ê8JàÎ^œ‰7™(;äº €*KÀCX—ÎùTo°%ú=m ek{bŠó,ñº0îyª«€»5DÆ_K‘n°±-Y«•œ†$ÂÔÕ…q/±*Ì£]°Xzè Ɔbãfg€é ¹ »£³÷žúZ"epä0-"dØ)ˆ­Üqº¸ã@HÌqè͇ê ±¥©ßèÚ7ˆ÷€b0JÉÒp¿GRS¢¶‘ÑDßHòGjÉážÅ@“Ú:°³7jU´?ŸÔŸGRc;ûÇXuɵÏFÀ[fƒü¤‹¤vÕô<42‡,+œ-Y›ØVØí|P:Ã0”Ï¥ÛQ:ÄHP,‚}’÷kè6L›¥oZ>ÀLý&Um!3¶”°SHîj7o NÑÿ‡ãd2W,<©ï!âXs‚°ž2À¯Q˜¡–v>"Ê<¦ö“ŽúEµ#k@a‰¶!e+þ~TÔ߃Q@a¼FýöT#×Ã(oÏ ÎÚ _GÎ5ŸŽÕ†Í¹±Ø–,ˆ-E¸ kJ¸)!‰…“2 ó_ ÈøïBJ`v ÷7]T×ÇŠ ;BUÈâÁÈÙåŠkd-Oc¿^ÀC/ÎXá»)*˃´~¾vCŸ&x²M>õþD…¸®õy ÁãÊë^t`ÜÙ¤kR §¨ž}Ï{Çï-Ù[´ØOuv¤ÙÇÏÜ÷«–s®©Ï–ëX² V'ˆÁ Ò«”¤„!«ÉO°NK÷ÐkQx(Ù?I0÷¤¥@€SHÌɃ‘ ½«Iuò³Ìˆ€«=Ñ®ÔU>ÇJD5š¸ôìv¯t0>¢çe:ú ŠŽ6¢´¡:"Õf—œ7º0:$©Žyt=ÿðå1˜pÏîhÜ£²xOÓ0zùŸ½ÂÙ¾ûeî?>³Oõcòœè?îûP$X5¹óOºA¾Ô˜³dÉ‚Xý †Ì°IÞ‰rƒ¨Î<&¬ÂQÊa´—Ë+˜«TÃ0JœÉ)\ä¦N>ï õÞ@]ßC JÛel·’“ Ÿ$%±dç^uý‹þWÿöÁT»§•t\€µ"´–®RÜ‘fvR¹°ÄÃ’ E:‰i“*K–1doe), ½dK£ÅDÒží!òvT´É†Y¸øÙa?ï’4q#ø½w$I2Q;EžŽÞ„ä{°{ƒ|™1w¸/³ Y b–Š:u\ÿáo¸ ”ÄÚÄĪ2*i5|M ˆ…*ÐÕÎÇ­ûÙÏ<Ùcø^ÞTÉ€È(Ø>—²ÛD®¸An8¥¬Å¯þíð©–3´»ã.ú>Œ< ÑöÕ¡=o,±¤s!¥­ä”Œ«W¶~èܳþÝu­L °œÔ…K}—©2y J‰Ëë¡;¾5äÄMp0Ägâ3 Hbô^óß?‡}Ý1`7éCÅÔ6–,ˆYZ"»AÆxçòWü£S°z%Û]°êPÇm”,_kwÀ +D×X­q̪dS ÛySš›†Éª-•¢L?UD5f ލnèp?ŒìT+C-•¤(aóa©åE„o÷²«^ê½öØsŒ‡)Jcš›õµ{²6ÕqºÃƒn?êBF°ã€µ®ÖJ ulYéö-¼Ðù…7®óØ µ>l‚†R¥Ó°=¢Ä,UV)n«A¨ã´ú¼jð8égšý™°v`þÉ#§ö«°Ìv‘â¡’4FñþZµ¤ #w}öP*ø ï{&ºâÕ/u¦ XÝš%¬"ö£Ië4”®Q`eö=Öý<8¢úÏÜæL@zh9R#öf±0³dAl–@¶îöê8v2ŠpßÌÏÅ&›"¹0 ÐhFÓÔRy«ü¼¼_C¦)Œ³ÎÂxVfÇðYS(ÊâϱébYF6§:tJV^ε:Ò‰”¬4g°:%«l6ï*!Èü úÞÝ s5Üg’4½6«I#@j@ßy¯{Ð&ïÚÚÂ,ˆíE Cð:¥ÜûÔWßW´ÉqfVuLÏ#©l@LóìŒÕRñ§ÞŸì7b7mfN îü9(â6¿“åx,Ó²ž°± `•j¨½,`™¤“ kkBÀÊR…Ö‚¬lßy  Á”bêÔ¢ù»®IeœÃ®mÕˆÄö$Ñ Ž#}·Ðùºq f»MŒu{Jj©RA_iã°¼Ÿ£J0£_º žÓ¯ê·4À‚™f]íÎJZ·ý(°A=Ñ5ÊFtÉêûÂcN“¨»bá0+ã€ëÂÐ¥EWbk„% b{ À’œN¤fXçÁOQ="HÉÝ4EëNQ-U%Jù.¦©«¥t KK£2¦ÙP‡‡MÑ÷ƒ’€U‡³Im!ÈŠdÖ.0æf¶D)KÍÙ¾:ÎÑO1¯“d¸¨UÑ5-–,ˆ-:%.ÀRON*‡y©\éñÛkRµTÙ ¯ó`šº$Ë:¨Ð÷¹c¶KÍõ³Ð¼ Ä,U¦ ê&QË –*bG‘Ì£S°ª®òg XQIÀÒÛç¡W° G×ޑݼ#ËF×Ȭ*¶»B‹¤n«9aÙŸ±E—š4*É<êˆ21m¦Y`y%Ë$Yæy蕬Ôè€UÅ;rÚÑ55Œ¹´¾ÇEYÔ0i û׳\pödmb5Ka ])ˆyEPÞŽÂ(§Ö6TÛ´‹ 휬R aü6mG%kh?Ò“G̰²íîÞGÿ l?û_û?­+.ؽ÷Ñ{Õ÷[:ê;¼üŠˆøþ·àñ§Þ¨ïñÍË·FPÍ;ïë«ó|®,`%•Õçóô#`rç§çw©Ì²±#å˜Ã1ÒçJÚî@lÐo… ÌînAÌRéY7=Äœ%7ír˜<º”¾ô¦])1àÎ,Ó*_}öéyýªzTo}À6õ „íîà‡î>ñº¥w¢Ç_ë)Vú…¯ƒž½nøý™þ+GÀ}|àð– óŽ `”úÄ×Ô’:`¥:›Ð ²cWqçç~¡z¯Š ÏeœM¸¿ûbÁ×)© mj±>½+ bÄ–â&V Eèèg–I-‰v±PzIŒâßå–IG`…`æ”`šú*Ÿµb]\(èlr÷Ö§@n¶õ¾·ý¾¤2¿ðÖ%¸å†ðS¯¿n¹ñã‹™‹ÏÃWzþúkÛðIu(öñs?|}|ÕËjåyGÂ(f`" ‹:ÝÆÐÖÆ¦ŸUlwXgQ®Š«£MÂ@ÿÛ%$K~f#Õv´Ôfv¶ ¶˜D^‰MNˆ×!Ní(ÄtBfö%spI&RùŒìe /“Jò„PEUr)'FØ¢útòœM€ “y¾òªËá×ny üò;^×_{eúxPçðx÷Ûø¨úþg_ú>|äÓÃñ Î× Ñõ(¶•TÖ…lïÈ«‹VJtˆÎc{ƒª¶;¡–v Zî±®P:9’åØ"Ijšü×ÒüÉ:v\’1¤-bdØíèaRK­³ˆ`ÜK°´K91@fÖÛ%\ÊÇè„ÚF_åQK­S›:t>ÓáAKk‰äu÷mo…Û~þu™f¢ñŽWÃÝÿËÛàwßó†ñÙWîÿîàϽÒœMø>iyá B°½§°¢³Iê3€Š¹Çè7^\¬i€e\$‰vÄZßX`ÉJb5’ÍËq¤Ù&¥¶è“z1ÊQK&) „¾JÐW—&·Gß«Fˆg©$ À)í¡GÏsAÄjL“,€1Ø%àókïú±‰ß>ã7€ßúËà+ß{ƒû­G~¯«$²>ÙÉÚZ¨­%êk}Ô† ûî`LÙ£~¨Ù¤ÃêÍ Bq- ­ÀÅôL“êÑûX-Y² 6%Z„HÖd-…~ê’0`@"FäŒ`<ÚB‘ ¯]bÂ-bÂRõU8Ã0K”šãI&b‚ý"j)jg @ŸÁ’̘*ôï¿ó§CiãßýóëIª.ú'¯9ôÿ矀ö¿ÿ¶²gÙè:À¸Ï{B­;(»QF‘Z:,Sè †ƒ¢óç ¼’þ§ï=(ÙäˆXô¬Ñ» ÐŽQ`¦gµ €;Â.jÉÒí³]P¥åk2‰•~ FÆö}i{Ð< {Ä\¤+52‡ ÿË0òª 4IíŒÒX80nØ¿» ýŒl0ë¢>'Øuœ!¸1ŒlrxxKS…z †Xߟøõð±´{elûÙ! 
¿£ÊÁ*ïž›ïü<ùÜ‹ `<~ÇÍÇÅ–—ê*ÓÌœfu ʪÔrŸ](Õ‘¢O—é8 ¤âÕ,³í‹m‡¾ÖÿǨÏâ}v4ÏÔ-ŽiïjK‡‰Ã*¹P¤å–,ˆMð&«Ð½¾GD¨ƒx•Ì6€14F¾ƒ4#<39÷4ã{˜x#;VZ}NÀhÏ—¼Dý ž±,ÔuîæƒÿÑyñ_áüòͯ‚¾w9³?Ðaã>÷X`ï¼áÀð÷/¾Û œÞýÖ%øàÏ_—jCûÜ}?H$2¤›~ìw¢¥«¿Ð±ï.cáÑ’÷½ls?§ÿ—aäà"ߣì»Aþ?&B±v¿#Ô¶Á¥ MY³´Èƒ÷y(ZZ`:ø¡»O«ã¥Ãw|奸™¾”F_þîÓÉ5ÿìÿá¥O|åbê5¿ùÑK7ú›/ýŸûHê³þ×ÿëÁ—°LuÜoßÀÂó3¶fKÖ;±>Š„JÎÒâJ‡‰ô´tµÙdüɯƉô„®öë¿ú¦ÄÞD¨NDIí_¨¤µÜe2qÛÏ]Ç‹®²cö,4YïD b‹IÜþfäP®í‰…–Â7û7:W¤ÚÁpóþòøÝ_,î­ˆ`vá·~>wßS‰úP'KÜw&AÔÒÂ’ÝìlAÌ’¥¹Qâ‰÷ o[J½àw=H`e½¨pŸ˜Ùƒ Ÿ)ë`iñˆÌ ¡í b‹LLŽg»aaÉÃ?èŒa¢¿è™ÄCN} ðÚ+%@’­mu1©cAÌ‚ØBÓ·¿yÍ®¤“pð&èTi ã ¢Ú/ÍVVhŒ\|>õÜ-7SK %…áø‰a¯¨1KyY/Å…¤Ä)ç§^wõðÜÇõ‡Ÿ}4ù„6­[È;ð\Y;úï¾ ¿óWß1žãµWðGkWY<âˆ2–,ˆ-<õ`<â„¥"òÒŸßýD≈@†{¿hÐË;èäÞâã÷'›q_î/ÓI¨õr],) ÷<:äÜeÉ‚Øb©|Ó~T9Ð~²eÛSÍ'T¢{<‚Öƒñ ðy%ajü=͵Þ`„¸!úƒ?w];mlãå]n;½Á@eÒ°‘ó½Y² ¶g€ # “Ç)1ΈÞÁý¶§ÈP Cºó3'ßóBJ1!PqDŽ?~ß É´}a8«úÑoŽIdÛϾh;¼yà… Ïs$;8qîÒïÃiÖ6?²€§dkŠJœ¾+úÆ¥°³_Ä×k¼zìÏý6|ë‘ßã8†»€ AÁ7&0¦_.ùŸ¿ÿ©D ‰Î#¨>”åqìÅ×-}Ü»ùc_jx—…°Jjmc»š»ô}4)lÿêîõPsÄ,aÜ8»ü8¶hìiã=§è€Qú–FG2xÅU_ƾ â+ ëÞÓo/ý\¼A¥2t£ç8Š:²´÷ì—Ch¶«6Žgv²x2}̤ˆb¬UQJÆcuZšÙÀÓSC,Ó*mE|o ðÚ³* îËÁc/J½)v¡‹êÂ47ûIBP äš&ºùÿøjbwCæøø7o,À{>G ½µ—ƒû’-Ì ÖǹKêDœÇ{Ä­$v çpJˆ<—.•¤~ÜîEóÒD駃Žuƒ†›Âçbfh¡”F/€ô®÷´VT‰†ßÈ4ÐQKs ëØ1½•[ÿ6ö&©AÒRŸ4˜ÐHþ¥'†{ÃtBGŒ²ûÃðY¿òñû“x‹i¥Ñí^ÖÁÒBœg{‚Ø^£¶eF‹Í[I@˜7+Bg‹?Ê¡3z(¢j0 èt _b(«´x‹ø”Ò„tciÁ?6Üœ±½¸B³.·‹I>þA3:c Jð‹/$Ñ7dDIÑŽ%°¬äšÿú¯¾Ë^Šá©-æúº•ÆæGÖ&V3‘÷ÒÀöÄb’‘µƒº»«@¥…œÿø}7îºU‚˜G ¥,»ý‰ï&^‡è´`„Ž(±afè,"ƒF‘jÉ’% bó$NÉniq©ƒ  ø‹’VZÞ0tþ`tÌà=_EBðJ¢ž•šl2Ì9‘U'Z²´[Cwñij¥,SœCPÍ(A­€áæi¾*sÅö¼%KÄ,Yª Èp;D€Ÿ1ÎaZäù*„jH ``í)–,Y³di @†›X“ðBèì¾µ+hoB;?ý½]¦Ê±N@‹O6}ޱ=Cve½·€ Õ|uÄ÷ƒö¢×a0ã¼dèÍø‘Ï<Ì?À,Yšœ¬cGÍD»øÝi<›<aÚGgUÎz,†$•µÑ›ô@|ç’D™è™È±1ù%ÆAüÊCÏÂçðýõׯp*RG—ö¤Y‚ap€hšù¸(DTkåpVgû&-ˆí%ÂìÎËuN mC*‹X}îpLÆ)LöÓ V[ÏÄž Ï (ªgÙhS"rŒèÀ”Ân±ôEåxuØ“éYXß®}ƒó%k›òJSý IåPdzÐÓž¦]Œž ¥»È9’ê0"«Æ.¡Ý0Éü=å ÙìØ1ñû$Kêm}[»€ = ÏM8qÑœSÀÆ´4â}•á”ˉiL;ÎÃeÄ®uæ° v©ªÏ“§TÙ‰sŒÔ]’ìNL«®ôld,ƒi–Ó$zË[Þ²¬Ž‹ê8µHõþܸ¬Žs긠ŽóêX¨÷%Tr¨aèòþÄ)•Ó§²š‡‡*<ç=رKÈ6ÕN³‹€‚KphWCÕ«øðwšœuNö3 º¢œ3—ÀëéÓ =\” +ÀZ¢úvH’A Ôï ñ¾H¢ ð @ÀvtêN.©IN[¢¬6ÍÃCyˆ‹9ÚÏ6àyh9ZshŸí‚¹MâÄ|L*BVy„´·Åtÿ!ZöÈ^6I]xoO_aR9]b6…ʹõc/àÿO½ÿñ£s’¬.|ýë_?ZðÚ3,åª{ÎÎãæï°iŽ ¬ŽAú[ç¸oêŒV½gì_½ç}%ÇÛiã¾$í´èÜÖcz FNFÛ]Ó¦ò cVò² fiº`ئUy¿èĸM÷­,§O×o/"ˆ¡êÿ«ó[ê3A_}?žr}SAì4¤{óy Ħ^ß2 F‹¡Cæ‚H“µË)‡ÁkËr‰½MÖÅ~Á‰Àg'/n²«ÊF.ò|¿¨¢VN7»œw5¹Ë\y+pjÓç¦)bÊÔ˜í´Àq©N,Í´h¬9¢¾…@…H«ôìc¤ÒŽa´·O–ãiRÓÀ$áY² f©ù`†ê“*p8¹ëp™/RNûÊ'©5$FØ®Ñf¤$­u%õI:·YHa†hvÞ~RIaëÚx“åX7w b–ö8°Í¼œ7ì`cûÕ‚J HúB;ØÂÚ:X'ÛXK, "õÛ Ô¶g46öR9–ƒ¬w¢¥K‘ÐÞÒR¶ðyQ"JÜIÿBÒL¤ÍEñX´dÉJb–,“ÆP2ÙÞf¸WŒ€ËÉíûæ-Y³diAè-oyËRQ[y,º)§Ý*s£P ŠqÖ¶2K–,ˆY²Tµš§´0]‰›·Ø6€‘JN§ASƒÿÒ†çŽ}Ý–,ˆY²´‡HNfž/”Ò`Ç^t–>#P5&ƒ5ÕÉ-àtRH #òÔa%1K{ެc‡¥Kä¶XRؼÁ©]@ +“Äæ£³dAÌ’¥½@”4“c%öIrk¡Ôýÿì]¿oäÈ•®ñÙ±zMâúã‹ë`8ɦÓó ÇÙ&7œÀáa¸ð°­à°ÉÁ¢þ‚éI7*ð…7­h"㨃' ¸…» ·çq=ê+õS©ŠÍþ!‰Ýû}¡V³ºXUÍ~¿W¯^­ ÂæfùîÂ)¿ybAw"±wÀÚ©†»Íà€Pt‡VJú©vaÔEbˆHÔ*,ïAR ï ‚$FÃ'01ðSOLíû‰%²–°°>ìt ím>|Ÿ^‘ì/ ïí–(V#{Dz}æHbIŒ v±y"™c:^µ2q=n#:QÍ¿uÎM?|=ÿËæßÿÍ|ú×ä# ¹Y!:QÞ9AÄîÿx˜ü颹ÜB=‡¶ž‹MËl³Mw0V×¹ mû~ìh1·Úº Vme·‡dÔŒñ??}dþõ?k_ÿêÿf~÷_-“eø|êZ©H©éÑù,IŒ ‰õ0ª2q^Ê“§5ª§£;±çžt|^~ìÏTùe‰e§¶¾gªŽ×¨cd_»¹¤2@X-“‡Úªêk÷²S[îOÅ×e@|’ì Áü`ÇN™€H„Ôföõ8 X¤­³Aázb(*så–Š•ÉÌbáì‹uÛ¶O•m:”âÈžûÄk‹6LìßÌž{±Ï7°5âG«©U%¯0öcïÛ$³û|Hýýõÿâîm³Ø†¤/fOÄz¨°#õ#ÐîÍÚ뻓S­†@$/„TÅàÙ¿‰çÌñ~p–fùZ¢$WAÙ•}\”˜­ë´Ÿ{‹~J;ª=w1Êx‹a/,¡efáz»ÞðÓ,?!Ù2‰ îÖEòÏþ!ïP™YþI¬1"C¾%ƒ¾ ÆP9—jÛSàÿÖ{ßm”øÌSMÎ(â<Äùý®î›¼ýtã/mÞKÃîÆ©§`E½Ž­ò’ Žã /‘˜í¹b¹I%øùOËßþå‡Z)ÌKŒÅc³ÜeJ#HbKTÛò>…!½€A—§ÇûJëӹƘ¢þ"Wi§¸_Ú¶«'ý¤«/wÄ­”›ÔS¬Ò®m‚šÒO×2¸ÊÂÜÚ¬–Õa—ì0Bò%բʬßj¦z«¢Þ|øðáÙ÷e¹iþ}ôrÌwy4‚²›¼@bHe~f2 È¾à6ˆJIÜŠSG =$r:RpY‡¼µ «R¤Ü¬QÏNσXâù½:‘¾\BÛ 2ùþ'Þý½NÜv)õ–†%µuŽ‡š˜˜ v™Äİ$ÖTJÀSmÐg¦Û¿‰áÁØœ-!Ûà—Ì áB­%Ã5òÔçs÷ô¯¢ Ù·Älª^Ç[4œ…÷Ï™§ÂòÊGˆ¬°D¶I”æÆsb’Ä’Î&UÔ‡0‚ ¶Ib˜;x97uЧöÜv>Vk6EÈ4„¢O õ¹¹^+&äù1?¸¢(€V…E–¼Û@õ…ôõÈï«Zæ0Þñ{´1·CÃËžŸm·\±D–/)#¤Rméê7ªžÔPŠû¦èÙæ=$ˆ{PbyŒx n|÷ZõâÂÉç0&¥é—ÐÔD®u—à ŠªF}Ú8ö™¹½4ëA܇fS¦Ó5ÕXâ 
trove-5.0.0/apidocs/src/images/Choose_CS_Image.png0000664000567000056710000024632112701410316023112 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
trove-5.0.0/apidocs/src/images/Choose_Image_CCP.png0000664000567000056710000005140512701410316023207 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
ŒJ$‘@é,;¾7+ ?æKÀ—€/_¾| ø¸ì$Ng°„l6 KLq¾wÙ=&Ÿ!_¾| øð%àKÀ—À¬Ò)ìƒÞ–‰A‘›)p¹\Núúåàƒ222¢eý»/_¾| øð%àKÀ—ÀJ`ÛUÛ¤¢¢Bª*«`hÃi—Àô`p‘{àh®›œœë …B– ŠW\i3ŸpÄáK†ÈMƒf\™eºWÜ&¬7a5nò`ÆÉ×|õ0ñ*ÝåªÇ|¼‘–æ›õ7y7ãZ¥–3ëdÖÙŒ»ñ³Þ|³íØ|vf\Ÿ£/+_Vìo…މÚ7ͶdÆýveO|þxµ¸¹ÖoW·š:÷¸ÅbEšTн }oo¯$eRRR"5µÕ2>>!ãcãV<›ÍÉ@ÿ€ÔÖÕ`À±ã¤ÎIÈ‚áëêk]ËUUW6(½=}¢ñ¾Þ~Ð*–’ÒaœùÌ# ùa˜ 8Â÷t÷º–#äÜH‰„-eˆŠã š®qÞ©ìØ°Y žŠ•¨ùÊ™ø(tVÎÚ¼|Ìcœ•`˜ ²”¼Ö‰½Êißbø±ˆâÏrÕÃ[VÌ[J=XNånÖÓŒSþ¬÷+QŸ†½_AÛ.Ÿ3î?÷~îËêüquqc‚ßÏÝæ¿]ùíŠcðå0î²-ê|]–HXƧ-›·ZzóèF¤½½ÝŠG£Qk™õýŸzþt&¿yÑ $?ø¸P p`e`'Zj0—w—ŠƒåÈ‹n4Üxwâ`§dY/¼Kå×¶‰k¡ü¥ÂšåÌøbè™å.FÜù .W#NÊÅùæîUÅÀzá0Ój 囸/´_-Tæ»õÍ…Ê‘‡ÅòÌ2 nc‰ãý׋/æ£Á2nuv£î„õ¢çVÖ+­ÐgçUÞ'ŸÎ² å»Á3­P99Ë{ý&Î>èä«}\u[Œ7»΋²ŸîKà"H ¼ AU˜KìLnÁIƒƒù&ÿX–8ˆKC1–óÉ›^…[ì]åF>Ü‚“77¦¹Õà v¾ô…ø™¯ìræés4ŸÁrâµââsf;d;^(°­ ».Í×ö¡¿ÍûRÚ`!ýj¡zxµ•Be¥u"üb‚s,Y¨ì|õðêç^4¼êìÆƒÂšc×Rëlâ/äÙ™ðfÜ­­?ç¸k–q«‡™ïŒ»ÑpÂ,å·[»RÞx_lÐH–+hÜb øð¾’ÀäÔ”¥D,7_~Šž©±·òBBKÉÜhv Ÿ‰‰É9ð,KÄ¥G¾ÇůÂ-ö¾Üœ¼Í‡1°^xâÇ«Ür§ës4ŸÁrÓxµâcÛäçx ÚŽ ]—æ/Ô>Û éW Õc¾¶Rˆ¬ª“ÖÝywKœ0æïùêá%7/óÕ٤ɸ’¾†¥ÖYËó^ȳ3áqgU>^}Þ­NœÎßNÎü¥þv¶+åÍ‹÷ùè˜/¾7Ÿ¤.aO]É`.4LMN ¯ ”szØ{rs£¡‡E”.qŒ Ïu¯3•×r‡…äæäm>ú‹õ³?^å Iç@eVó•á’‡³>àÍIh¾òWrÞèÈhAÕc;.¶ „Z¨}8ŸÙBx éW Õc¾>_Hýª“WÜÆ/X¦ÏWÊÍ­¤’)áå nýà £¿Ý`'ñÒz¡{éy-%°O8^Ö)ŸùÚ[=æ£íFc>øBóˆ×ÙfË›-_ó’Ì%LgÃìéé±:ð%dÃ'íKà’J ¨¨Hª««—ÌÝ%“É%—÷ ú¸œ%p¡ýãr®›Ï›·Ì—R_ó–Ó+šÃ‡Â·Þyq}Ÿ.[âñù÷®t&pK{mÍ0ag -2² ¢sòP]/žÉž~…H`xxز0Ðâ“WÜœ]H`â -,[VVfÅ )¥À˜ƒ¼Ê«nNX…+Ôú©ðzwÃG\šnƵ ï Ñcy…1q)MÓßzw–ÑôBïZžðNfÞ|ø´á_¨œÂ{áµp¨•ŸøØÖÂë…ÏOuJÀWà.“çF“*}íqòádÕÔÔ$7n” 6xrHW Nå(vŸèÜ`=/aÒÈdr4l—'JÃÌ'ª\ŽÎ’ MÓŸ®ˆÃ¾T=ô Z–躺:8¸ŒiÖ¼÷¡¡¡™¥k–£góëo¸^Âá×ÖP—ÍÌ=ÝÎõàn Ñ «y¡°ûÁÍ÷º»á#.î÷¡bÂxÞænYˆñj=¬8 Æ nt™gÑÝ¥î7š†ò£‰D$—ŸkÒé2•ײ“•÷¥°ø`¹B`¿ónÒÃF ëÃÔ>+Zà²Ú4?&nïœôyh°Œ[]ñ¸å3]a˜?§Ì4‚ d&ê„]ˆ†™?ƒd‰Å5SÜ!c'o g•+°}h™™»ã™{јwD¸ÿMûAee¥õ¬æëŽâþÏ+D…›p® _®Õ‡#²fíjiYÑ"5Õ5žƒçåʿϗ/å–'(*bÛwì×ì°–TÇÇç?½\__/+V¬€?3øöÃv/å¼ZËTó,í›ùfÜ­ž 峌µl8íjfN¼Àå=/^é^4¨,Xepw¦ëåÌÓßsðr)tºNšÏû|4L8ÖL_ˆ†Åÿ<ÏÎĵP\ë«wÒÖ0_=L̸–ï®x £qÞ¸Ošû;·lÙb]<´³PÿX ~öò”À-¡ÒZÄe ¾éòbœ ‰-߸6ÏßL7aÙÀÜÊžxyâLã|« £4š4¸LbÒsò£4xÀ]f1ysÂ’æ3¸ñ3 ]ö4ËñS¼X'ÅK>È;-´ÔÖÔbÉgþC Vÿ/×*`™Ž@™ãÒèØØ˜ôõá›ÉøìŸ9>ÐÄüšÚ«ŸqB-$èäék)°þ1,Ö ŸNÐÖd?=Y›|šq·òT˜¦ù1aÜx3é)¬Iƒu³,hšé¸›°f–‰×Œ›0_ˆ†ÂñnÒ3ñšqžéf3ïBâæ3W<^õ0a—ÂÉ¿ åÁëÎù‹}íŸ œÙ?ÜÊpÞÑyPç+7¸åN#oÎyp¹i¼ð™K¨K²Àq©B‘0®Šãzi§sº•cyÂ1˜qÂ*>“†Æv>ZÞäÇŒk>i3]/DCáœå˜Î ¼1îU:Ìä)"*µ¥¥¥RT›QðXξ| ðÛ±Qëņ/9@JÜ\+dò´Fpé•“ûòB—ÊÖ ÎÌ7ãnð 峌g\»á5ÓÌòKIgy-gÆ5w3˜éf\aLx3_ã^44ß¼;a¢aæ›x–W\æÝÄÃtó·Æž¿5h^!w–Q83®i…Þ¹M€Kªlÿ\ð×þ¡óŽâæÜDœ :é|åëUްŠÃŒ;çL¸̓L'J[ã&o&^3~%Ó \­ÀÑ‚DMŸ µ~n,æÛ0OQò ™§a8ˆ:a½Ê±<ñ0hœ¸Í·m¥aÒ£%ŒWMMÍ ?JƒÝ'o¤É“ÙÏ»;0î OȪU«$ eÏë4â 2?âKà “·ðàN(žéœtnãÊXWW—µBÆy†óœÎWìCn°œÍrüM|„e3Îyóï:jœðœ9?“¾â]n¯D=.B›¤¹7uчôaSã§ÂC³(•šqù€˜Ï1jÖee GxÆÝÊMLà3D(Ç7ËÓØ@7éqé‘å´Að$qð΋ù,GþXNihœ4È‹‰yZŽ%m¾)6ùá[ëÁÀòf9â"-³N~˜O:ülÇÐÀðÆ€‹oO—*àm(Õ‡åâQyá…SžLÄ«›¤´¶Y64KQ§º¦—™= ¸d¤“Ã2Ôõ¢üð±~ Èæë®•ë6ÕKeY‘ ô…%e’]0ÙÉÓÏœ‘•Wí–âêK°1m†ÏšJÁB!ŸOC!˜—ŸøŽdë$Xý&ÍeEýýðÊI€ýŠ}›}ý˜ÖkíŸ<µW- ìï3°ÿ›¨I+ SXÒ—={NH&X$‰›d}]D¢!󋯣rêT·œ9‹S³k¶K]"*ue!W¼óÑ#í…òMþŽÃÕE®ONŸéÇeOÆneX¯â²„¬© ã$©Y7š<1xÉɆ²ÿΛϥ%3qNεKGOR6\£”FCR,lyܤ³”ø|¼-ß|eæ{Ž…ðAY뜩ýƒ}Eç(ŽÕì?:fëœMX–uƒeºYŽ|^y5çOÆuîÓy™õÕ8qq¤âFXÅë¤ÁßÌ'?×9ºf=L~•ïŠ×Iƒü² /åÝŒ³¬ICñ2]ãóÑPÚn²²ˆ.òÏ¢F;VŒÆÉ(ã¼sP•'jߺæSùáÀêUŽÊ ñª‚Æ8ñ…Å7Uà¨jœVs0Wå‹ …qZ¹t@ço*bäQ㤧4(7æ?7B;Ëñ…Â×ú³¬6(6J:åÛ—pØÈICë¡ü/ó)‹±ÑqÆ A¹KyêÏ5Ý+=Ýmò½ï>d5!ª'¡Ýù7— HÅÚk¥n¼ãÇj¥¼$"‰ÒŹ|F’ãýÒvô'òÅ/—“]Ay{ ^Ö6W\.=Õ.Cý¨ÓãrWe³´DJ¥[ ''ÐV&’’À>Í0&þ&Xï€méQy~÷eªé§$´¡IÖÖC™_¢,o:~Î|`ßc7•nö»™1ãK"QŽ>ŒïãNO2ŠeuPfœA›0fº–1áu¬aÖ|³|߾ͦzå©'Ÿd°Bšo^)­•!‰Ì‡’”3§ÉÕUÁ5rõÊ2©-Y<)=ů,kšÒT~4Ï™O8MãÁYo¦)Œàe%ï’Ó'ËO=>ñ2:]NËR¶×—HMc³Ô—â¤oQÈêC&¥1ƒ×¤A@ÍsƒÉ‡75zLŽ>'Ï’Ä–¤,Å#³J4Ë»ÕMñ[Ħÿ˜ifÜ„™¡ ¼n¼™åœq–Õ2&;óù[ƒYNÓx7ih]Í4óÎ9‰ó%Û ç68ÓËq0ý‰å¼`™Î¶Î9SáTñ niàiFñ*“†ÎƒœÛ9¿’Ò0ysâ5iÅKL=À¬3󵯚õ Ÿœ»y7ãÄ­u _&ïfܤ¡¼/µ&^Ò/$˜p‹RฌH«*(4¡RI¢¢ÒÑÑ!¶ï2:¡%,ÓMX3n–ãò"A'n ™—W 6 >\âg96&Æ©H²¡–W 
Â62bÇ’Ÿ¼Óç”[9ÂrÂÐzP±¤RfÖ¹³³sN=È@6"òK¾X.V—޶n,û„¥(º°5Ȥ³¬q4œ\vTzzû䛳PEórÍû¡ÉT@œKöG'%Rô}ùþm÷ÊöÍ­òÑ_¹YÀ§û­'K–#ßlFÒ“ûäØñãò¿>³WŠ£9¹vs•¬Z·Bbñå·¾‘™Þ³GäÔÉãV¶ÿô¤¬X™”‰‘—ä3Ÿ}F>û•ƒòþßþˆl\S/÷m¯ñä=Ÿ/’T&!ž,‘á¶>‰ž= o½¶FÊ pûáÒJ€}ž—T©X$§&­>F®ÌANes€6ó ? óï,cþ&ÌÔØ€ ÷“¶3‡ ÊUIº¶M2›× :7IX/ˆù\§ca…8'gO9qNZËVˆ4Û œˆ?Ê+MÓßλòéÄ¡pÎ|΂™ž@†‡{¥ }§iÅ:Œƒ³¾(“É éê8)mmÇ%R\.Go~ŸìÚ¾BnÙê=¾.D[yà]a'¯¹\FÆúÏJOÇqð30}¢Øv-cÊÂŒ³,ƒWšÒ`¾ÆíóÿUX³œIÃŒ+&',q(…Ñ» «i¼+^Ƶ¬ 댎Êç&$äüiÎg朩æë\kÂ2ÎùŒ°T\8q$~]†Õù“ù„ó¢á6/›ôXŽFΖs­Òào¦3_—g©3Pç0éQáÓ:3Ÿó¹9ïºÕ™ua–% ¾RQXÎçŸOVª3,¦Š—4Ê…­Â™mbQ µE4)KyãCåáS Y&=šq³œ©„iœðl°l,Œ3ÐÊeÆ©péó‰_ó·ùµË‘^Ä¡qâf ï˜æVNa‰ß¬‡Ö™oäñ™õ n-Ï8óY•Ó.Uà˜ÍɆÇÏyåä®»n„;“jÙ±1Ïç܈*r[_F:Nö/É‘¾-‘ñÍrðÞ«äºÕ k9u>þÙà‚Ö2RD¦’a9zFäš[o‘æ•­rýÚrIÄÕ ç#5'/ÿ_90Ï:±Š ñ?*Iü‡õ³<8*å¡Ùg?§ðÌx—¤dr xCÃB÷ZWrÈ@Ùfß‹ÀµÍåÈ' öEÒ L㥎òͶ§if\óõnÂpd v©õ <x•y¼ÔAÉÇPàv^»Q®Z߀gÄå"šÃrà‘”<óÃçåÙÞ-§’½²ï䔫[°þ¶òx[N&Ó€Ã~:,·jˆDZDŒåX$¥¡Aß?Û’×µ\'+¶m­Í0ûGìI8—å 8lMÏ6Pâ‰Â‚Á¯9Žt8¨OM¥% žKj65–É[Ë£ø"8Ñk† '”‚Á–:†R)ÍÂN¶¶¼çž¶@ËJ) ·‰´´ÎìOôöŸJaBFýÂà+ ˪`ÒÃX€:Ù\r’ޯЬe[°i»NØš0n¿íÚ©à°XfYoTß ¹,¿— kS’´0Ðä¹'ƒS§![V\6eE8Â3(oæ>¾tjùY(·¶•˜4x*º(ƒ‡yW™m´¯Ø_¶Yæì—æRŠæ`gÆÝ`™fÂxÅe.ŒÑâ饤é&̱ŠÛ$Ÿ=vÊ๠s:ߦIE.8*ph2h SI>¼ÄK*ìçG¬mÀ¢íÌgŸ3C(³Û »ž!ZpÙô-…ÀjüàpÖ.œlb¬ ÿ‚XâZ}µ47ÖÈ-k8± ]’öÈÑCÇet¤M²ƒ§%3‹Ef5”}¼dçŖ,z*¢&Ý@Ûa0N8Ûýà×d,Ÿ‡vï—ŸÁrbJÊp×Óò¹/=*GŽ÷É˧°Ü %®(“_ùß”ëKecmJþʧäÅCý–îÿìå‘êJéþå÷Ê»îZ/p#Øqè«òµïŸ“¯?Ð=GL÷ý»eýÚ&yÛë×I²÷Yè9+Ÿþ§‡ä–×ß+wÞ÷f©Âˆ\ºßšTþê/>+±Òjyë;ß/áI»mYåÄ :w\þä³?”}‡p€d2-ø'_‘Õ×Ê»?X+oÝY+%Xv†lf\ÒSüÞ 9•@ èö'­:wvÉu7Ý*_¿ÿEiï¶PÜtÏró7Ièù?—¶öùÊ“¶¥oÅÚUòóÿù—åîmUÖæuv>/mÇŸ’þñað5«ÄÕ45Êmoy³%Ÿ–º2¼(¥óè÷pêò¤üë7ɩ޼ôEä®[[0ùÙÖ‹æ[~Všê*-üƒÏJo·-«“mØæÐg×¼Ýö†;å;jfêüø×ÿ\Ž9,ÿïÁrŽZVm9½ù]o”·¿û ²± °=¦Yõ»”8ð±?ÑÊÍ—BöÏË!$''dl f°&O|Áà ®ä“€!¬†HpŸÄ>‹í½rpß^kâȇJdªáMòλ×ɪ¦2iª…‚ciYY™xNNn“Ï}ñQEcݯ~ã¯JCËZ¹eo¥ÎaÙiù·ûOH¬¤XJ°ÏîÅçž•¢ŠF!ÜÍ«cR]2ûP•·Ôôª ² °œt2¹0üÙÒ€…P¼žÊ¬—-¡ ÇÊa™†²0°ãÀ òù¯’á1{k q„c iºñ}ò曥±*fáï?({g`Uñ(oÀž@ðvÝê2)Çx¬Ê[dlT¢q¸’Á¢‡~X:;Ú-Y½õ]ÍØ’2ú€<¼')c™Ùõsï‘ꢀ”ãå+>+íí½òõoî—æ[~Fjy¹¶ò°|÷Yéê±_¢‡‡z…Cùª¤víMò®;WIy<%¡üiyò™v¦½O?1ól’õ÷ÊMÛ[e×Õ R[ôOóY+ïËy·ž•V<¯ÀçÈ~AÅÊ9šs¦Î»…ε^ó sþ¼4È£©3°îª˜ôT0uÂROðÒ/¼ê?ŸÎàä‡:ås!4²00LLÚÖNòìTqdþìÈâm¤S4µiœ †¦I¦kߪX!^ŒÓçã ¼¦3?qáÙHÎÄkÆ™O«ãó•#n¥¡ü)}âàÄ ¿4”æk'%-'³ã„¡B©eTV–¶µßž†:s°´ªñ ÿ!Q½¦IgñÆÅ…<1°ÁäóØŒ¯N¢®l,’îáŒtŸ;‡ö0!ÉQynïi9w¶‡2F°±¹ Ψ&ûååG¤"Ò‚Á¸% Ü`¯ŒÙ ¤$IJ ÷å'djrJž?ÐŽ-AØ“8U h'’’pœ<~0Y龦K·CXÆ“§{em?–¡Ñ¤*¡{æ°á9›žÓ8Ùɾ‰œÔ`¹”oó Yœ Ìã*âd`;Ä[66m‡Eìͬ ãì&¨¿ÝM"Ñ0–×íÓQ¹lZ’ýrî\Ÿœ:7"EegaEÁieKvIßÙ—1Y$|¤ –µœTVÔI{[›ôvEäÀ¾Ãr}ËÕRY’Àä . 
IDAT0.m=²÷@‡ Œa3p, 5 XØÆû0çäÐKØÿ³­{½ðb“CG;äÌénéÄrw¨¢ ënKÁŸ Jªåðàr›‚rèH`ÏZ{`rÙ2©(¯”‰¾3Òß~J^ÚL¶¯ˆKe)NCy8y95,áÒ•RÁ³Ç^­³¨Ûé§dßóGdÕ®Mxþ¬»C@—à'û•Ùù[ƒÆí6k§ë€Ç¼ÅƉ׉ˋáhA2Ed-“æØ†x¡Ÿc2“BûƒBÒ&ý}PR°ŒÂAè>gˆ¨t•Ôã´À"œK ýŸ•Sgz¥w4,-u°*e§dr¨Czñ’Âa¶·f­”@©ÊN ¢}t ŸÅÐvcXö‚ÂOÃe·i•‰U'ÈÇâokPá§&±g{žGFìqŠcÙðЈŒŒãpÚ^8V k.ÆMÀæ3C2Ð×-§OuÊH:"iX°*‹±ÝÊ —G»ÀWï¦rìwH5Ú|?`O¶k˜þÀ¢ÒX• ìwKIÛÉò±fú1žßô£µnàkb¤|uKû¹ÓÒ?ˆÃt18]Ç)þ _6qب¿PF’°ªO¥$Ç%Ù0Ú Æ…©‰L®R>•‘Ò"ÌSƒàô»ìm?'=s®Uz„%.–e0õç\Ëù“ù„×2¦À4Å«uUXÅÍ;i·êNÞT&JÃ7Å«°*c3x•Ó•7­³ßÞ`øgQ 5Ò‘{ßM³ºHÆ©ñ+ƒço.=j`œ{Ũ­RS%Ó ÄËÆÀ ‡,«ëÔ…Ð`9¾eÐtª´¼Êiƒ#Mò¬ð8y"?Ê»Y'›°üÍÀNA‹ ÖCƒ×4Þ©“ëE䌴Yœ–.óÃðf™‹0ø[•žvÊSíR•*‚ŒV£“pâက£ßX,-Ë»wMÊþ#ƒò»G`áÙ%òŸ>ú\µ6%—yß/Ý+éî=ÒwìIùËOý½´½áM2ðÖ×ËZ(~µcöätÝvœ>…UíöÛ6Jiäeééj“}â°”4n”m¯ß%ø.œJvJçñä×þüÛrò¥Ò?`W‡;e²¿[^:’5°(Á»€ÔÑë>&°tjLŽœÁ²cûìøœR­'ìÉg(~•LÖTË[îø‚LNeåè¹ ¼îÆfÙ¼µIvmÄÄ%Î-ÃpEST¬öÅ¥±…®*0YdR2>Ô.ÇÏfaÑËÉÞ—÷ÈGã§e}k± ¼ðYùþc_’O±Wbµ·ÈöíåÏþø?ÈßÿÍ'ålÛùì_üoÐü}©H¬‘âüùÞƒåïþé”l½ëíòö]ayóÎ)i?òmàí”?ø¿_•æÖÙ2š’»ê÷ÈßüÓ~9pl\6ÞöÓróMŲ£%+÷ã«ÒÞ9Žƒ&Ù>,£[+`-—¿úÔ£²o›ìØÁ¾ÆírË­7É¡ïþ±üè…GäsŸ<(ñòß‘U°Œ^~@¾ñ蘜ém–üÕ‡åæšo—¿ø›ïÈÁ§~"ï~Nv}ç“RÜRkµ79½’iìwìì·ÎÀ~Ë|5Î’yLgûºçØÀA•éæ nÆ:èzшâE­¤ߥ$_Óí©ä¶ä¬ý„¤A|„!¬†ñ¡é,™ÖÈ}o~›•5†1ík_úWùÉ7EöcÉÖmŸ”"œïÙ/ŸûüƒÒ7Že²ú7Ê[Þ±BÂgäÄÃ'Ï<óe9®“pâ÷eM|HêÓmÒÙ~c*&oL"+Z6¢ ¯”u«ÊaQµ]$©¬¸äjñ†¯Ápy–þõ²°VŒÛJÎÐà°¼øÂA‘´KË-PpÊayÊÃËÈ>Ù»î>!+ny¿¬«¯;¶Dåð¡Ã8ÝÞ#O<üÙƒvÜ‘ŒÈmµ/ÀÕÊ °€“Ɇ·Éª–yÏäÐ÷þX:{É3_ÿ„l¨ü¯\Ù$q<++àÙ¤JJåÄþJÇŸ—çNK¼~³\÷sÿMZÖ—KeÑ„@Œ2ÐÛŽ!<Ë¡A DpJóæÄð AaìÄá‹ux¾YœTfßíí<…4Ûb¾ë®7ÉÍ·ß'/îyHŽ:(gžýŠ|#\*+jòò†5ä%Ô»w('?ÿž÷Í<ö»Ã&åÌÞY¿Ϧ§$¡,šíÊf޶IJ òb¾¶GæÏ×®˜ïÖ©t¸•#¼[Ðy‡–k¶CnwÒy—«NæüIþ48ËqÞ&ïfßcÜ9’i9çs®À‘ž9ךôططUO Ÿ¤Ç`ε´j=˜Ç¸SP¼,Gz ^u¦ÎàÆ˨NÀ¸É›©Ã˜¼Nƒâå|ï%cÖ™4ªªmºZÖíαKåÁüE)p,ÀÁ¦LÝ„¯fM F͈Œ«y–¦LnTäE³#+D˜LS‹&áÙ Y 6 4‡²œšC KA‘³ñêbY·rNf=ÈñR8LÞȫ֙°JȬ¿Ö™¸4NÞÍ:+~ÂЃÝAÏŸŒöbÞóyXTs\F˜­ûp´Y¢+­ÉšÊ›†LžTXZãÒÅÍlûø?Ò?b¿1ÝùÎ_–«®^+7¬o–Üø*êÝ.?<òo8׉å”Üúæ7ÈxˆË£IÍ–]Ò¼ml€BÆ)×\".úµŸ’ꦵҲv-ä ‹n²C5íÏi’ò T2sƒÑiÈ©±>™™]ŽDKeåµ?'Õû`2;)õ×Ü+ WÀÆý=ç—'š|Àe’TÒó28<)ÉsƒhkØf<®ÊêrùoòayÝø³‹á% n@^> åPÖ>ô‡• kšeã†fyÿ¼K^€Buà¯÷ÉôÊ^4õVôÀ[nÚ€·ÿF¹ê曥©*(ñâI)«9%%å|)j'Xr—gø–Œ ¤¤–‚¼ïzYeµì¹­/‡íø9ùüW÷BÉÆ>ÀÑ9¶ç{¸G…üì¯ÿªl]Ó(k° ÛøžÊhK«)X_8*ƒÕã²jí(Åû%9XlÑÊÇÖH9NF~è¿n•n0:Œù»¦Õ]&«À+ü‡ýŽ{\ø¢ÇA‘A•2íÃ̸²hŽfœ}\H$yW< ÑH'±4ÅÁ:4Œ›ÿÃÆÛwûĒヰ,ÛûŠÖ5X$éÁ2ì%óŸ.×¼¢=¬X•’É ¬]ÜÛ†¾:Ò}PúN=«&šz¹ó-›¡aj0!«[>"G?ó=,íÕÉ ¯ÇžáFaÓ–[¤uÛ­róºÜÿK”·¨e‰¶­5VQ×qðŸÂiÞ^Pz^ü– b£\gÔ– åE×G)ìW Ù›½ÓP†&“Ø—ß" «EvNTÈÖ­h?øºLIZ6®˜’šè¨<6ÊÓíR2š“g;G`ÝJápnHÞ÷¶õ˜èk¤¾®XBwÿºTvtÉÙÌXoâØs‘®‰52šBŸKuÊßü2–hËPw…l¼ç—¤¡±InÅþ¼ª"LÎ8€Õ1¾VéP±ç .eÖÊG –Lï…|ë]¿(k×7a4"7ÞtƒÔDú¤:}@޽ô É¢ÿì ¬’±)ZPlEBŸM5&\¶A~Ú¬4ËkÆÏÍv¥”™¦íÈ™?_»by·6Ètg9¦™óçSÎE¤É9—*W:'ê|¦eUis+Ç9žs¿9’?ÎgT˜¨´)^ÏÝæZ…e9¥Ç¹žs·Î™LWz¤iε^õ Žcq(^7]Cë̱„ŠŸÖ‰üèsÒ9œi7y3åcê>Ê›ò®²rãMëL…òkŽÃ‹Vàˆ€„H4®„™®q^JPãlHÚ€™ÇÆÄ;…¦–7Åa+66ÖŒk9Âq{•SXâ',y",•JÅË®qÞy™°Ê§Ò±àax1hœeµÎšGü¤ÇÆë²~¼â¨¸ée‡B²X~$_f ~‹¾Š<,š`b*†íñ¡“xû&`®ª$ ÂÄã’ŽTc³>­Ž¢CaÉ¥ 06ÒxY…”UVÃúÀÆcXͰxÍ tàJ8l†ÒÛs:æ¸Ñaœ„ÂL7s“±œòƒ:ˆ¥I–ᡊÙá›eK*WÂúqÎxIE5’V€GC3#Ž÷´M­¤il„¶wˆÍRÙ¿êÚ-Øß•â>ë4T+ñöbÉsã¶²n%¿\ÅD[…—[áÀê¨$q²5YV—Éš–ŒTpùuоÿ;‡âk—³yš6“•,ã¤AJÊJeê*Là )ÆËòúu­–U¡¼4'oÉØàYÜù ±\]V+i¸CÆ2soyh#ú ‘D <–êŠàÖ¥(’–Á¾é† ™žS¼¤V걨"ÂËûél/eÌÙ•xù{¹â‹ÁÅ~Á‹({“ö^”yâEU —…™(K8t<‰4”¤Qk\0P ®Jâð)äá( Ü;7ŠÓr,CÛJ,‡NMÀIy8cYï$ëø€Ë§ñ)ì[³÷‘•VÔHãÊ ²¢5‚6Ãi+sxe*yCyp‹8€ƒÒ”±(\Œ=ÑÈö¡a{L'Q§*d"Y&ñhË£¥Øwk6öÒ9ž ^Ù‘BìG8¨4Ø7k0Ã/ãêf|¥¦ºM/óë¯.8¨Ö†-EP|1væá¸R¡RÜuš‡'ðB<•uÕ+ñrW+5%˜Cà¯2 ްX$·haæ›±ç8­ã¼9û7„eÖú•¥¢&·Ny\Õ2V—¡Š¬¼Ô>±J’Ìaÿ)ƈ"(†æ³©€%•ý˜2 YØVð̶§”Ì43~¡ùn¸'ç¶î…Ó¹‡sÓtÞ%¬Â1®pœãèVCk9¦©, Ã8qé\æÄKþCi‰c9íóVAüQ~éCÅ¡xù›pNXâròFYh`9^ 7y3åªp„UÞœ²rãMëÌr<¨§2âïBÂÜ™°€¬ M‡Ló¢75J¾%뛲–ã’&R¯`–#]…5ãZ–ÂäE¯r Ë»³L£†Ì7âpòfÖ™° l`^u6ëOX>õcc–aÞ¥ O­±Ì*1U©ÓR›äï–Ö¸lHǾœÜú·{àjeË]o”uÛ¾ŠÍ½¶5î¿÷¿gàáW ¦º—–k+d}Ór¦3.í=öž4¥·`òÁ‘÷,`éñ‹Ï‰ì>`[‚ÙdÏc²&j/u'ê%„ÐìóîÑÜ„4L½,ÕexoFþó6 ”¥`oæAº<È¡ŽÇ°TÌÁdÚLq&t\ð‰Õ#ojU%Ò¸ŠïÙcõy%,ßUðñ•œ˜]NW ñÁv(»³}ŒXüLv?*çý|ð£pŒS 
r©™ê4~I\ÆY¹s—äëÀ47.W·Ü€C'tµó+ÍüÓ×3 þ…ß2“$=z\ò©ÙŽå«HÉ*©¹ênyÃÏ|_Ž;'ýÑߟ»ýÖ[äÆ;_'[×5@É[ôp1×rý`¿c\ì`·\ôƒ‡oßQì½ä@íÖ´Äd'¾îQÒ=%mÝýÖ²© ÛxÃ{q0aüâ=+a¹Ý GG&%{ûÀNÉ?üÕÇLP+Nehuä­’¶ñ°=+uñ¼l®ÊÍ9ˆBw1TÔ"†¯7E†Bvý;þ»´àêë`åâäÃI’ãæ¹ç?.Ým‡å ?øŒì˼Y’Å?-oÛŠ6?Ö&'ž’Gw?€qÂ~áR|¼Óª/xwþ-ºÏ6™@û›Â~9~Ç–jîËÈÖbê´Ü<¸á›o •ð´:1p™r¤ç6?+/Ç~øOÛyC•ô¶§pQ ŠÊGëeó¦øÔ &×0öÑàø…=wJ(Fk[DÖ·Àê8j¿}é2mƒ÷Ù—î—s§ËçvÉæ«·Ë_¾k§´´Öà°Ô©ùào¦%š°•I]¥› ¹ÜäjãeZ:—¾¢uÒ0þ4¦Ù%Tj]¹ ܹ`ƒ>Þ ­ e8Ú¤h\ï…,¡º, 1ƒÃÇ^ú¬üø‘.ùѳÕò;¿õ, UKU%¬Ž“mòÒ¡³ò??Î «l[ûÁç$놕ò6Ë} 딞8+™©N ¦_µ¨*ÊÉ¹ã“Øì}!Õò»¿ý^©*§u›×#U˜øÏâðE§dš®—º œÎ‘n¾^¶]{£¼ã=°ªvAnX~â!y¹g¯ìþzŸÜ½k•¬lª–•‰K/±bÐùöaùt¼AÒëß‹= iùõ7‡¬}àcc£²çéça‘k“®½ÿ&ÝÛ? ¹~Q&r^#SÞ…´+‹5ÈšÏKÛ Ó4˜íÎŒk>ïTˆu ’Îi:¿ªâÀt]N\hŽb1·‘†Î»ŠŸi ü͹T-svêÂÝh°-_æê˜f!¼æ]•ëÏvíVòÃ¥P.‹šu™•1—[‰·ÐÀ=x\Êu®ð˜åI×|qÛ‹MÈ9q.#Ò20mžž“·¼?(0^dt1•§ÆËëB Ͱ+PÁ¼ü˜}ƒ˜ä±|–„Ï£ñ$&4(Û¹†Õ£§ÌNž“aì_ÄAºÚ¶hŰWÅ~ÛnÁ¦ãM[×à;0ÙcÌ`_ZGûJ,y†ðfåKL4\ªcà2mË´|Æ£}íÒßqVNöÉv¸¹úêÕ²_Hrðí–X1–¦•ûTŸýœéÇi§Wyu“]?—–ÐüÑvˆ;‹ÁÙ\B1Te‘Î'áw+5í{ E\C!K¨® HÌbâ›è<)Øtn k_%kV×I] œdöŽÊä–+¢Xò À×ø‹`9sjlKf8M8–’8–™âx^cP¸Æ†0qC´”Ò{~>Í=JÜ;–uZ¥¥!,e°\Ât-“˜d‹‹°Of…³ØXS…×n+—©Á\BÄ$Ó–—ÝCÒÕ’áIqG»¸¼ƒ­`G[Á'¿GÐà,€ËN·6ã:ÆÌ¦Ûudºw9ÂÌÂ1nÁc4,Ų&ž Æ‘‰á~,÷ai3†å› NžñÅKâƒ8=9i¹¸Á2u1Ê ýŽáN¨Û ¢Ø‹…ý_8™:>6,G¾ #§:dVå¤uš5%Ê1´œ ‡•¬{F›ÐD šby‡  tq4;]V‡eï>(ç〯l@è“véoÎæß–Õlœ¿5dÒË:–Lb;Ú*ÇGNÌcx1˜˜Â—#‚²Ä¥ÎáŽs2:ˆ—†–D›VKS}•¬iBÿ>l‰`È`ËýP˜Ë¬I´ÙdF&±ÜÊIì•èÁsZB­7â´,ÌZ%Á>xçRg,vNrɘ¼ÜÕ)£ý%ÒÙ‡CøÞl Kº0Xmu ŠpœrÁ¾¾ ŽŽ”‡ñ<° VJ`5ʂDZ1lWÀ)ï0¾mÊáYôÈDn¯À÷šuPHá~HN9K{Êã”+Æ¢\'i»a»c°å¬µ˜ým?‡Ùöh—#œÙ†õ7Û©ÏBjáå8:—Æ,Êz™žKµœÎ¯ú›eÌ9o¾9Šs2Ë› „×¼ëÄ«ôº»Ñ`M_¨¼[¾WLù_',éêÒ«‰{>~TÆ„Q¼fY¯xVfº™iô^€FzA ;îØø¨ÕùŒ²%J­•a1¿(Œ¼FÚK¨\çJ)&žø|Öä±Ýrzp¿<𹯒;)ߦyüˆ<öäqëç–•%r÷][¤ô4|Á†|ÿ¸ü?·ï¾g½ä†öJßé§åÏ>±_V_µS®{Ý r=¬ *ᣪ'Ò¡f¸#IÁ²‡7ü, &¯@¹tuc¯O÷Qy釟‚"S,Ñ »d 'OöÊÖ«ÞR,þè ùÖ7Â䈃ø$IÝÔ¸<9:K Ûôªç%ßöŒdJÖÀí@£ç'ÁBpP‰Õ¡NµÂ2V½gó-¡š$Šgá°øìñ)îÇ䃤|¨~Šò°<ûõ‘—Ž¡2ÓÛÚÃØ·´_ƈ= ·!8Èðÿ¾}P^·£Qv—×_ýœxé„<ýÄË’jí‚æJ¹å-ï”/<õ˜%Û¯?Ö%¯¿qµÜ´µAúöÿ³üû·NÉ¿=Ð/?õ ø6lNhõ}UþåQÐ5Êß}é/¥µ¢ þ¯º¥u÷¤â$”sÛвPu.i>K.R±•±¹ìØŠ¿=ÉÖhÅPHÓØÄϰP9Âp¬²àñŽ-¢²·äüXvý»|óái¬¯”Û6E¤hòy¸¨è”üò‹Oñ2eš¡ÿÄ“Ò{âÛòô™œZÞ ×¾õ×åºæˆ4gäõÛË`¬}h ¾µEªƒ7Êã'H?¬Ùß¶M~þVìå `ÉpàQyüÇ/IöC7ÞkPã¨4â‹! ý9ìÙ’<Y@qÕú1që7¬Ø*ŽùüDY/F¸8¹ñÞ¾=ø}Ùa?hueLÖ¶$ää#Iœô„µÖ–ç°Ÿ3…ª!9ùèg¤·ãJáBŒa¿ß 7^/ƒÓr´·_îªGZšórß5òÔ7ÿE:`k;•›«BÒÚŠ=Xm6GtóÁ“³5«wJxCõÇiÔÞqùÔ×ʯ½uµ¬iPÎ |N>rD¶aŒÚT—–c»ï—ÎnÁ8ºHÉgî?$÷Þ´Bî†/ÈS§NÉó8a¿¿­\ê®{”††äè>!{Ï–H&Z+;[ÿöKUb‡–ŽËÓ8uúâX‰äpØ! ç¹éŒ½ÕíY4ÚY®W²¸Ï>ƒéöC!#húL{œkÃð¯ICÛ Û«:¦¥…ÌœKu~U˜ÅÜÕªfâ[LùB`½h°=²í-gPù·W ?\þuÖy>~TÆÎ2^4›nâ=¿E{`³ ÙZªȲ$›Ì- ÂWZxqIåRnüMÂt;6:e9{Å©ËX|ûÕàçn¢ƒ NÎå*`5À©²uµ²zûuØ«Ò"«*á‹'w3,vÇä÷·÷Éãmç`1xIò“í29ŒÍȵkaZ+×oÃ^´Ì Â’ÐZå)ž ì‡aWmÛÁNJtT8._ü2–õàz!Ä7.±$S[‰A)Û#?¹ÿû²æ­k¤"Þ,·mNÂ×ÐÝà¾#"=í'`ãW ìcáZL8ýØÇrþÒÉ ê·®—÷“>À¨ß G¼ŸËÁº˜ÍØnˆoÛôÈÄRKQ øw,EÑR]TW#X¡uÏÇY ¹š¯ß%Ïwž‘ôáùÚ7ÂVx½b鸇 †s–¬íyZúÛjdÓ[Þ#wîÂÞ§Žyñé'¥ïx™ì©Æ¢¶1iï‡2_se8Ž^#UµrÏ-ÏÉʘŒžyJÏ‘çê‹e¼ó„ïÆÉ=¸“¸åªfø–à ƮíRûÒ9Iõ÷Ê>óMÁj7”ä1(àȬ’«qJ¶µªXjâö†^ÏJ] ŽœÍñÞÌÎïSn°d]ÓsÓoãÎêh¾Mã|z|£ãtõ=©‚¥züY<õ”LvÅ%ß•…L;NŸŽÁz:%e ë¥~õ6ÙҌӗñ&™Šl–'¡< à Ƒ'¿#ãð§\€¶vX_*vHEé IÀobq1¾JS¿SV®ÍÈ ,E݇–G&ñ‚ƒíÿÙÉSÒ5Y/9|dþZœ6­Âaƒ¡©zÔ ÖGì `YV`±ÑÒŒ Òb.yøáœNãW<O>-SðÛ–9c+³ü Á-ÝiÐ(o‘›6×˦õë¤&†C/ÛÞ í©Ã’l“žÚm}¼è:– IDATÁCr°*ðKPeX†Ïíƒsmð¿y,$dåªSh—{ñI®ˆü¸;Ž—¸q™ÂòÕ×Á‡\TÊà+r8U‡S¸èKù>œVŸ”hI¹Ä+7ÈÎ-»¥­ý k>ÃT©*ìéÜ.-5Ç$å¹ÿØc8„•ìé;=‰/À¸Ï]yŒm¹¾çåø‹'Dúèñ`} ÊUâ|«¹I*p¨¡dø^y¾ëyé—Gü–}16a›À1òÁL«T®Þ€ýopÁ‹·¶¢íÊn§*i-qþ}‡;ìl¾GÝÐŽí¶:‹Ûù{6gáË^Hù…)Ø}Ћ†Wz!xÝ` ©Ï|0^üx¥»ñ°”4Sq/X[ !¿Laà¿” z¥åø’'Æ&JqqÀNɱÓC’x–1ø²ª¹A®ÚT/×]¿J^ÿî7KöV5—Â3{oÄXÊy÷ÿ]>þÙãLOÈËÏbÀ"›ææÛo—kvl”›¯j’d×QœœËʦµðÆ^Q+)X¶¨À­¾êz‰T6â¨vœ–žƒ3Ð3(‚»‘­««¤µß¾„Û’çvÿ_cØŒ=bur÷µ!ÙýB§<¼wHŽ=Ÿ“õ98ãƒÀXNÁn2ÞÅòel¦žÄi*Lp<„Á™ x×5‡äŽí"ÿ÷Ç=Ò›,—«àCm{+? 
v¾‚Â>‹ÓxÅø$Øp>hYN`ÀÀÒfTâeõRZ†'cÏ-4ìpi+VZ‡“¢üÜÜ0ê kÌt3àg®"p:J~*cAœ ‹Ã—ÖÒrèi‰=¾G~øƒ‡ >ÁëöuøŒöýlZ+Ø ÷¢ôœÅá‰þ™Ü±ëôœ”?øÌL:ÜË$rõÚ¨Œ§k%‚åç,]54ÔÁ ÉV¹wW…¬kèßû‡ýr|y2g£õë·ÉúÛåö«¡ ËPç­²qßnÉí–ï~ù~4X!« ,DÛnZ/×ßv³¬¬.ìùò™®ÚepƒBeXÈm÷p>¬Â9qhº}Ÿ[Î D›çû–«î“ÒêÃ2‰/l9ˆ¥;8vÞ÷cx‡œœÙyÔaY’ MQ¼ZÒᄬÙ0Ž˷ɾ瞒g,+¶Ô]+µ­rÝÆ*Xû{á0K‘}à ÈNêÞ°e–m,±t”á}r¦]àg“/ÆtΨ>š°yè<ˆƒV¥©cÒuî\¦ŸQ¦|»5ì”k6Ô@qŽÂmJ³$öõÊÀè Ùóă³¢Ã6Ê5àŠèVKqc.:Ûœöh¶3>‹Ì=6?ì\îüÔ+Y·ýÑî¿ÁÐòáO¼§ÕÚ$È‚ÎÀÍ{î·œ2ª#_'Œÿ{ñ §óÎöùÏÿå²f-OSÁ²Ã —_<æÅ—Èâ¨25%½ØÐi†,Zü CƒU§P±¥ Jöê@sˆM[‰ßqœïÇÛ+¬n°xöbŸJ{ØbÍÒØ\%¥Åp3¿Qü–bm©n"ô⎓”µU (Ypw€¥¯ž¾¸¿8%ƒ’/ÙŒ‰n *±vŠý@iìM¡åbÍ|þ)Žcëð0ßÕ;…2I©…¿´8\˜×?öiàÄ\>Â]Ä=?˜Ã{áˬ² ß”,Âä&à˜t ©;zÇ,…ªqE 6ã<50GÈÃ…¯ìÅËq/”°j8ô b_QŠmÿxÏæ¥¦Ž.S0‡rà†,Gǰ¯ ë«kqê3óÒp»Àï1à+ô«V~pw2ŒMÔƒC2xnNäÖIrkª¯±BîCåw1QÏ’v9ÕÆ}F©i¹_m€wý(¬+¹“²÷…SòŸ>ü5ùÛøøìX£ìØØ¥|§ê&ð%†3؋ԇýsÃ’/Ý&åø*D%ú¹Yáá^<»a8”ÅòX (uÕ€ÃÞ <ƒD)åãÎ%øùãî–ÝþDZV5¢Ÿ@ဒƉ õò³ïxÛ%àÈ ‰öC‘§Ús ò…5–« lØg ‚„U‡J|¢ ®+øü°/“{5'ø…ìëëŸîƒP€òa8ËÅV€–fK ôóí›/|„˦à?s âT3ö”ÂʇÖh›E€­(uK×Ep΃c*/CÓ´éÓ]Ϥõ8ÂwVMk‚maÄx€ ÿ´<—Á‡ —69^±ÎìSœø9 Ç ðÓýlÿ%Ñ) É•&°§ý'ǯ«`yJ÷.sîá÷dC+ØHÜüFjuå Ë2|I„‡©x‚<ºsŒÄ‡",xŽ%øTØÄ(Ú9–z;À^¼ðÍ©i\iõmº= Dð­Ú4œ@=$ÿüøÔƒþïû½7'YŠË*ñViÉ0ÀFyb]RÖå1{ËC%^Òâx–pàŒgã6~X_Á?Ï<¹Gž}fïLÿxIû¤.P›6o¼P-[6oµö*:ÎííxA Ë*ºVùÀß¿ð¾û»Ù·À©”.ã;Æ+è {1X áÔh1Á–4“ÓÒ‚‰cW9†Hk€æ~¾ò›&/`ÊS½Ôa³r.‹¥Q|ž)‡·î@´KzÜÄà |ÃÆä‚÷uõö'Vìé4PžX_Ñ\+“øtÍ$Êäà3.ˆÁ6†7nE^lF†"W‚Äô­h”úPßwœÂç °)º¨–@.Õ̆`Ðæ½eŽWX¶p ‚¸C°>qéˆ ¡×àË —W œCÎ Pñ•†:8 ìtøÎ„EqnN¤¾ò°ÒÉ*͸q)/`"ˆH"´ “Ü…Dëae´_¤8Q—WBILÊ<ðŸ> _“¨o &új‹ãû¨ç†qJjÔBI?]ñ¸½·*«‚Қ𖬌Ãl…ä‹›P.+à4ÓÓüVÖXJEQ˰øÞe A –bù8úäòC@#gW¥’‚"ÔâP8<Ÿ€ÒÃ>…ƒ%E•ð¹‡%Rnî·Ê ¡–W”à+WXÒ…(AexÎ|Ð@ü|.‚AH¶ÔAᎡ›Â•’«›©« „/tŒA§ÅËPÔòéÆC s8X|ÑO0䩸´ý`&Šñ†Á.úH+…ÐöG !‹›á‘W 'ªTþxzϲ"®Æpº/á5(cð¾Nï'±éfLú´zEqà)ö Xó™bë© ß\¦–…Å/Sïi,sO!•L*Ÿ ì ,F? cš²ã³q3¸8Õš-ÅVZY§Ÿ#>wCDx8\ä_—B¾w)¤¾Hš< Êñ‰ƒá+ð6AŽôìS¨ög¾8HáÔÕë“_¡H&.¬µlpe“o̬G ßeOùðÚd½]ÏÔûã°=­a8™6Í[¾•XR ¡Ùˆ9x¤ŸWV‰Ë!ðsf¡PävÛyìÐ ¯t2$Ƕ˗¾rî)rø”VPnÞ2÷SZX ‚û„ö@ÙÞ-dX‹bo¯²yæq*»¼jZïè†~X& D¢Lø\¼çþ8~¿‘}ØVbœ$x‚Š]6ÍÂ:¡ŒßøØ}¾ø*©P]ÜȺÐ(û:y_LP…‘¼ÏÖÕ®¯›òG¥Í-d𲸘@%’Š2‡ÊRŽ ”ÉÔ8,”v`’©„ýÕ†ÜA$ÚûÖ¨´•Q)ž'p‰¼àg3?Ë—À…H@_rˆÃWà.D’ËT–J•›B‚ºõðR¢ Áá Ão5r@UM¥‘ ’›’DEËäÇ\þ5ã&ŒÉƒIÃLŸ?Ÿ7/~æ–»2~q9yÍuï•ÿöpJï¬|ù»÷˃_:+_êé“U·ý|l¾E~ãw×É® õR[ÜÄ{eHh¶œÌ“8‰HE‚m“Klct©â Ü{šÂ§ æÀÂüCøùÊ™4Üú„â-‚[Zšb´Š’—`¹×Àž`uƒ%Tìô„+i“7¢SeIé9ëÁ­ & ÛZe3âU…hhi¥ã˜4o6¥YzN¹*ò欳âðª‡æ{•3i…Æ-§¿-°øÂJÖOÖ#°ëÁúЈ։¸¯ó9.ÄòÅ»SVfãJƒr0ƒ ¶oóùëó"¬®l ¨Åšµ<D+°îfcÔÆÃ†§qmüÚYé¬ÑŠƒ#;+á˜Îr,¯q¦³éà x¯$¦¨Ù9H/&(<Ë._˜UÞ–Š“ü(Oz_,.÷rÎÛbù¸làñöÁÒ[>DξqÕf,Å¢¿TUÕȺMð^ßÒßyRç¥ÓsöeÃú+Ɉµÿ J èg×âXBÅŒý†yvœ[ì‹ã‘ÆÉ³W9}V<åI\;ÖIiX<à·Â2i,Ã6LÖFUà™Ï ó«*‰:¯r~VXÊ“p„ás0Ëé¼>_9°Þæƒ5i~ÏÜ|áÒtòCü k¿¨ÍÒ3õåÝVvç–cýøœ¢a•›³þ&?*7³þNåÛ­Jc)÷Y)/²ô(>jÜÕÑk5 VŽ›ñÙ˜4Î;†°dœñ±;ÞÓ…£Þ}ôs?H¸ó·'ü•Nê0þ°!ñZl`ƒ¡Œ/·Àºhça\›Åð¹T™,†Æ« –IBÖ¾A¶Ýü+ò±?ü¸üã¿ü«|çß–¿þ·ÊGÞ{½l„`/+ç««®Kç–8/Z¢øMOÆuæþ'Ž+ö@K«ÖôLïBŽíW—åXÞ¸mšÏN³Í“å‡$×|î'Õ8—É£,ËqéQóYNyãÒ¬mµñ®ëH:6žói¦ÊJùa=˜Nš& ­媰ÖÄ?Í»o”y Ÿ*7§\/ë¥Ï†tgê ~Ìz°7ÿã µ_Óæô-ka­91¤T*Ù‡á ÇÂk—#˜ŸÃT^ò8Ã~Ï ubœõ°æ ËÚ‰;¡t{$Ëdb¶Á±"QFk Ç»ž–¬ˆu.ÆEÞR©Ð3Ÿ2]ˆ·ìë#}3˜4Ìtҋé8ù™„;ËŠ~ÍzPf:)šÏÜÄã|vÌ#¬Ž•|ÄáäKqP† f>ë¡òvãÇ”ÕädÉL[)FûˆÀW\‡+X§À´Ué,ݲ¾”­ÝgeåÆƒÙùmjm´šò4.ëWoØR!e›à|ME›s«Î¯¬•*œ‹#øÎ±ÂÒ2¨ó9•••á›Ð€!î1(W¼¼Êq^æšgƙƠeí_þ__Wž8ér¬áçÓ¨À…qr†Ö¾}sKŠ÷’aBpST”…Ógøûç8ÑqPÏO’œt#8-ÊO_QaP¼,Ï@EÃúºAZ8Û Â ÕÎÇË•ã…JiiqP¦H²äEƒÅ…ÇÎOOÓðÄ Xʇå äO• Ê€«^¬'OÄZ/ƒÓõ#ºÒ±y³OªŒmø>*”®BycU`T†6žÙ¿|ž6?´às¹qî ©YÎù<‹Û³s“‹‰KËòn=ÈRŸ£¦™07qxÉJù™˜`}feÌ6ÈZITšiƒÆs´Û&#˜¼iýiý^'^(p¢™Ê¿ÕGð™°œP×À¹vý….\ žF[0`ËÊŠ­>F×)tSŒ“Éa´ÎËP^ˆ³¼¼Ôµç~ÊCËårö)vÎý јEù<\NYºfG]YàüFAy/¯(…ΦçÔK”wºìa3ËYuƾóBh$ʧ5yϪÿô¸C)^•1ÓL~Êøœœa{•Óz†©˜ésZè¾(îÐÁåì™3!v`^~Xºb1øÚjXe9lu?™¹tÜ,©§>}kÜ…ÉÑ/}yI€žìŸ{ú LvüüUPšV¬“ÎŽ.ù̧ÿy‰Œrrœo,[(Ÿdu‚ÏBì-„c¡ü…ðÏ—¿î ÍŸ¶[ÞBôÜʸ¥Ï…<7œ ¥yñî•N|Î'•q*NÚ?ô³Zªœ:1W ÖôsgÏTHÓLX¦u´Ï–%^íçŽ.ªœBÄuÓ5‡“w'¬Íû1E7£$-TŽÜh0}1õ7ù™¯ùYµz%ÜR9ÝU‘âÜ@œ¥À½îö×Ë<'Ú&}E;ÚÞa²·7§ñp#–‚‡ÍÐ|鈞êsðªjUŠq 
Yhöl¾Q˜nYÿ­8ü£âƒäðíƒt¦1ÏŒ»Ñ°éáí 4g] gâUº^°^åÈ y"n¼™ü˜qÖÝÉiDá+ª²¢ÚrÌGM^MóÊßrÞ‰ŸábÒXN~}\¾Ü$°fÝj¹÷¾{ðñíÖ@Êñ$Q^ ×Â'7|~š/+MT´\iu»’ëS[Ëon/.,J»ã.|BŸ):ßåhétŠJ=aÚMœÎÁËâTx"Pàé–.=ó!)Kƒ#H”aYªŒÃªk)aŒ3yfÜÆxÊ#h‘&i“™x‘d)„Ìwƒõ*G¥‹<•àÄ’o&?f|!äçb+p¶öï+p”µ^½X»v ¾Ã»æÕ[Ÿs_¾| ¸HàÈÑ#XŸÝzæb%q.×°(Ž©¼q=š ‡Zu¨”aÃ$ó¸Õ£› K:å1{O a­&OSfŬÍöêçåK£v:ãEP”âØûÀ8ô1 ¯w£aÒKÀ¥y œ‰—,¶V…’0¤RfĨåÅtûˆêÏ„r¸‰×Œk=f\äˆ>;ÝWp‘Éùè} øð%àKÀ—€/eõ- ¦N¢i Þ©˜Z ¨2ç7Æóp›å´¬òÇrfþ|q¥AåÍŽÛ$W¼Ê„VÓÝî\zT+–[þbÓt)ÓæË>)Ç8ó¼âʃɉË+·Îú;ñ2yfúbëâÃûð%àKÀ—€/__ªßÒ’8“M*OŠÔŒF^ãNX¦«‚r~¼0&=3nâUúf¾×|ón+Ló×Ç„_(nâã!*R–uÊóœqæN­u´ž™qòÏ‹é”+óÌø|4ˆ›eyå7ü.$)?ß—€/_¾| øpJ ž~(DT¨tÙ‘J–Æ©ø0n+@çÇ «e_ 'B©üi}-Æý?¾| øð%àKÀ—À%—€¹„ê+p‹x¶Åp¶kи*qšþj¿;ëüj¯Ï¿/_¾| ø¸$°¤%Ô+¡âK©—a³8¹Ê`/¹Úq*9´Zé2åRp_®eè ‡õSkãåʧϗ/_¾| ø¸’%@DýÓ±ž¾·ŒO›JœÓg§Ù»fœA÷²i\Óm%qiŸÒrâ5é- .›x­ ù| øð%àKÀ—€/‹*.ŸòË$fcšâß/H´VéEDŒ3P±óŠ3ŸyªüÍ_×Ŧ¡¼òî_¾| øð%àKàÒHÀ·À½r×¢$EËy@€–3=yÊåJgœÊÓiý"gœ4X†éJÃŒ“q8ñêa ¦_ ò¥4¬Êø| øð%àKÀ—€/e“€¿„ºl¢\Dº©Ø¨ˆñ!Q2—S™OX5Ÿ:ã4­²ŒâsÆ—k ՉפgÒÐúøw_¾| øð%àKàÂ%à/¡^¸ — -e楈™Fkƒ3Î4æ1}1qÂ:q],cþ_¾| øð%àKà¢IÀ_B½h¢½0Ä\ÒÔ`.ojÚå~×ec.ãúÁ—€/_¾| ø¸p peNƒ¯À©$^w*EfàoZÑÔQ0{Åiz%¬â œWœ „m„0fœ´—BƒtIߤgÖÃûð%àKÀ—€/_ K€s©ß<¢’¸ÌïT¨¨éEv©\¹)Tª,QÓ8ïó)mÄC|ª´)Þå AÞ”†ýËÿëKÀ—€/_¾| ,U¾n©’»ŒÊ9—[MÖ¨¬Ïwb•JË{Xå¡â •;™Kºfœ¨¬ÆÄkÆ™Ï2&^›|ûq_¾| øð%àK`®hTÑpÙ)p©T“ÿìþ/eTïöd”`("Ð'–-PI¡BúÙlF°8‰u¿ˆÄc Žó–  ¢@ å&+éô´ÃßpŠÎÒëif¹Êa•¶¶³rüxÛLr0’ú–ÍRž(•µ­Õ–"Ë Ú¶¤@±IIOϰ¥àE¢PjËk% e/.³–;Ò²•ëYk«åNã¬㦵ΌÏ0µ„·ÁŸ”ÌÄ‹òÛùyî¹CÞÕÁŒË¬Ä=¿ø{²~ëvyï½ë%^DK•‡©Å›•“ÍLÈËÿ¼xà„üéÿ9 9Xœ‚‘ )ª¼V>ùÉßkvl2XâtjÒ%B. .WÈçÓ’=*Ÿÿǯɧ?ý- íí?óËr×Ï¿_îÞZ.ãÃýòÈÃ?±É…aíhÝ%wo«’ºDÔR’â£ëèýröÔ!ùÈǾ$C#°6¥3³E Óp(,‰† ²åÞʇÞ{½lXY%eh鉳Òvë¿+c£+)“üéçdÝêV¹sK…E›Jeb*b沩§b资ªÊ—jWåj–QïX&Ù&½Gå=ïú˜ttôYfÀÿð»'[w\'oÚQ=£\šÈóäh»´ú–|àÃÿ(]ÝÃPL²¨­Žö³µ,xá"©h¾MÞúö{äío¿Cššª¥(’’¨»bhÒ¸㩱òÜž}òü™%çòšùÀÇÿEnØÔ Wµ”^‰UöëäKÀ—€/ËBæ åe£Àá¼£äsSÒ×Û#토 ¬Eʱlg+g¹ô& {‰ñÈÁ}’ÆDr[4×$¤*3Ê ¸o+-gÎõÉ©ÓÝm*o±D^J`\ ˆTu³qrnŸžß #R”Uð1224SÿþÁ™€B‘ÏMJrjHΞ9-££c’ Ƥ<°E&×%$_{^²é1™>%¼(ÇŽ“3§ÏÉÄdʲ$•W$`iJÊ–TF’X.|éy9t´Ñªñöuµ¨'–uÓSÒÕÑ)Ã#ã/IÈÈDJ’XꥂµÜrp«ŒÒ =¯€Ö#Ù\Zº»º¤£­Û›´äÇrZv€dFex°[žyv¿œ>Õ&ýãV¹2X"¹tÊ01> Épê˜~¹U^\Ó"UÕ ‰B{­Êz*™”ö¶vK“™€ŒMÁê™±•Þת\üzûð%àKà•”Àe¡ÀqIMòîÖ³@Ë2•Û%Š-d IDAT[r™ìyDòY{‰ñ‰oý½ì¯ª—p¢AÞ¸k³ÜµsÅd—‚µg\¾ÿD^޶5ÒkX½Aî}÷;¥¶©K¶¡9*-MËæ«?Înbs^‡Œ—çŸyRö<û‚L¦Ã²óÝÛäÛ«¥¹Ê[iÍb/áèà 9òØÇäO?¾Oöža›K ×ì¼ZΞn“ÇOYéãýgåàwþTþ¿Öm¹VþöwJÂóTU—Eg^¤ˆ¹ÔéE"Ä~·p²ÏoG–|§ -|ÉJrüe9x`üÚo~~ÚM›ÖKeU…•FÙœ8~Z’{åï Ëž½ÇeÇ5ÿK¥ÞrŸƒì ü —b*^nüàKÀ—€/_¯¨.»%TK!š¶ª™’¨Ûø:YyÕòï»Q*JC°& Kfò^(òÅ/í–ÇŽÉÄè€üà3¿'¹÷ËÊõ¿$k°îÈ Ij–•ï~AºûÇäPWLNv„a!ÙÔš‘»ßôiZÑ*õUÅröàåОïÊ3Ov亼G~üU´jSÂÛäÄe]Z»?hÌÆÑõuxí‹©Z,¸Dæ[Ðzê)ü-…·ÓS‚½ðæmƵ7\ƒ—ÝpÖUО)¦ÛQó½¤w€ô|U³áÉt´t»Q’Fu¹׿îv”cUE¸Î®¯o¿üåU§¬©kZ׈3§O±ŸNc¶è*ÔyO¡:ý8Žöf â/GÑú—âå—Ô¢¢(“}ìÆxÿ õá»?| óóAx}¸îMoEIaj²ð»ï~ ýCã c¾©:ˆ­;vcëÎÝh¨)¦Fw ³Óƒ¸çž'¸ÞSk_Ö¬&Òð»ßüó/Bnaþêu›“•¡œ8bLâI8H-pHk+íüP`Sã}ø>Çö6qŠ‘öMMŽcïc(ÐHz&抯Â_߈²œÝ÷yt†ÐíGó,½dSü^=¦øìMŒàÞŸ¥vxgºÆ°¾6@Aœ¦{:ª¬é¨,-Æî&-tƒZcáõ“¿þºø®¸{o6 r"„IÃÚM[H‡ #Ccèë@Qe5®¹íNÔæzÉÏYY#Úz€ï–ýâpwz†Ó14Æv‡Õ«ªq-ßÒ—âÜ$)¶SÃ8Ó9®`=­ÜüŠ7'Ò¦J9?8¸0Ä/Œ (AKi!Ûsííʤx¶í(úû0:Cï¸_öÃè¥Éë©'Ñ;0ƒ|ŸìôpRIÃôÀª/Â<å’l_5z»:pâÈ! 
—V"!z¢.Ðt6>„p`š—Ý­ÔеâðÁ3VÚIO•ÅZ€Ë+F¨b‹×À“—FÍN/Ím‡ÑÖ>‚ŠšxJ(,®“8kT4‚4†'pŒÂN_ïζcݵ¯…¿¨ åÙ2aÅ1®*£€5‡ÓÇ[8É·ct„æc¦4NŒhiíB½ÿ‚—ÕÃÃõ\ÆL¨€ø˜ÂøØPLó–ÎÉ.§xê×nÁÎÝ—¡še&»¨Ñœjý'\ àð$‰äßü ÆfCª~šâýbðË@š™À0yß|´G=©L˜Ÿ<·™…ÃÈ«çúAOÅ™aTU”sòŸWü9Þ¬ùSU;ªíW°ýdŽTÁµ…±þìºe^6*2ªoLJ{Øgûp²'ˆö=‘æå—ÀŸ™Ç²ÖÚ¾(¡Šï‚žø Ä 8?ÃÅøsqA$#+µë·¡®ë}.þùPY8‡ÒœqÔ¬Úé™y®ÌD€kç”§p˜ííaûÛpìÐ^5Îú'Òq쌗!¬®ò ~ãnÌ®  ° n >Sä×Ⴧe^ŸW ¿+Gq¬ù8f+j±sé‡pðX:™7X˹D ¬@kü¦Æz1ÜKmìþý˜üÙyØ==5:‡{qüÇ|ÿh˜_MâŒ\dV¢¤(^׬âqss3ÇÔ”¢%@3¹·û÷íÃ<Ÿ¯âŠŸ—uûµ eeô jÙÅù²ŽUúîè‘§èé,^ËéÊ?>>ªðJaàfËʰk50Ígèøá}h=Ä™a½@ž¥`ù.l¯ãZ:ŽÏ wg)hõ÷õàÈ¡#8Ô<€–¶Lõ ~ pÿáêËX_[Vå C'.ÅèïÆ)>ƒ§NžÁcå¢(?ŒUå\[ÑÃ4µŸBeÃ$¶NQ‘ÉÏ ~}½tð9Ù¬úsÿ?:ûÝèMÇy88<ˆUkw +s- Òré´&m¤­[Óv”´Q°\’«’kõ51Úü\3)Æ'9p8àpà™àÀóÇ„ÊEô!w.gØd’Äa!‡Ú•Lþ+ M]o»ã"j|Žap`Dñaht­íc¸¸Ìo|ý;øô'ÿ“É¿¨iTZy¼?‚'÷?…RN"úø‡qß}‡q÷÷O`žkÍLsYûL!¾ôµ.ÝU Ï‚7Ýð.z8N"@dÁ…0¦û€N‰£Á”æzü짘ùè—±~Ó:\ìêÂSœ¤5"³Ô‹ìºØ~u{„¡‚øå½?FWW?…¹¶ïƒ+« ë6dWÜã5+8„œ‰øôÝÍœ£8 RÀœ| þ4ŒîæµøË[¶s"c#D!Úa¸»5VΟëßøì¾d#šò…,s¢+=W½þ-hêDFýQÂÚú|dgO 0ÎÉ4 ·,ôŸœœÄ_ÿžj>/üà8BÁ  +¡Â°»05Ó‹Ÿ|ö1ܗ׈Òê͸ë[AQö¦F,þ´façM³XÅZÅqAÖùÙ¡þ\}jýšŒ>üÇgˆÃ‡O᱇QÀ°>ݯiâŽÌu’j,““8+,'õöŽã‹_~7n÷¢®<6l@FÞ:¬ÙµwÿâVŠÞH8>K (0‹Gï8ÞõÎaÏ£OPXžUãLè‘?.§Dg °ç±ýÔ¾îÄ›?üe¼rý'û6üð»w)rÄ£õûwQLç…]È(õ£r?‚8¸¿ mÈ82/¡v¶Œë294š}BÚ^<þÈQ<5(ª(D•‡ŽÇ:ðŠ×¿›t̰þÄ1¿—cþàþCøÆ} ïûÒϱ½>€¿æqwt½ [’ ¡â-v¡¦n-i×SàJ~9fÒÅ„J‰')EÂÎÙw÷üèû\ËIÁíûáw]êÃ%XˆBsµ§kÞùS.K`—³ì[Ê\QyÐòøÃxâñ§øÑæ#ßCìT~T?ªyâówMÓ$Ò†ðþ¿û öíÙGm%½’.LÊózxü±}Šq¬‘1Õ0ÄŽ¶1¬òLbx¼7¾â/(ÔŽ².­±/IW„ýy` ‚ÃûŸÄÏïù™êËMÛ¶á†Íìÿÿïãxâ± ´ ¬y\öï»]õ¿ÐvýÖb”çkÍjCK‡$qÀŽ„°øíœü\\joN:pà«T×jÛ´xh¢]UWƒ‚5[QUÛ¤4h aÒŒ‡F%ö›ê»y ál£ÄO,«©âzB†ãiZ‡‡|\ò!À‡”Û—‰²õ/ÇêÂ!jÀzðËßöRèç >Kýc(È b,»ZäQýŒ±dÖaÍ–\*Ñ‹¶ÓÔt·ö) koðÂÇ% O>¦>¸æé`‘žYƒuk«ÐX_šðœ ý:ã„8¾»öa–^Ìc C35=G'/6]õZTçÍ Ø7‚}‡ÏbtlZ='ùœ#£—Ó±gd(¶¦­»°¦xýtúùÞ÷î'þIÚ®häMG€¶;Éá€Ãg€Ï NÅ*aƒ¦žÄš•é÷¢  “–5~µsr $Ç#«sµ/«Ç¸'…B¡ÚŸº—‚ƒC’Æ€³yÅÕ¸ôU‚—ÖrÝÑL?~½ï0&ú‡©q c/¸+¶¬Á5—®ÁÝ? «£K½ +7_‡õëêqû«7À7y?©™òûiª¤,äËÌÆKo{36—¡'ð‹_>{÷tÁãç:œuº ‘p¡é.„¹îK’´A‰A¤- IM~s„‰ ó®l,P3÷¦Û_Ša„ÚùÀ<²J6âú›^ŽmÛ×3„ s½V2N©_ø*I9ƒxýXÛTÎ5Y²ØßN¢…+Bn^¶m«Q•æ7éåd*¦±$ù ÁùqŒõ<†÷v ký„×Õ›¯FÝÚMø“WVbüôoÐqhûšÇ05;†ù‰ ŽÒ¤:ÑÜŽµ¥@m¢VÒãó#+; ›·nDùöKQVÕ@wê׃Œ‡HD„ú¤$ ¶úÎE­^CÓÔ74àúW¾’}wÓ\o93$˜ÞŒ|4]r®l8…­EûðØþQz·2Ä ?~†¹Fm8k“y½ðÐî˜É@ÓÔ™aýöÝÔ"¿»ó÷à7Àôš1ÏR ŸÇ©–ô•´ÁO§q<‘:Ò=Y(¨Üˆ—íÀK®Z‡_Üã¢Çq‡zÎmê;öa²÷°“ŒTHvv]÷Fì\5ˆ¦l®µ ™3â¥Þ‡v~°À•‰±5ÔÆÇ?>Äü~ÕÍŽ7Eó¡Ã¸÷ÞGÔ;*ÍåáºÝ³cv•ιÇVÌ=7ëb©ßÐ+Fyaä]/ëndáp$mž^„.$NÒÃ:´žVßE~S&¶sTE‹¾ù šOÊdMïÕâ]¨Xµ¯¢4{¦™º¤0ÞwÃþóÇ\;Õĉßý WÔlź+éàV¢U »?Ë5u%TulFIu5>ùÙ‹fbÓÔ TGÚáÄT°“´Ç®šª|Ô3~šIi4MúŠ_wv­ÊòqÑ:Wõ`!$íLœ@åÚ•žËCTº‰"Á#»°™cZ”ìÌœ¼ö/ÿ;×­BC¾6…FÁÃG05Ö¢ò¼ô¬õQÛ(|±“2%ûUô†Ð)áUÄD¤‡+Ÿš˜ã%§HÏÁƒq ûÍ›_ávÜvÛ5¸hûFMSc5ŸkñÛÖ¯p}Ü)t÷ â¡ßÃP}&¶½Xãþd”\¦ÊKŽ˜:#¿âC2L}r4|½äÆ[ð†;_ƒ­¥ŒvïÇ™cnš ¨]&â”.u&ZÇFì|Q6~sß§ð‰½—t/Ôú=«ˆ˜ÓÝøô'~_nò+7ã_>ù×È àLó,8Æ«‹¶ã²­õز±ž©Ä¾Bó?†ïŒLîè`R×í½úŸÖÔ;VÑ´HóZ¨£mH÷~Í€á,yWXÜ…¹MAšñöa͉b¦nl¬ÀUWoÂÐ]kõÔ˜¯Y‡¶ùP’Ù‡Pæ þÏëñ«GÛñÀþa5æÖïvmˆá—“—Üñ¬ßºïû£|½ìnò‡šÆTIÆC*/Tå Ì1lÆŠ¬OÛuëû°am=®ÝQüÐ}\öàÁÛÞ¿2+êÖà“C–œËèœt5.»êó8uª]õA[Çòé]^µn'Þók108«,W׬Bµ¯OÜûm<ü»Ó4Qw+^Ðéd-ÃÞôœøæ‡N*²¥ŽªÚFü÷×?Âõ—Èõ»°…æéÇ?ˆGÞ›Ð4áßh;—\P'Ïkv?vT£ÈËø£§Ê´x ý­ÀMëpì¹ÿA®»ÂÌÛ‘åÁßþãßc{n~>Ê+¸Æ•NSNr8àpÀáÀÓáÀóÊ„*D$âáŸháD“¨+̧Žæ/‘: áajÚæ§Gø²M4㈕¬:—fTŸhh¦›æù™¾ýÕÏýp!Æó:˰c“Ô Ðd97Ú‚™ÀÌø¦ÅN. 
,>šCá,š§ÐÚr’NÝüÚïGNΓð»§á¢7ßTÔ‹OÊNLÌal,þEΙŒ4Ó+6jà —QGÛ™(0ŠŠK™- œ¢‡©ÛǼqS_˜¼›3½+£Š*£aÒæhÝ_f9µz`hö,½MýôPœFE.…$†Á°aÓ(@ËÖHSSóŠÏbrôøiš¥—ª6m'ÒK‡OäËz(©K(<(Éæ¢šˆ23¸X;\€´` =½Ê”&ëzO5£ØEæÅFˆþßµÀ#ü¡=õœõI½†¯¹™\3VÃÈ)(£ ]»Hk$T‹¿Ðe• ˆ& )y¨ªÛ‰×ÞâÆàà.»¢Ki9gh.—x„Ý]ÔÊvw3Vñ„DX:Ð,—˜=Œ3G¬ì3ôî¿÷{xêqzÃε0® ½%iB ªu3˜9Žá‰,ŒÏPÀ‰¦tn[VVUIói12³¸T€)* M®Ú^…“9Aìm"=Ówƒ8Ûz3St˜a£Ò©Ì/ª@yYŽÐ‘£sDóTù9|ëk_£ƒÊ„rÂé:Ùƒvjð¨ÒSc~bl †&ò9®ãZQi Ê*+ctDILyñ ÍꉷE{lÆJº¯ÞÜU¨¨,Eii¾Š«àJƒùI=ŽŠjÖ ¬a³òæôfg‘.OàNt-žŒ… ø^%h¦çÎ!ÇO´P—IÏØL ž:…Sí£Š¿B…˜B%Ž_—%„ÉsIitŠJ§çu~~Ä›6‚¼zç%k ¹>ŽüäóN§01fFZñÃoþ'| ãÔ\/?B´ Uð.Lt`>‹Ï‹çUüðŒp-L `´ç0¸o„ëu´÷ºP^S‹Ë¯+Rž°QçUAã$‡¬˜Ï œhÕ41ôÆANÖÀ%˜P=ôÞóóELã Ó)T |&Ä©±~µÐØ´žwàq…P[ÀI¿Þ¿3ĵng;zñÃ»šµ€¨ç%4pîgêG˜/ø_½ZœnpÉQNN° œ-eMÌÁý{ù§×Ö ½e}y²o©L:böŸE.'‘xá”z?Nœ²Î&@­ Kp¥4`µSÁ³]Ú4×èi<Ü%ÁË5kÌ8šDKÐ?8ÅçÚ<*ÙB¯]¶P–‘UI-w%` ’\R…î¾14”æÂEg8¬ð~Šk®féX1¬à%$GÍh9.NÊb.‹òNÝäÛ“†üî~a)›r½i( 97“!M"|> ¢Å&6‡éÉ;–K­&¢h¤ÍÂ…Ä/91Âz†ER~%Û·ªð¤B¼8)A^î‹fÞÅ Úa®4=~ü½n¥/^ÆyÇ'Bž\m¤< òL„²øQ#_rÑ$g.â’@yGøhŠõr{µxâúHz÷Œ¹Ñ3Â06dY8ȵŠôþÉw;G©â¡tŒŠ &Tšõkð—w^ÃîC\­×íSãáõjo°õk£Ë{ŒñÙô½dž>ñóo«p$=Ì52ÔNyXîÕ¯½«êPV^‚_ÿâ^ôq]Ì‘ÃÍÉE9±Ó+oè œíÞIYƇÊ+äDE“ñ"HfpvÐ&T=ÙÅAèð0ÝŽÀlw<+Å™cF “‰Fñ4 nŽ¡ú]3ªérqm\ !šÌÞã?ÃÁCÇñwú‘*•Ïí‘Þþo?Ä•²æ°2Zin&Œ®! D2JLbê-p!·\2Uëw]Æ®ôº1áxÓr>\ãÕ4ñ–&h…l¬¦>9&'ezåbõTIx!{¡ÊQ’1ÃËöi'ùGšÛGð¯ßµ^[¶6áSŸâÖiÛ‹FÁûŠ+.ÂôÄÕ rùè]Œ;ØÆ°!¨sŸ¦pŸmY“îCFÑ%jœÕr/YÙK5›¿94uÛiój®ftÛ½ùœÔK×`Íê"EC„Øt:çlÉ«È9|û)œ~è+è?…»†šÐ?¢×Í5î~šês±:iì&yñ.--U¤Á¿¾‰ëøJemYˆ»œä2PöE¨«­Dýª¸vÐÀ¦:žß„ÊR‹‡Eª®Ã#mrŒBÚÕÔºr]'ÇŠ-ÖÕ #<ˆ{ÿý?ðð}ÐBm[ˆZ¶M[/fÿìÀÅ;¶(‡„ßL0{—×øà[ˆ Ä •¦¸ð†gP:wr4ÉUCç–FüÝ{n佦§¦§®À‰Æ´º~>÷…÷Ó‘á,x¨‹±[ÑÖÚ¢è‘N$ÍÓ£ýP'¾ø“Øò¹÷ wÛZä,±7¯*àü8p8àpà0–Hy^pòU­R’Ö%̯Z1ÌLOaÚ«µM#='ÑÓvÍ-ŒËµ@áƒì’:dT¨ ¦ô{àÄ$ùÅ-kV>*‹¬ˆ“X`všÞfÇà¢!bœ-.HSã‘NaÏ–P23½Ê´£´ƒ!®ÏÍ ·› †iÌ.`¨ÍX]_ŽüÒæ½ôª”;¨dƒ4ù{©.Ê QëKDÔáÈN!EÔ*UÕW¡˜æÈÈ<͗󓛣À.ÚO!…>®¯¢–XR×»¹d|²¼[9 ¨l}Ì*,áz9½žrAÚF­mGgÂ3˜ רIàç|zwfp—‚Ìr#ORò˜É"{†öçΙ ŒyòeyOºhzI;ÍŒB»—-Ÿk±p¬&ÿÈxH1&”Ú))ßE혋ψ¤ ®AóÓ³Y’8¥ø1FÕ·¦AÖÿ©Á¤n+s¾Oa†Ò™c¬¾9j ]Þ|šœë(ÄmÁš¦ôu·ñž8TP‹Æ@y&<4ßË¢IáÀ$5–#8yf€0¥¨`|¿á.¡è?«+á¯èß‚i^îü~½ÿ®¬ÃM§C9×Ïy)ä.ÌRuM.¸ÕÝ<û“}é ûéÄsŒë{0Ê8wc4 çUa×»ù9Χ&F1Ò{†Ïc ÎOcT…3‘ñã$‡<3x^pVÌPÍ£!M BžñèŸã?ÿ(ŽìÀ羯*gäcýõƒ’‹.BZI=ìæ)¤`ôan?CެÝNí›:Õ»ÿå³x¢=C y˜-¿ ®œU¨«)ç‹5ΊUtF(/¥(C5WzQ1Î…ü4Á¸ýUÈ)ß‚ÛÞô'\Ï2i.6?AóNW'½Ò¢)Ý“Š5—Á›ùM“Åà³Sœló-R‰É¡üö;_Å¿ê'¾°‹bFNH åÁ€®î KC@-ž8KHX ?’žd6™KÌK•k_Š†Ã¡ìÚ¯vq@ÀÇõ/øuGS DúŽ×¬£pÒ†!zß½ÿCßFO6·zs×Q(^‹Æ::'xz(ðCšÒù*dOѼ–ÍùZÑË X´•äYNÁZš+©þ¥jY²ßõ‹c¸í¥kQË}p Hâ÷JMnGgp5w=˜ÇUu{È×pL—½ª å›7`=KŽ>øu ˆŒ]Ãf¬ˆð/8ÝÄÍÃ(©òaŒÏRrJ÷€ò?~âÏ`gÏü.Æ«ÛìW‚Ÿ8Hø ·aÛå¯ÂþôføB-øÝý“1í[—É3!;$§æ…‰ãÜIcÿüÕGñ†WlÁ«/+ã÷~‡¶ÆH¸üò5¢±2 Evï§@-¢­x)ç­¹M9(qÏ)vЙDÞÒ—ôú„zî Òg7ÞùŽ?ÂÛßq ²=S|Á¯¾òüÓWŽPˆUçœ8p8àpà‚9 «Ï›Ĭ%±ÝÂaN®9z¿‰[ü%>Ðò0&äöq™þªžêd‚ø1› çÿè¶ØÜXŒú¢ò^ú*êþ%"{ú0?zÇÎá=·î¥ÓËsâê:©¾¢Ó2аºä~põ ¿Ùåµ­“L¥%Sj뢡ÞôuH@`¾Øç{0Þ;…Ï~üƒ sp–á,ÎÐ\¦µ7R²²,«ªr£‚h\trï}8Ûv{¾ÅP\w5ÂHú“3‰¢ÒÓ”LT‰‰ZȆ+˜‹ ‰²}ؽ4Ç vüÃOlÀ_Ü|1²3¸M—µöGpÌÏsÊJÇÍ/‰ ý¨ Ó˜Oîýµ¦ç›\ ™SBÏ·_2iǵ×`íæmÀÈ·ÆlQ ÌüZl¼èï©ñøcæô«Ü{¾ôQüæÛ¹øï,®]bœ8ÑìÍÐlëΤàKØ[o¾Mµ" Ä)øØ¿ˆC÷ß…ÿ+ ³ý˜é[Ä?µ‰kÖ¼þღußýw£í©½ø.·O[˜Àì”,4O4…Ë3›Ôe’LÐ¥«w ¨7.T õ´*¾îù–ŸÛ8Q““NÞsá ¬;“vH¡ù–×lêRê(æüæ+ˆôtPÜ‹Ï}àmÈÍå6ŽÕ鱳܇ö¬kEÔê•™šãÇû<@d(jÎߡО± µÕƒxû­kÕöf=ô”äÍ]Ïð!xñšþø×Í€Â[¯¼óÜv ü¨±Ç|¦¬”1ßÍðó4ÅS8k,{]¼ë,H­Q êHae§<]*¯öBs;Ÿ&:Í1Hîa·¤Ážyõ,Å’<Ñd‚[pÉ2;„Ž|DæÎ «•ž¾³òÇå#ðÐ/B˜îßϸÅÙ™®ªY®QÏÕ›Z'3²ƒù«{¢½<þ«Oás{³ñ]:ØŒôµRëªy+Ì“õ»®À4;ÉýÖ׃±®q|àO^§Æ—ÙÂCÑŠ®öÊ#›ëÞäÆ'QÚ~ðm ÞŒµ«þ„}ÿØoOªqì/(@Ãî—"¿ W6ÚI8¸ˆõíyµªL°â¤@I‚'Ú¼bÚ5Gœü&Î÷ª—-+·Õ5h\]̽"ý4#Ñ3¬j5½é*PFaJ„’‰á1Ž¿è·—[.eqQq¾Ÿ{^rMYÿf™±dA´xd*[3ã=Ñe“¥¸šf¬·ì9B-Ñ@ÿŠ å"Í>n‰$’ˆxPÒLùé|‘M!0—4LR ¡¤Ý-Z`x‰)·(©ö/~ÁKÐÔô´7P§ž–ÇM9½­‡qôèiƧ¢¹3´5¥i&,ÑýépѸº5ÕÅgx‹ÁÁQÒBá‚ÝIÈ:µÌÜBµ¾¯±‘¡(B쓤~‘bnÆ×Ê-Ù„ªª ®ƒ 2Î=%ÏœP_”£I›±sr˨ɬ§F¯ÕÔ ¹†QT”«ø3ÀýgEp’ÔÉ? 
&+Þ™ÉüI÷°¯Šš`¹„åÎ*¾ŽöwCþ¤´”“íét S ô‡I¢yKN*LFv‰ò^­©)Ũì²åko20û^L“~Æ‹+*+Uã­†N …ÓkèeÉÐ}ܺÁzO7G%ähy?ÞLj—h¢+aç§‘Ÿ5‹ñéQH,‘ãL.0¼ÌZ»‘aòX¦¦¾™|g¥ u88X.âê®å–xá凌gòýLM\€¡2ø2?_ÊËÏ¥Yðb¼þo>ŒúuQÉùÈÏ7c0ìCaÍõ¸óϲqÍîrÜþ—_FßÙ¸fÉàmºöíhܰ Ÿx÷5(Ȧ‡èÜ(çP¹.,¯©Êc°_n¥uí¨Ú|1>ñUš¯¸þHÖAíyt#êpÅ•—ª2bšóÖÃ÷þ­‡[ðê›ÿïÿàz3¡Ál.À†ö‡·‡»Ä5‰j‚¢÷›¥#…'.x³°jÕþìÕ´43j|t^é8ð£ÔþñïÄÌ´JLbç튛^ën~%6WPèËôÐly >öoÂé“Oá¦?úw®Ž«j«Õn“Ç6Ò¸šË¿ ýàhkYÌW)×иZá0ýaÇs­¼ý ¸ô²"ìÛ_Šw¿ç?¶3f¦´aM;®‰¶c÷Ú2jO³"ƒ'§ü’r\yóøê/(Rr›8IWÜø*l¼xŠr¹ž¦t%µ/Á‹é•ú“oÍ+~¦ó›¯ÇúËoŧþÏuüÐSæ[S§—[£Z½ó%²¡ý¹’;‹KéTPh)‰Z‡+j3èÑ­ñ×n¿ .Æ,×¢f1ˆ´ZTû<Ý>Œü-¸á¯Þƒ¯Ý÷>¶TU×ÙÁ°.˜·¿ñÓðÏžDÚð>ÈX‘X{Çø ŽÝ¼ÛvlÓ‡ïÁG?ü%xò˜ºàs+ŽK7½îVn7{ïþAù…U»QÜx1¾ý•aüç×îÇ—ÿû¡„ûr‘SÚ€M7|`_nd̹Ìô×á­½Íý\÷âÿ}³™Ïa½P;°7®,çN//Bæíx×;®E­h´Åà$‡\’M¨i7}èãÔüןzã*ª¿d¼²Hÿ©£GÐØ°† …=ë’a/äZT‚86š`¨n3!kq¨iâú.Œç²]‚pHmKvN6ªj8IÐì#æQ±°JXùú&°À¨úÍ-­˜â6WÃó˜ p[!NvùÙi\h¾…»ЬQÃEâ‘Z™†ÐÞ!æ>†9 ¢¬v=q2B>³§aVmãtèP µ4Ônp’›ò¡4/åù\ìκe[£Á‘ j ¸@ŸZ…[×Â5?H³ÞŽ=ÃI‹‹°¹Ø:#§@íèPÆ ÅeÛ#qÒTU¿ÚDjì²îû,Cy ¨ü²*–sû&Ÿðf3cíh9=F0À}*éØ@“³Ÿq±^t1[“'ºýä@tŽ™ $ïø§IsGãvA³œÄæØÞj]$P¯Ÿš­|ääæq‹¥5(¡–ÆG ˆiŒ'áE¤ýÒ¦£fíf¥MÈ÷“§¡>î40ÎЧ¸}f‚¼ˆ— ÷]î,jè*QZÁݸkBa&cÚ§ôßÌ£çsý×çI ,Ø×Ÿ|š ó²üŠÏ²þQL׫)lgù©ñÊN§tˆí˜T}+í¸us¡ §……Ü0žaZıDtà85TtË - Õ¤×OçøZ¼dz̵p¦’çöøÔ˜`xAn>Àþ½GéÓKa… Û6n¬§X⻹hV>YòêDë$²ŠØnîj²ºš1âØt>µh=Å-Ò¸%ÛnDXÊ¡·t]…[ñkdt‚ëç(xÎR;N¡oÓ¶ ¬N4OŒsí(cÚI;Šò¨Ìrì:Ú¹Œ‚a~ ·¢„¸ŃÖwÒcüÞóÞϨæÈÖ\oùä·ø¸„û”¦Ó„ߋ޳ÔN÷öc°ŸÏmNa:Їòî+'ÿ¥/³8Xø?*&Y „-CÔp“–iqèeÛ}ŒïG¢ •i¿Ò– 5ÎfçB˜ä:©~`È]\£)}ô\%¡ÃóŒB“3— PàÈdß¾PSˆÞŲ×íÔl„rëVsOdfÒ•¶ ¶ßÏõ©=¸û¾^ÕT¯?W¼Šñ!ô™1âzOßÏujGðî÷}Q™7¹wé;>õ߸~Kal£ù×…†(àööNðÃÏÅ8‚nÅà ŽÍhôÙsAÁ³S‡ŒáÒî¶AôÒgK±svr€Á»Oâ3Ÿü’"Äå¦CJÙ£XU‘†ÊÂyìݳ—cíAë+ÚŽüŠ‹°¶>Ÿåø«0Úc ²\]-%ü¯æ¡B˜âGƱÄý£1å%ò\éA…áIïd9p8àpàB9 çm]:þÖºPlÏër2mimÜRdÊ‚f-ìÅ!dòO5ÙË‹ZþVžt=¬¼´)!t]XýZ Ñ¤KDf»vJì£àѸDëi’ªƒ)d&buù•ñgq;‹ô[rßÙœUiÒi”t?‹Ð­ƒ-y0VÎô‡´UÆöb+"\|F–Êq!†àY9Ñíxîë}V“€Tõ‹äè&&Ü39%Ü¢+àÅåW½ûŸØË%ô†î;Êx0Œ͸}£4ÅÆRiÄM¯áºÀMXÅC¶„Ïç¨ÈThãÏU⇜âœ:p8àpàiqÀž»Ï¸óóI¤Ù ›ðÏÛ@¤ªÃÜ[îQh´%ïå–3pÏ ×rŽ+¡÷éÒf hö¹¡3Už¹—êhÓ³T;L¾1±k­œð#1íýþ R©xö|ÉË+]t†­yÅ4is-[8ÔFášë)ÀÍME”F<ÝÇÈE[pó-×a5âUeÿÁ¿Ÿ/ÝçÐápÀáÀ 8༹VÀ,Ôá@2â&]mrO¾/×"ì ܹÖ¦*çä­œ.W×fâÖ[oÅK®ÞH‡šI ë0?ótLIÏ(fè‘L”0M]%ƒaÇÖ[®¼.§„ÇžkØŠG€{®¹ïÔ÷ÃÜ$%£Ê9}Óù}†9 ítìá>´ÞU\Ç8ÜRXèZ@‡%/™²•SÇÊŒ¤Ï0©:‡< 8ÜÓ`žSÔáÀRÐfÛ¨-œÏ•CÃR´ý¡ä{|•Ö#RülœäpÀá€Ãß<¯vbø=à§Ó‡Ëæ€h㌮)¤ÃÞ8º ÃçèpÀá€Ã‡©9`ï…jù ¦vr8xv9 &µuÏnv‡8x¡sÀà^è=èÐÿ‚ç€ìB±ÀÝ<œäpÀá€Ã‡ÎÅÛ„êpçâ”sÏáÀ³ÌÞŒW‘ì½kvy–«uÐ;p8àpÀáÀ Ž õØiÉ¿¯ˆ›NÅñAÖȯÕß×;ír8àpÀá€Ã§ÏG÷ôyè`p8°bˆæMö÷LNÁ`ˆûÉ.8B\2cœk‡8ˆ‰V8œ3 <§m¹dÿܸæÍT/!FŒ/j€‚œcN5œqŽ8p8ÌG€KæˆsípàYæ€Yó–\Ú~+*Á…B!µ{C2ŒsípÀá€Ã‡¸°çG€ûÃNËŸcˆÉTL§ËM²&nvn{y.ÖÖ-‡çpÀá€Ã‡¿?0ûlK‹î÷§_–<9`5­”LÑÆÉÚ8'9p8àpÀáÀ6ì¹Äàþ°Ç‚Óúç„fÝÛ…Uä\Îz¸ ãSÊá€Ã‡¿¯p¸ßמuÚõ¼àÀJͦ²Yª½Î4BŒ¨âvbŒ–8G‡üÁqÀ1¡þÁu¹Óàÿ¬|íšø0ØÞ¨1º‰Jd·pˆÚß¹@{Rrà\‚œ) &W'9p8àpÀáÀ —…+&~EÜ5×\‹¬¬œWòl˜œœÄÝwÿ›6mƶm۟ͪVŒÛ¦mûö+.ïxaq@›LŸžæÍn±o¶Ë¸}Ï>ÏðyÕº9;Ï9w8àpÀá€ÃN¶œÄÂÂü9 ~Z&ÔPŠp´Èª ©Øãæ@<ª äz¹³Š]N#£'^rˆ:â¸d¼#K’h»$ÔB$—ep¤*§ Y?6 Éu0¡Ü)--Ýd-}¤ZĬÏÁ”a ì6“¢!ŠqQ6ì…ª\b8¨²Ic ¯xn/+±íƒ¥ùïƒoQ;̧sŒ¶#EŠv~šqƒ¶C]“—rßîÿÜ NÂ|6\éq>ʃIr6p¥ë%§»œDËJ³á R«{<^5攚í’q't¤»¤¯ÙÂèyª|—ðfÌJÅR.¬Ü{ºuØõÙç¯ÐaÎ R¯ìrv¾œÛ´Ëu*X›/#ÉÎKÆ¡!–þMU‡@'·c©:ì|S‹Mƒ}nîËq©r¦Ïf)ÚLyÃc»¯}~.\Ÿ©Û†]êÜðÇ{6¬}?U¾¡=¹6¬Ü“d·Cç,ÎKUΆ5õÙ¼2÷ÍÑÐlÞ$—Kk—³ÏmæÜ¦×à²yi·Ù†5´Úôذö¹]n%uØåìóT8lzLÛ–¢ÍÀÊÑÆk秪ÆMußn³ËœÛô˜<9.Uήî۔µó¤ÍçK ßË&­H'“·®$^QDíëHtœ»î¥Éd.Z|R3•ê£Á‘f•“’ËÅ`éa.ý,¯¤Tu¨rÁû\¿¿t¾Í+›9WãÜâ¹o+¯}¾œ:„z¡[Þ=ÒaÉõÙ|Ü‹úeúÔ°b¿¬0ö-*×’ä(/IÆŒjî©ÌgèGdù³½’„‡ªêô¸@)0êŦ„­ävZ|']g^q¼ÉetlX»I’¯[¯ñÉ=Cƒœ›~:㦦‘9±úU­ÒF–±ñ™z ‚Ó$¹'Ioàì{Ÿ¹§èWxX³Ð£)ÊâNNªL .~צ'ž«é³¯—:Ñ¥CóÌðHó,•p£Ú¤ž¥D^ØôÜ 6Ú&ɳóMØåRÑj—[êþ¹pHùsÝ·qú$Ϧ]®í{É}‡MÑ8VÞÆ/'5è¤0Xã@®Í50ÉåÔSA ÁmãÁ[¤¬¤Tpv^rö½¥ÚohWGW¬k¹ãJhJÕ©Wþ)\òþ‹Ò~¾v¤º/y‚Ký·ðê¶)ª…Œe¥~$•0÷ÍQ>Øuشٰš´•ÓapØdØuØùç:7eÆÌxç‚ß“¯+´ò…ŸêOî™/0="âecgŒäx5Ú„=pU¾à“dã²Ïõ]ýkêVt1Kð¦‚Õ…1uظKïZmæÏÔaã•òæ¾ËÀJž:â4´[ùòR6FS¡p*|Qz¯ mQ¾Ù÷Ív>oºbôÚ¸TáøO ÖôyŠòFJ¥jŸÂís»Í¦ŸÕ}ý£4j´.š–(—FÄEMŒn“Ô/_”H“ÂÅ£zh„Æ„¤Û,ªóÔ÷€S^È+J 
8R_´¯Ì*ùæ!“{&?%¢§‘i𚣠’v'k³„2 ô—ab…Â/ùJ6|½*Ó&˼¬ÑHØ~ Ö†QE¢Å”¦)ŠC­÷PÚºx}vV÷ŸÂ'ÆòÂ4IpHi“zɛѣÜ<’Ä £¾€£÷â;q;L¯úYµ“c*ʤ$’Úx‰cÍn¼~i’Mƒ}'ñ\Óω‘ÙÂFW‚Sò¥Ÿâç‹ëÕÓ¨æ§á•MáäIK%Ù|Õ°:Ï.§“~ìrI·Ô¥Ü—ï]W2„æÅùêR2Ö—N¦¯5í’gÚ$ç‰}góÊPÈdø*çç«C`dLÊp0r˜ÓfêX’WÒŒhSìqœˆwqûÍó*pö¸JÕ~UA”n¡YUÉ›¶s+©Ã†5ãJµ‰Ì1šnÑ ésöØ•:%Oê0Ï ä?Ù¼Z mxµø¹Òi÷WŠ6)°Â´TŸ›:ä¾T+Ä»" œ|-Ç¿˜uM¶yK:V Øú!Sð<f‰ÙŒÃZF@Â}=YðÒ}– &—Õ@-ØDfâ”2bNT¯Î¥#¤^ýBÚä|)|ú¾àÐÌ\dš”ö/¢Mwv²IPê‰×kój1 ŽŠšxdÒV‚ðÔëó@¨ggçÄhŒ†Î(vuиô³Û?·53©Õð6ƒÛækB^´Ï¿©Cß׿vŸë±!í-Q|Ò0åÄüÇGNf£@k¥q°Ç˜n2®ìXX@ °€ÉÉ Å7·ÛƒÜÜ|¢ÔcA›7EkÅÅýn3.l ç?·M¡B“<Ôv2&Tä’ͨ6ܳunÌ©él»¢&4õòã¸JNÂOQé+m$yd›äÜÜ·ó5ŽDXÉ3/7e’ás"ÿídÌ0bþ4/¬sÖ¡P$âQ娕BsâD«k2uÈ•XµYÔ¦C“{ºMºVç)¡[½7’-§’—[ºŽÄ;rexeóÕ>O¦]r™ …©1²4 *oq 9ÒRç¹ê6_5ìbjâè…]n©ûK›ÓìþH,mÓ+ý-m1-ùòìËXK§¶YæV°|(f6ã5¼”—Šaeªq¥Ú­Ã†U˜T7˜Ò̱Nã5ÅÏWªg0‘ÇzÞ‰—Òg‰í°yµx Úã*±\–\„rþ³h6°B§Ì™BWºKăÅBy>Ì2i—á•à ò=#¨%Ïîsݯë0¦G3Ætûí~TxÞK¦ýqXûÙ]ܱ¼zæ9N =‚Wèøå¤äçQÊ,¢Sø–Ô~á•Ô#Ï_ªgм¯„žó¢(¡6Í+à¢åy0–n’sÍ M€5*T+æ<FFïŸ ÆÔcã2eÍÑÀHÒÉ’ N9—|S¿9J~ªd—“û6noÃØçv=ɰg`E ‘KzÑF0<<‰¡á ´·uÄ4Q^/ãÈeå¡´fj*²‘™á{‘“‡iKªzMýr4pvž`çÛ8 Þ†þjsŽ \‰Ç'ƒ+SÍÀ$æÆq¼ueŨª,Ö–Jƒ&vT^¡ÎΓÏZbÕÚâtÉÚßàÜæ§p¼e„tÌcjzšB¯Ÿõ£¬,~Ÿ>õ Iuñò1RÎqb?4ò JRmžÕmÎÊÉN$pñèžÑ[†6C— 7yË©(cÉpðY]óG]«±¶˜ïú®)¯)ú¾ŒòBpGǸðž`R_¬Âx1ëL ¢¼ä©ðU'¯Ï%Ïä›sû¾}-žp0e2­ )Ÿ*=>¶ë´ÏM=¦NÓ“oŽö}뜧 ›üðÜÜ‘L鋸³PèäÜN¦”ä'ßK¾¶amç:©ÊIžüÙu$_'âÕcÛÆeÊ.UNòÍ= .8$Ç&É`‰×&9ò§ ãÙ&ÇÊÖc4ƒ]6ù\Ð ‚Ä2êŠ?òÌXè“`˦xJu/ú±Í¯Œ‰Düq æL`§&|>xS.~”Ò¦”M—½gòÍQJKûZòì¤ï©_þÈÑÔ‡’\sGŽ’ÌÑœëRñ±`ß70rL•’idYá/ k” Á¨ò¢×‹1™Éu/†L•³"NkY´ÖÆhÒ4R™dãšÉKÐ*ñ«‰r3ÂE¸Q œ.' –¯|-}Ê¹ÖÆ$â³q™zL9}ØóRi”tº>ͬXiýÚ°Vö9NmÚl0C§§ÏëvHX‰Pp3ƒ{ñÛûöãÞ_ìÇ#ÿ³²m’Iyë°é†àïþüR¬¯/B–'‘ojjLÒÙí´Ï N9j—ôWbÿ£;×}dæfçpòãØP[¶ñâa~èÛÁï>‚Ûï¸ o}ëÍÈòÊà¶ûÖæ…Ⱥf{˜ðq¢åT{Ó1 Æ²ý·è8|Þþ÷ûÑ?¥“‘SŒíoø4þôöK±fu³E‹gZ·ü£úC´jÒæjûL›·ì¼ŒÂ!Ãy$¼$–ÿB!mAÍà–O¾¶Õk&±³·¼»eŸþBXóQ”ø%œX8~%ðò\njN„FûË5iÎ Þø³l賿šãth°Œ,ùJÖù:tŠ>7ôür4m¶óâçÆ¾˜Îh95~V>ˆL;¤&e ÆXIåë:lX‘ÚDÓ$|”vÅû#Š#ÊcýÐHåŽÀ ÏU9ž/*§A™/5¤ê›Å¼Z6Š,ÅAÓ,h‰š²x›M?%ö£JÚ¡´&í|„ÂqOhi›é»œ]‡À/’ý‰ ¯ìrWëJw1¬£ù¦ëÖm¶µ‡Ò–d¨*íG¾^Tû$/vqص.5®T›9nl¼v¹%ÏSŒ+Œjœ%R#ˆí2i©>7|Þèg,±M’¿´v̂Ҧ^¹ñjUõ‰uHïš÷„:UãÄô‡Ì³2úÌze !ðRN—5yrLì}Gêã°ZqJ|––‡@ÞO&­H€!ÀL¢œBÙ6mb‘îLN‰ÿâûqÁ'þà nSÎÆgòô}ÝHM‡…—ÇÐfÊ.)´¨OŠ(5 Cã0¸ôѪ/zæMg‰p!©ÿìò†¹§Ï5>wÚÛqÛë>€Ñé\x³Wã®ý…ùyÔ"¥a¢ë—8zì þé_ß…žy7ÖlØ‚½íR5{C÷‡Î”v$šŠ5ºÍ²V,Õ¨˨0-WÓ©iTs”W— dÝ_éi³Èt·â ŸþÚ{Æñ©ï܃|_rk ð¹»åUUŠ,1…*›¿2…jm[rŸ™FÅyeÆ)RãÍ*Ç'&žGhèasþ¾õݸíßÀªêjì¨ÏAxtÎö¶ãη¾ ‘é÷o[ñ‘¿Ú ·›/ð``EæMÛªL¤‘i¶¹-Öæ/üìA”zÝÈ yÞ4š|MCžÅ£¼Häo©bÃrŸ¦i“Ì&/Îød.&$<¼”<Õ_±­L®2™‰p%/Ò¸%ùòÒ“1­hñOƉ.'÷õ缡FµpaÓcð.=™KÉÄ©¦Çž0M-¦Íª~•iðJ=æ\ÞCZ02íPC›œ'ÂFŸ%}Cße;SNÊQM›’Nè&¶CÛ°’#µ‰¹PÊÅû@ÃÊoMß°É—vÄÛ¯ËéIny¦.]"‘W*u¨wXôk·Ã®/=ºðÒý(÷•pM!0ÌwÄÛмPÓ¯â¥Ô§M„¢,Pƒ8:N ¬îsÛŒ¯Æ.Ç­™‰‘8Îõs#ï #|hhÁ!œ6Úç6.=þ5¿Õ¸²ž'«˜.u_iÛÀ.…׿«”x5<šeü¨± ï^þ³Ÿ]SwÊ£°Pž_«\òØ•ºl|çëóx;ìg^ø¦ßí6.›¦x¹hOEßQÒÍ& ­Ë}Mè2,{ÏilKÑ£úƒão)zl^ ã¿$»œ<3¢°Í̈́ԟçWÞ±&­H€ SˆP‰Gý‚ÖW¯d²ÕÉÜ×e#ŒÇ¦,2Ñ»Ÿ”Õåp`7ÆÆeŸK™8=m _üÚœIC˹6=*õ1±Ç×`™6ÅÚÂb1:UƒGç&:1>Ô†³Cä—cÝæõ¨«¯G^nŒú¢JÝs˜ y0Ïðs]èêS¼8LMÍPóA^~.ÊjV#ß… wfÆú19Ü‹®9#\Û’ž…Òâ, [\çÍFy!Ἒ×Âßp˜mŸÃÄØ†G(«¬FÍêZÔÔøÎåf.l¨ÏBŽwá¹jÌš‘›__vŠrä¥.Š´žêDÄåEnI üóH /àLûYL“ÎYj%–U©¿\÷fFÐÓ­Û|¶w÷Çî͵ðä¸13qC³Öš@·7YE«°º*fäB³ƒäóææõ8íåÜ|9%õÈJŸ†èd?Ãép¹s±º©–{ä±Ýiñq«Jú‘1fÆ™¼4bç.-$è1™NNÌ_¥ºŸ<¥‚9_ž¾o葇œÇë^îY2=Ë-gÃ-¦×˜ÝÔàTÑ÷m˜øyÇÑ{p¦õ8<¹›ðº7Ü‚·¼åu q:MÎòW°ê:lË©Á?¿þñK'pzÿÎL߆Ìî1;܉{œEÿY OÝxüÑ}ðæ×)s뿼÷Z”dд@בÿBÛÉÊÄ85„ÛßøiÜüò‹pÓUµ?ýS´žîÂ}÷¨ª;âÎÆLå­ø¿oÞ‰ÚÂúÎqïƒ-øÕž^uߘt?ù¦FÏâßÿã>ttR ¸ñ­ïíÃ+/žÁ‹›FñÍûÔñʆ³øÂ7al\ 5ûöTf×+®¼¯ÿ›£aÝ&Ôæ¤£õÐopྯâÿ}³csÙÈ(Ú…×¾¼‘Âf9Py îxYj˲4|BTOõp½Ý…Ò4ÔVç ºÌOd®Üu¨öWá‹ÿz;ɦ€ÜýÄ7P»ýåXù¸~…ÐŒ U¤í3ŸBØWŠ7ÿ ^º¦ésg û5à+Àî×¼ºñ*„gð¡} µô#âÉEdæøíïºqßå¸é]…¢Ü0}'ð™/üoV_Ý„…`=ƒÕz¢ž©òPºQV·ÙOa–Ž }ÛŸ¡¤¤[74 hýË[X‚í•»°ig5ÖWi}¿Á‘æ‡q`/vúmt™Ãd×£Ô =‰üªµx}]N<úU vœÄ‘–®zÅ›ð±—mÇhëOðĉYÜõo_Æš÷Ý„ÜÒüX›Cðâö¿ýkä¹O¢»yûJ joÁÛ?µ[óZÐÕÙ뺲4lçžÅ'NœBk× ®¼í=Øùâ~á4>ùåÏb®n3F7Þ†÷þS&µŠÃøíoÁÃÅéÎ9üŸ7nT/Æs=è¶àfDõ" 
p;–›ä…g}8¦,¦bÞÈr”$ãÙŽ•—ñ™K•K®c‰âÏH¶LvÒŠ?çÁhØöŸ§È9o›vž(Å͕мØU=­,Ó¾åðj%°O‹(«°ôçrh³Š,:µÇÄ¢›Ḛ̈q<]z¤JÃËäêeœËxXê¹K†W¸(àÈ¿§“VR_ªzÔ;†ïÁcój)X“>Xu‰¦¥âÕÓm‡¡Ë>ʻܤå¿ÍYB¸L)ÑTðl…&“ìs“÷¿y4ôœos³aÌϦ!« ¾ ¿ÌpÔkõª I.·™yÕì®ùZ¥Æi>v¤ìOÛ?0‚² ~T®Zƒ"?û(2‚gpúL“óXU|§Ú}è›,Æu ±®n•¾Lžæ±n®[›À KAâm’?¯%å5XS“ ïì,&æ¦0ÊCVa5Ö®£92ˆ9zœŽ÷ÀðlÍ nlª/…_¼cÓ¨(ËE|˜ ajt°•Ôx11Aš [VÝ€-›(ÅÌôPXÁÍŸãð4£w˜/Bæí¨-b­‹Zª…qÌ-ø0==¯&á‡á­¼(|ÙÅ(*)E%=]‡{;0?9„l0–Ñ…¼É*ÜaTdùáÎp£²4 OàL Í» !Ì/Ð,Ýß™ù4äDÜäí¥6sl,™ôþ]ÓÔD3v=NR0žEÙðezÐPWŒÌÌlx2HçÆ5¤ó8Æ:pìL»êË•Vqc1û/4¯xÕÞ=¯+C pSS¶'‚âÊÕXC­á*5‰ƒwcªlæ3°ù¢"LuâØ“ÿƒ‘ašžÝ#ÒìXе?*É É³_æ6Œ<ôrÏcˆÎwÂÇK™Îgê:˜dÊÓIW2Ž¥ò“ážÎµÐ/õ˜gs¹¸ž Ú–‹SÌ6f">½+=® ½¿Üv þ•À^(=v¹g²¾g×3ô/—\_èóšŒËÔ±’ã…â0ï9û=³.5iYÏôïÄsñj©ºM½z\±'•Åwj1)'9—‰A<¤‚4gBåÚ¨ú¤ŒüIžFÎÍd#8$O´æ<VîË=±Ë‰ÉHîÉŸ¡MÎ M¦œ©Oh4ç‚Ïn‡ÀJ’:L9¹6¸ähòåhòMžà5m܆6ÁaÓ&×vrÏ$Ááaý 6aªƒa=Ñ…èñ8?:„Û |w|Ô…Mò¥ãòÁWü"ìºh+Þt}²üdeâsù5þî‹?C…­ú+g1?ãA$8‡pç£81ä®K ˆ÷æè Ú:Fh$/¨Ë(Ù7¿vvl[(fg¸F.{ûžu\óÕ‹_ßóõuÕÓÕiH†73 «£Ç5‚Ť9¸SY\Cõ#äz†P•åFfÉTV×âWW ÓWƒ¹É*¼÷öØ×ÓÖŽdö‡1“±ë.߆+׿ (ëÞB7à/oy]¦ö°Ýs(§§¦bu³KÈÌ5¸ýοÀͯ¿ Oì§08†Í-èo}ÝOŽâë{öaã+Þ‹º5kðžW݈?Ç죢{|ã“chä14^ýT×­¦`Ky«Ö‡Ð\t‚xàžœm{;/½[/¹/yMê«ráÆ8‚®ø‹·#‡šÐõatýÏSè>Ó‚p0‘±v,œy{:Bh;5£èmï¥÷p&ÐDG‡üM($–[¯¨Džß‹™‘IŽ'Vח᪫6Òc¶3þY\Q?ƒ‡ô3#^¯2Öä/¤é—ÚG½V&±0ó;²•–úåZÄ/É“…Á!Òéóf¨ñoÆiŒ‘Ïñ‰mzŽ«~ƪ â^âÏÒgÑJh^ ì³L¶ƒþ™/ÄçUÞ“öܺ>cþBy%ô.'™÷»À®H€3È…)R™ 2ÈD02ç†i#‚Œ '8ä¾*æ¾åÏàx)'Éà”s±†•ý‘Ûª›Š1\?µ62 ‹ ¤àÂFúÒE)>†Æ()Ê„gŠiô0,_sòʪ•i1üÿÙ{0¹ŽëJøL÷t÷ôä 2A€ f1ˆ $•ƒ%J»’-GYöÊÞß븻ö~»–½Þµw½²%[–%Y¢‚e‹J)JÌ `@Îq09ÇÓýŸS¯kúMOOÄ PE^õ{nzýÞé{oÝò榴¸fÆ\°1I¿þ¡ÓÿdR5€{î}#JêÖ ¾r˜š3a+óÍ“$‰pQ:÷÷¾ˆÞÎ3xø»I:¹8¢´M-\ð0läÒ?iâ'± †*!¹¡™!Ì•©ECƒcAœí%)J‘Dr¡@œ‹Šš§ c³‘ÔSçË0Š£c$4}<†62K£HñÛÂ$…ã$WìKÂMiîyèÙob4T‡‰¢f4®ZKmܪª«1Ò{cƒhÓ:µ IDAT.=ý½ô3£ÿJü]w¡¢vV­>‚ã¯ìBb¤Ï>~ «ß¶k[©)BeãXZ‹üÌõFë— l?uŠ2AUÕÓøÀ/~U¥a.*àJ´X'’cf±ˆç‡É1ü‚¬9(«¨Ä½o~Z“h)óÈœY}K¬ãa®– rá •–¬g»åh)¥ÿ^A[©0ó¢;€³ê­B䘕²+ <,dëœ"s,«{N>:ï­êòV¦i%£îKa§{Ú%‡€CÀ!à¸ôX¾°I–DàÔ:Ó_ˆþH:ê¥jó–©œ%9zaä&ÕË-k5lÒ^Ù—tn½ù>‹`©®ÚXhRù\yrëÚë§_îÜr¹Ÿýõl^õ%§^¢…Ô(u}çÎmÆ9.]$¡É·4WDNŒöâìñè+V…âê&4×EŽ)Ò5kV!\^ËŸ$N“Ô¬q5jŒd¨ ÈHü\Ùi“ú“ì¡Ëñïî{LýÓÉפñSM‡0œæŠL¸ÑîgÑyü|û›û°mûÍxÍ$7k›© ôˆˆÊk,ô?+à"€`1 "µ†áÀ*ã\˜®Øœ'ÝÀHWe"[_mˆ­Òl\LÍVEªÝ8ÀˆK€Ä5fLž–Òh’HÚ$‚Üáçÿg nD_ÅëðS÷m@cCˆ2¶ÒD[BruÛZâcÿІ®¾8F&ßʺVlÚÐŒÃ/>Mm]¾ÿl7þÛGÐÚZÃեÈVß„HÅN¼÷ƒ½8rà0ì;„§>ŒþŽƒ4ùvàµïÿ ‹‹åòù$ÍÀ“¬#ߊ3•t? 
k…Ú©­­Æëï}Ék€->…3a´ в…óPº%Åh¢æ-•nà*\š™m"'”-@Ò'M´oú<),L}ÝKÞ×YÚ8] ñÞ°F[GYu=NRo“#p wt8+q›Eà¤!)²Æj‘,1Ð5›WYGêÐ’2Õ÷“+?i³‚ùþ²¶Õ™-‰PèOýHÉ2_ÊׇêûÇ¡žþTÖ¾ ­<þ²ÊçKþ>üe®Ýý¨l]‹ÉÏý žx¬=‰ZüÆo@m…494óú7ìÞµ¿þ_Ÿ£#ÿoáÚÍ×£!ÕK…TñÑ>¼üÕ¿BIâçQÙÜ€û6Ÿ¡Y´ ÿøÍšw¾5aúáFœ¢h^÷º{P[=Á:ð»¿÷O ]ƒ@ÍÔÖ7£kœ+)°U©AÔLö!NÍݹ\áÚVŒëâð–×nÁ»_»š‹ ^Áä°ZÃ?Îxÿ+îÁ«]ql*MÃÒ²²2466ì cup„4.=ÂEé§öZš Ÿ9Ç¿½’Ä}ïìfÈsö?Œ¿øü.Œ‚‹8ÞÌf$%2 *1&ƒ´n¡yõÿ†oþèA’·/cÓúU¸{c]×a˜XžØ÷iLÄànݧ ãøÿýet¡¨áµØ¼¡‘»\P»‰¼ôП2$Ê9<:ò^¼íîFüÎooÆÏ}à>ù÷i|ñ+\@2@_¶*úË5näjÞ(ï±aj;y¿ÓÙצõÖã¾{_‡ôøçêÞ¿ÿ§G1Þôn4ÕÕë&&OQú8êR}¨‰ ^]‰x7ƒ7mæ–*šÉi‚7&P¯E}¯Â!Úw}ɘPIÊDÐDÔ„Í«¬5¡æÖ3Äϧ‰›ë»äëÎe‡€Cà# wM³³ ["ÏQäÈ3eI’]•#’Oeü¶i!TÎ&µkËHFýùåñú¡‘š ›<"è™aSŒÉe“¿žÎ©]í¿iÔHüן×u²ã°²©ßPñjÔ6âC¸ ûNaÿã_Æg‡§ŸX¦¹Æû w$…êkßûßp#6oZËøpÜ€ó¨ö&ãã8¸ëIú^Åq v«I'päL!Þü¦Íhª6rixëÑ]\ 0€Ï~ú¹Š”¾T©Q>çêÌrÜtkêËŽa Ä•­LãE£f¨4=F'ûfŒ'cè|æÇxlð%t¼ZŽT¬ }}ذq-ž}èóè£6kûûn4ÑÍÇG‡ðØC?@ð†njõF.¬íxyÇ` Êvž½2¡H›vîD{¢ ÇzðÕÏÿ#ãÂÑŒø|öÁa×Sãkû=YUSø–7nÆÖ·|o¹çz\ÓZ‰XL>r R tîè>tŸ<€'ø.†«?íF¼æÖ Œ‰VŽšô:lÞÕAr¼_þüxäœBˆ6܉·rŸÒ·ÞÑÄ¡¯ ':Ê€±Q$éë6F'W·ºqšp‡O|ÏìãÊ#תªª ÜpÓõxúGßÄDûfŒ¾ç~Wƒn¼üø“ØÆ³®L--Ò—-L3m‹+Š¢pˆæÙ¨¶Uã¸B ¥É6-Æúí;¹°`/žìÁÃý ‰ µ|4A¢ìV”U®AC])µxÖ§‹‚dÕoÁŽˆãç|ñè&™ÎÌuA¾|õwâ§èû¶…¡EŠJxŸEdÞN³'ýØHöÖ^#ª©Ü*%©­¬_½–¾ƒ Žù³xrï„Á”§±jç;°áÖws<ÅæÄð(ëGËHNCøÖ¿ýoºïwк¦×<ºûöìÁãœ4dÌÎÇni¦V5ñö>â 1fc¸‰ úõM$Äy ŠËê ¾êO->)úÚû_×f¦ì÷C÷«¿¬?ï¯7užë~ÍýNù˺¼CÀ!àp\8üÄl¡½¼ûcþ%ßù¿ö§\ƒj:~ë/7iõÛž½¯¢¡¾‘í‹Ìeýª× Àš³’“ž†K/½ ôYÉl ÂrZš^HŸ,KötM«œÊ(ÉŸÇŸ×`*WÒ5¯]Ï\¤s£ô½zä‘G°iÓ&lݺ•„HàÓë©_kNRÞßžä’&BmK6+_>Ù$‹ÅåGdÌWÌûÇéoWíäʦ²"{R˜XRj‰ŸÊÀpC «1ŠŽÎvƒÎs—‘†ÊÚ&T–QƒD~<•˜‰žGÑÓÓƒ?úF%î¼¾÷ÞÒȈ T•Ghª«á®)Tá\Ç$†º¢££“Û+q KÐ\SN‚Åð$%ôécì¹8—·ö $PUS‰p8$%W¯š=?»{¸s@"@2Du™äò^£³/Éð!\Cd_o‰%WËÒs¿¤8ˆâ¢ zúã\„@ÂÆE ýWˆ‹.ê¸Û„üúÒÄc|¸ébó9<ô5‡“8ÑÆÍ7ß‚Ha“#{ñWŸ| ‰P=ZïøI¼÷®F¬ªnJÔ}ˆq!Å(N´ÇIPIà’2{ÕR.ú¨©#Ô¾•0ÔIzâ$Ã}ìÃO¼ï¿Ón-þð¯þ‰¡;.@??µ/œµ#FM wgàN òå;}feÜ}£²’šFαSÇÇ >T¢ªÌ3a–ùÞf†Àéûá’CÀ!àp\\:»•ü`ëµÛŒ»‘í}œ ÛÚÚÌÇp8Ì÷C%>üñÝßK$Ó×.Içi²ø¦›JÞ‚‘7ﺧ•“6€¯†ÌŸ­ã•ñÈŒ´ijhf¯2¶¾òzÙzæï“÷¯ˆ—·{J¶žÊI®üíÙr¶m+ÛÌóêßk‹oW^VÞkS}ëšw´x×uÎKF6 çÉbÏÚ#Ï‹Q^AÓe_ú"^ƒ$C%Ô`’™>eÚ•Ir2Ám±&G©Eâ¶[Ü© ¡iJÉ¿µ€Š/ÓO€~_27—–3v}­Â$à)9½3PpÂtPÃ¥­O…Åf+«–b3ÊO+R#Ôb­^SEŸ88š+JH6‰§ÆÁ (›·¢±–$I2ÇHømjâ <©ÖÐÔXdêöÒAŸ`ñ~‰–×që(*Ó’iî¨äV`ƒh?Çøq#CˆÑ|<ÔC2mÅÒ¼Õ—³?šPuŸ™yÐ=C”EŠÍϩv ãÖ)IFƒý$J“ãÜëÔI=t áŠ5¨lXK­˜~X1½:oˆñÚVq;®É›~À(éË G8^ßt.PPš ©Ö&­€-ŠF^€Ü +D|ʦpe=îrQ[kKëȯ! 
´4טyySÒ‹wæÂýëÝjšs}W2\¸î\ˇ€CÀ!pž,šÀéíi³fö¬ ‚õâ·Z%¾ªŒFLZ+iå¦k®²þ:jI/Z•±š2i¨¤å“éÓ¯­²eu”ySar“4jÆ·,czÍ•GýLÉF™½¸kzIK†@¦Í<ò°]µ%§ð¥ÊfÇ)™-A³ùñ †â VÒîUUÙ%º*|¼—ª5é–”” oä4Fúβ¥)nx"@RÆXt$m’Ó›j‰F½º†1à­4§’ßj½²s ÏÞ<ÇMhOfš YÇšÝì8ÊŠ…ƒsžüsgÛÔÑjký÷‡ÿº?ÿâs/à³>Wv=G²XŒ=Ç<Ðõ×âõ[©½+W°`Ïq_$Ö¿²R$¬²”ÿ@ufj’SÉQŒžý¾ø¹Gð™/½ˆïý#l¼v-JÃ\”bðU]/™ñQ â_¾$¢£-Þ˜g–ˆ’Àéo¡Éö·Ðò¢œýnëûaï· ÑkÓ!àp8€žÑRàØ´('²L‰ð(Œ†§¹Ñ9û2·/w]·Úzeg'}V fcTéE<[òH€üædz dH )¥s~ÙfkÃ^2«®¡åµë-X°}¨¼òŠ«¥öMž/|oœžoœò–\ùÛŸ-o‰§×†7!Ê‹DÚä™l-¡´g³ÇPÝ[QU2Š5¯G#7|/rq WBR/i y}xy°/hiÔԟG?®ºfç4ߘüeÕ¦’0ñòÓçÚ“)bæ7·]µ5A3ej²EoÄ;þýµØzÛèíí¡ö+…o+Àöë·sål¥gÞçÍa±Ó¼¥è«6WRêCãHpwˆ¢›pçý hÙòf´n» UÜm‚&^•sÉC@øŠÀÙyv¸8‡À¥G@ï)» PÒ,ŠÀé-Í;üó^xÞgixü¦Såõb×öŠR˜z‰°/}^4É~¶e¼¶Ìxv9»qÇc>– SÉŽËôÂ>æO IIA¨M«"¨¬YÅ•¿ô‹ãâ€`p&N^…‹û¯06’^2™¹/üDuZ¾ëÞ\ë;+÷;ï!\ l -ïïâ¶Ôã|òÌv}¶óK‘c©m-µÞRdT|ýå;—ÛþBÊ,µŽÚöß{Ké+·ï¥~¾”}/UæùêÍ7¦|×uÎ?'¹}Ìw=·|îçó­ŸÛž>çG¾r 9—¯­|2ç;·öRfQN J/ÖT¶y½P³ç¦_Ïž÷Ê[óf¶¶L¡ÓÍ ¹}øM}Óó3Í€þþrËÚ>sûÓùée³ZYcz¤ÆNiú8æsV.Y^Z¬¬ïØBú(¯(7²,õþñ«½Ü9˜«¬¾ÄÒ´I §dÌß3­¿ž®M¿W¼1‡ÂÙÛ°„rõ·jÕꩇƒÚä-ÆýmµZw1ÎgÇ1wN+õ§Áš/á¢þ¤U–2N²,"­…J·é‹u¼yž»'盛Ùõ9ÍöÏíÃë×jÒ—W+:Ÿ<Þ÷Ü[ ïß|õüeçËÏÖÇ…ª7_»³]Ï7fïù0÷üç«7[öüBÚ5Ï]¾#üñ RÏö±ÜÇ¥Îãr˱œíÍ7wùƬç²}>äÊb¿Ûz/,Å#ßœçö±”ÏùƱ”vT'÷ôÞmžÐŽy¹Ç!\—lB]ê@]½+ïË:Ý„šÑÁ-yàÞÍ«®ö¯Ä¤q)°vqP€hùÛiqˆ=/eW¡‚I3p´ Ðu›ÉÓÙî»'"¦²ÒZæÖSY³ë/˜þä;H¢¨öÔWœ+‡ìŸüÖäýX¹D hTÎ1;7ži:ëËj|ÙH>[VmûóÆŒo~zäßþЃÏ_Vý+ùû!×gÉc¯[³¯ÎùóòqT_j7_Y¹c(Ù…Ï"Ç¡¾Ô¶æÄ߯?ïõ«m^ pÛ‡’AuüëœM¶}V¹eýõÌýA™ì¸ç’Í/§+õc¯I~Én?«/ÿ|ä«ç—ÇŸ·ãÐØ•Ô®R¾1™ ™l=;‡¹òøe³õüçfËûÇ1Û|Ø>ý÷ÊRûXL½ÙäQW;纶˜z*o“Æ©6ýXå³ÿ»ë/kó¹í-E{_¨-Ûn®lþó¶OÿÑÝ?ŽÙæÜγ¿ž?oÛ¶åü÷ ÿšäô¿í½›Û–½çmݹŽ*+¹mʪ>ì™ó8j@‹1™G²ÌÊ3ÔœG縪™¬ ç‚` æÂ^7ƒ?-t>fÎ3ÛÓÓÊ.`>¼òžŒ.ÂÁKžŒÞ8¦çóc•¿^¶¬_¶ü÷‡-ëa`ï;çj?½\\mý¹Ç1íûa…—–™’ü¸ªXvî䯜ý¾Úû*;N+‡wô׳Ý͆k¶ì|Ø:YÔ®®çÿžûe›ÞwæùaêzyÛw+[¶?=+¹žçùàµå}w§·»°OËJàâ1ï׿L2 Iz¹è׿q"çËãrM_c“ÖB&®•ždJÓK;ɉ˷€ù°Xˆä›g{}1XÉ|§gs®<çÜû*_ÒDéÏ߆mWfC›×uÝ–U¿ºUÆæÍ¸8>õ«¾ò¥\Ó£Êøë)Ÿ0ðË>-Ÿ1Yª};¶,cVf×2s*&ŸÈœ?Ÿˆ{š;ÇÈɹ3×ùYUåýÉšPíx,†úÅ®$|D"§°"±S²¸*¯1ó1¨,Ótó¿Îè¦þ”Dú,ñË-ë7«û󪧇^öÜbúPíéîV3¡óÓåÑk±ù¬ÇùŽÃ3­Øv³îÞïßóíC­Ì…UÃé®ÓeËŽyv¬fοìùŽÃ/ÏÌ1M—Ý?&Eï_¿<þ¼-ãïcz>;þ…ŽCm.GùeËÊãïcú½ëŸ~:VjözÓËúqŽOöÞõËceŸ½ÞòŽÃö7]¶ü}زþãlõfŸs¯¶Ìþ¼¿m›Ÿ­{}6¬t]Ïb]÷>[/ßÑ>ÃumÑNðIþŠ×Ã\¦%팠½>ˤ¤Ïz÷¹iºôkùꩨQŠ+SŒ/VòÈQ¾²¶•Q»j_/È|I+`Ó|Ù¾ýòäë£@Ñýf‚òfÚ´ý鳸¥4ö6ÛøUVcJ2 ®mÇôMyÌ9baeχÅ-Er!-Šd7Ú•9Æ! 
$gnR[ê_}Ú®•M²«†æÊÌ_ÚvüzÑæÎ‡¿©Ÿ¥¥Ñ dµ83ú`ÛºnþØž×ßL%³î‹©ùÈŒCs' üÉÞg šÍ8x1·ÉnÇ$Ù,þ*g“Íë¨_ª’ÝÞƒfœÄݶ«6”×QÄF¡P|M±>‚”G»%ðö1aTdžTyµÍu;ÜoVß j¼øŸÊNÏ{}åʦϚ³I†(±²ˆ|YylÞöÁ¢¦¼ŽJª75&}¯(´Æ©òjÏ”Ñ÷##›îiÝÛÚõDeÔ¾-§²þ¼³æLcrÉ!àp8..zØ´(§}%僣_÷~gkýŠ—À:Hsî)í…ÎÙ41›VÏž7¾\lY½4æêC×UGÚm)¥Ï’k|lÌ4i´ |aNÉóóõ¡þŒÚT²cÒç)›sæIî8üeÕ†ÚòË& ÏðБÇÊ.ÙlÞ߇H€ä—3ù|ãИ¬ÅùGòšþ)‡úPÒg+›|¢4þqøÇïÇÊ߇s[V¤FùYûHx}dÄšq<ÂÁö§ ÂvÌþ¹XMG¦\Ù4^»þ¼¿|X¥¸+´>2Xùe²y?®öÜ¥>Ú1ùçn6™SÖ¶áÿ^Ùsîèp8KG@î1ÆWyM,ŠÀ}ñŸ?‡sgÛèö Ràû N’0u&C=Ú@ íÞæÇ:Ǫ—tª¬ýUŸ¯¬¿Oç`Jkg-Ûwà©ÇŒ¯~ñó^‹*«nmß<;gsö§¡f¯š1Ž)ÙÔ½§mT +ÛsO?‰o|õŒ'Ï”ì*aqS5¯)9%»®ó0Û8LYö=#±­©Ö3—½Ö)çÎLŸä´óÈ2SyÓ«Zd93em#<3£¬N¨¼Fîý;£ÌusÙÿÆfûPvÌþ2ÌûF3{*5C65äG¾>ƒUžqLÙ‚#û%ù85& ?Ð~¡S6SÏ̇7pK.ïp8KDàïÿ´®ÛÈÞÜW«0Q7‹"p%¥(¯òö…\¢ŒË^-Ì­*ªê¨ JpïÑñeoÿ|œ’š²±±ìVKçÓær×ÕÍ ŠCãî¼7Îr÷}ÁÚã˜dî“é{Ú—AçEEjø%1cf‘©²™ü ¹2eE…¦°ZBY#O¦ÞT>GÓ‡þ¹Ü“0™û9s¹ÀÉçp8.‚ÜŠÎ\‹’gQîþw~€þR37_T®ðe…€úe†U¨ˆyˆÿe%÷\ÂÈwP;/fümYñ6mù%~$îaÃc¨¬¶S™õvž°5¼£)«°>¬f-ËÂj7_Y’MfÚiòdú°»˜L—âòú$ß9Á%‡€CÀ!à8’=|A RK°jÎÆäöbÓ¢œ*é%hºm ³½‡;ù¤‰…2K¡+øô|ã÷bÁä' …%· V”ž(É×®æSi¾U6^©•ñovLtþŸ¾Â7o1†·0AÚ7%ßÖóÎdÿUC´R–ef+ëïCó#œiÒýõ²½_ž9XíS¬{PDÔ%‡€CÀ!à¸ðøýÞ³TnýÚܳçišãEïI¯™9ʪkµc_Ù¼×¾w~f^z”l»þ¼×–_¶Åö¡vgÊã—snylßö˜•ÓÊFlŽwÝ/»?oËz““+ðµ«am>gÚõÊÛ9ò·›ÍëÝkV×f˜Dn·ÙÇ‘_6[Ï/ƒ?ïŸ;µ½œžDÞ¿¹í¹D—xÁ»7³åüõrójÇ&Þžóý×gË«¼ÿZ¾Ïþ6/·¼dןwË ï’CÀ!àp\L–Dà$ Ôx&ø' €^ÌÞ/ñlÞ„/ –Á”åuSvÖz"4=e~Í+¯ò ‡Á¬‰(m#(+šs6OøYeLè Õ_¦>$Ë y8f#Ϭ²yòP„i)V^ O=¤ñ,e~Œm‡ÒìXÜraeÛÎ7ÍUvþ½y4s·D¬l_òhM¬²«¥mÝk Õö^-˜¸q: …Ày™P­PRãIÛ#’Àßâæ!îÏ‹P).×J}¸klz9i öeâd‰NŠœ(f–H‹É›À¥51ÙKKÄÏæUÆ´Å:~|”·}XLz”æC¸ªþ\I$1_É–VýœÊj϶«z6o‹Ù9÷Ú´euÕæ=¢ðQ[jßË«ó>y„­ uiå# ûAól¾ó$î.9‡À…A`É&T½„õ§¯ÚÚIÁšÅ¤ÕðçYÊ|^‰ïhQæ5ÉŽCcU2lýyÃfDÖ,É™žWy¥|Xyx™ËæCh2ØfÏfrì>Ó•9á™®¼¹˜Q6sÂ+ãÉ­½œÚñÌ·¦˜¯]µoÛÕ5^ŸíõÙòF¾œz~üùi2¨AbëȽ IDAT—V4š[}-ì=°¢ã„w8+E-b0š§4·ðá*;ªSHJr<ÄWÀ€"âdf¥âBÊ.W‘· V/Ó|ý‹ðÈÉÝŸ¤Ý ÌWÞ_Ny3wsÏÛëîèX(ö^ r5íJüá¶Ðqºr‡€CàR!àWü,ÊÞ!I͉"n"Wj’Ù˜N/â§%ÄÖÆíi©„µG¼„¸uífþ(§¿®­bÛ”Þų•Á¯ô“NDθ\9Ãr#q8—‹"pÚ÷Qiiß®tÇq^ÈdÉ”ÕTè³b² Öúê¼ÌR:7[šïúlõrÏ«/;·þkæ¼ÿÄòv< (êŠ\¡x÷±çy…Ñ Ë!àp\üÜki&TšH®dêŘ™=å§}I£?7­7Ú0i7/²IÓdz±Lº HkdËøü͇ËbLºóµå®¯lÈX?ôä {É%‡€CÀ!à8–nB%á¸4pçñü-ˆ4)Ùw›ý¬sþ¼>/6i‚ƒÔâ-&åö)M y ç^˜£QC¶*ÉÔ>q)GjcC]ôE‹›ó£ý§­îƒe˜}[é¥FÒ§}O¬F³$(·ÆÊœþ›d8 ³%Í®i†ýIH0õå—]çýI×ü„Îí|óê?·?Û¦®¹ì‰9Ž¶ì…’sŽ®Ý¥« û½1‘«lìn¸‡€C`¹X“yÞøžã=%wñ'9ä¤ã(Hôã©GÇñãíxèÑãˆÇ³æÖ¦ÍÛѲe6… ¹©¯Ë»¦šHdþ )š:»°LeYuÕ’´ŒO„1/Fyy) XÌœêîÁÄØ*nÁPÏ’¸ TÔíDwoÇ»žÅ­·ÝN¸ÆÇFq®½ßŸh¤µuõtèáÔ¹A¸é²hü…ÁBCάìÓK\ØOÖÄkBäéÊÊ6ŸÙÊ?ŽùÊæéÆr,£5æ÷I±sŸ#‹nÌUp8WR°øM¨‹"p~|¤™’VM&U«å91X€3'»ð½¿„’æM(«º¿ÿ±·¢¼,Š¢hýÃFŠé´Â÷žHáäá~<üã¿Æ¿à5hmm@eÝóæeÞ Ò('1ÊüÈPÉýÿ„T"F™Hâj¯¡Æ,†ÑÁƒ˜ŒOq-]‹’Šuá _?Œ±Øqêß"xüÑ ¬¢¯¹»Ÿñ‰M°?ó ¿ˆæUk™Rš6™­éTz6ÿø­À"V’-¯6‹{É"9L,Ö¯¼œÈBþÌœ ‹ñl¤J}Í™í8s”šÕh}s“Sdê£ícÚ8(_j2fdTÁç«¢¦…ðyþ‚ŸÆ¦þ§¥L½Áa•Tëêi—݇€=7ô]’I`9|8ým»¼CÀ!à¸Ò8/ª ™ÅdJô̦)œëÁɶ>:Þ‹k¯GUi-6_» ÕU%(.c.žœÄÈXâjŒváð±vœ9ÛŽhQ KàŒÉÑX&§ºQ±dEÄIÉ$¾4ç&HŒÉÆúN €ä#PX„‰H ‰È8Fz™ò@‘h#5j$‘ÚºNcllˆŒÖsدª®AEu-εµcbbÜÔåKFá?HVH Æœþ ó¥´ *££•S!MävdxÉÄ8â£Ýèëí7ÁvÇ++€pšƲ° 2î…ãém± ê!—Cœ22xsã•÷þõ€MÒD¬ñDÂE”“±ƒ4ç%IüFFFŒ|‰ø˜‘Qõ‚…Œ'‹HÂKˆ_Ê‹ƒfŒºq<‰8Éó‰é RÉ CüŠHÞJÊ'PRYÇ~‚sP–諞?qžf.š =Ådšä±4<ÕŸ¿žÉ"™¤ˆO$KÒ¬Í á).7ÚÓp†ÏÖÿŒ6݉‹†€1§ê „š7ֽëÈ!àp¬Xfª}8£Æ#{I)0Z¥|áÏÿ#‰®yíÛpçÝÛPWWÃÔÊaˆþnql©œD÷x:ÆB¸å¶-X»¾‰Ú§büËCO¢4¼ÿÛ›LïjSñÓLÛyDŠÌÖNü»¿ŽuuÅh.3¯2ÊÎŦµÇËSÛ\ùX‰jiÒSÊkV¥ü“èikCgW®Ûy·GŒDNy¾ýôqü˃_0òICh5ˆAjPë_ƒí·ß‰ïº÷ßÔŒ(I™Òá}/àðýøÌߊýz9ÕÓ¼„£exßþ{ìÜÒ‚­­•3°2 ðŸŸüzº:ìÇ©ãXª'“ñŸ>pÊKÂF[3u1“Ñœôuë»_ÀK»vãÕ£!Æ4¶®Kbç»~k6a{ü§/,ÉmÇ}¾tèY¢8fÛ­K'†ëÙ!àp\öØ÷²]2óqô$ðÒþ> !È—vSsµyYó3̓")Òµ’l%´*4 ]9QÙ³¯ƒcxìųØyMäϦöeÞÐ^mÈÔ¢ó:§k"*†,ÈM ý“ADJ×Üp‘µe£mŸ,BWñ»P9ñ"éšöZhŒe© @šoXÕêV”–— ÓO¸(‚²²”ÑÄF1DíŽÔê×lÖ-Iò`d¢þ…YlD˜Æqêø!;´{^ÞƒeÜr×{¨¡”Ù(e{<õÌat|}ë{H½æ:4Þ´ÉÈá`oǬ~4fÿ6WÚ6Ëž7ÛY ¶ ó¦¬e¼j•2=tÏ>û"*׿5¥@4=‚—^x‚ržÀþ=ûQ»þf¬.«42öôôbt,†Îîqôž|Ï ãjÞ¢™DóšÆ<ñèÃÔdŽ z󱦵e$Zõ‘QÕã8sú,^|ø‹ˆ$Þˆªê×.ŠŠefR0BuéŽØ…ƒO"T¼†Ú6ðÏ#‚±`5&¢k½±êÊIÁd'’£§ñÍù.ÎÐG±§abê˹x%‰Øø^øñ3ÔêžÅ¶¾ž8äTv/ÜÔ\6Sáq8.sÄlZ2ó Q 
9èˆáÏw“œPUFmu1ÆGǨÅJÐŒVlʈ¬µMðQ]@ÿ0®ðìïB"ž0e#¬Ó?R€œ<º?~ì‡8}ºÕ-›ñš·ÿ,Þ¼#LöœÀ@ßNû˜ãxâÛßÇêÊ0nß¹!¯44Ÿ>Â?AH\‚á(µ€$›< l2óJ[(2*2— YScÐç mdQãŽOLàðÁƒxì`ç{ …¨ öã™'žÀéSgp¶­ï|DZáÚíxe<|躻:ñÒsOàÀ¾}xùåN¬¾öŒ­¯ÄMMýxúÇb´ 7¼û¿ã®»®1Änsi?žêGxâ‡âG?ü:jêÐJm_EH¥74ý+[]2€c‡_¡öì(ª×VbuƒV{X¦"ÔüUoâ(¼g¤Ø9桯}“‘VT4\‹;nkæX'Ðqn/>õ4BEÕøÙî¦Æõü¶›Ñ·;±ìØÎ>ž–½ × CÀ!à¸b8/'mÏ@×It?ƒCkn¼‹®R9Þƒ5«* Aj;Õ/îÂáÝ/âúM4VlÅdÅuXÝ\Aß©$NŸ@íæ›PÚ2Â6¾±76-ô cš2ÓÒ’s½×EkHB*#ƒ8Ü÷ &égV@¢XRµšÎ û>†`jpŠ=Œ¥æ+Ææ[QS_kÈO_/}á&Y†©íô’ÃN`Û6š[CÊ¢">öÁrzáˆ8‰Øå3i¦éÔ?ó)œÚ³¯¾ÚŽŸúo‡Õ«Va+±) “’ˆ5ÐÜûû¿ûïÐ~öþë_œR÷Sil„ZÍžsxø{¡fÏ=¯ìÇö[Q¿j-n}߯ÆêÊ‚c÷/¾x Ïîîä˜Ö£·§|ÿ;†ï­¹v'~ò¿ü%Ö• !=Úƒ/þ3xy×ˈMÄðéÿýç¸ëö ¸i{=ž{æ–Ôãßþ$îÍ4Ö–Ò/)…ÖÍ;±vÃ8^K}l7¾ÿè^<ù­‡Ð»¡oÛ¼ÖÂJªñÞðÆëqSK•Qú£%*±óÖ×bÃÆM$’I.b)Ç9ÞÛjšŒVPæL¥ädN´E1 ÐÜ^…ü§ß7&àñ±± $À\Õ\FŸ»|©ó4 çË»PXuÞúÖûðå¥E$»PÐ÷üÎÿø:NµÅû;ðškkQ]Î׌;w 0õ]âw¬v¿ dr"8Ë e1¡jPr§“y¼€±Ýäà^\Âc15Z À±4pg΢óì9ô÷ö¡£˜;Œt#Uhn(Éh—H°èL!S):Ò'ãô™ ™öÕ1—²/i–<õ¡—÷”2|ôS›6óVÃNÒ‰^!BDàt dFiRJ¸°!¾¢ÔÊÐÌ–N°WRRwFÍá$ø”7fœòÃŽ‹"XK‹ÆZ$®`ÿ™~yžÃ3²H&1%ª—?Iã56ÐÇúI$Ó¥hjlD#µQ%E™’lDZÀê†5F¶ë¯ó:WnRãLõ¢ûÜ19x}ÝÔhR{Hm`wÏ&’=¨Øw-;W¡¤4á:Ú»pâØ1®ª¥ös|”‹FŠ08ØÏ…#½8IJ[£Ä˜c¢–Î,È  É4CÂĸha(NÒ¦¦±ë×­Ae95œCi8ÃÄ‹zJ”jP[EóxU‡Î cl4Œ$Hò8øøøÚ¾Œ64c¬¬r‹+UUUa5yÑú&T–pÿ\!äMšIþk“1úÚqþDêÚ;z©9Mм>Àe»œmÈž›Œy]‹QŠêÑØZ…Æ–U¨¯¯6/ÿÔÄb…¼¿8ÞTŠ÷}!%§K—7öÖ°ßs}Ç\r8üœŸޝä¾ðZÄkª°ñï%- a}«>bdp?üׯÑlÇU|î?©îNòÅÜŽ­[×0r¢2)DsÙÆ;îãJÒ1 tŸBMÓ&CÂô ׎ "H"!š~~‡öôãÅ=…Íüÿ¡xS3ö*î½ýxá¹WPt¼ëÖ5ã—þã¯à©=BßµþéOXö?„ ëjÑ@ ]鉳4­žÅúÛ^ÚÒ=wÓ?°|3ê›®Áëw4ñý¡KR\ÀÑkæªâ(n^3Žgö‡IŠÊ0Qt+ãGÐ~üþîw~×½ã÷Ì¢×mèÇÚÖf´PÛø¾ŸþcfVZXá=RŽ¡hây’ìaôô â?þêŸ 9rŠûÔž4cnÙ´7?ð[¸.’2¤Ð⪻°7î@]aÞPWŽ «+§ä+ÀÁ圯 1#·¬iDÄæ,ö¶-w¼¼°ß }Ï]r8éè]jÓù=%©9IŸ~é^ílµmÎyl®¤ù4HˆÛVEÃØ²©§Û¨¥RÀžô¥‹q|¡²ræms°û4ûºððw¿Iò±«×®¡¢-‰c±Ûp&¶·•}‰L®“˜úÅÜ õ-µt|oA BcÛó¨©m0f¾=/ï#+Æšµ[§õuâÍÃÏà÷>ö¼íí÷à]ïzƒ¹.¿,³Òsœ@‘G÷saE4`LŸjD~]Öœ(íƒÌ±jNÚ¿Wžx =Ý iù^Üú_Úæ:lk,¶V.9{ùµÏàXÏÏ Dð†²ÇP[tÜ„øøùÿúh]Ý‚íÍ…X_ú$öîëÀóÏÀ±Á!œZ…{ijìí=‡û÷㣹“GGÐõüófLŒdŒC©†Ih©œJÔ€¥ éÀ Æ+“F’aìªÇÿûó_0æÝ¯|ëîúŽýh»BÔª’`…‹Ë°ý¿»w6àÎí•4ÉÒ4NͨM&Ö\ç ԸͭV/onäB„hüìgðÊž3xâ¹!üÅŸý'ÔÖzMu…•°W¨’Æh%Þ¾ºþ„ãH·áøãŸÄá£gð©¯ïG<¸µÍë°uS£µ».­ ’rM SwæÔ•1_NJ‡€Càâ `\Ë2]-™ÀI RÀ™ÉcoÍdZZ˜N£Z˜±Aî=Êxk\xÊðÜ ¡š BO‹RBgüŽáìÈiöµh"™Æ\ª·õ,ippÀø{ 2ÌGYy%øNp•k5å)% ™<ÒÄ{O#EM\0Êà`rÊM?)ö_RÅ(Û—Ÿ–äÕxrSüSÅ'ãƒÀã?вu7 zÝ6’ $ÇÚ0tîþù>1UÖŸééìCq'ÜŠá`%ʹ禙iόތÎDŠHž¡rÊ0‰jš¯Ë¸¨ T¹'ÎŒ¡x€„Ù&jµ$Å IÅÁ6—ö!º‹¤0ˆnðjM&ãèm?•µ'©¹ü‡ßøg<½—ã-œ2ïN“~n…ƒãá}ŸúÜãæRóÆxç¯þîyí5Ü^dµ¯wZ÷ae `Í©ÒÂy¾¯+Cn'¥CÀ!à¸P,‹ Õ®ÝR3ŠRjƒŽŸòÄÐ{Ýøñ(VÓªÔÕÜõ;nÅֱÊT ´"oMê(¦æ†2n‡U`L©•••¨®.¢ó¹{2ÞÒNQßbÌ)þüýoº—þu#Ø·ïU£M Âøä§Ee1µR•Œwv†š¢2¬Úü¶¥H¸BãÑGÅ­75¢¢,Äà¿(¬íFma?FúRcè™qP™ÖÝzÆSU¨ª«eàáõØtÍ5tì§v‘¡Pä„?k*,åÎqÜvÍq´9‚Wºûðô¾6¬ àæÍ5ÆŸÏÖMvƒçá»ÿövÜÆkHà” ‹[PÞ¼ïúù¢¾¦«¸`a¸ïµ["½¸ñîëˆeΙî·gÛÕ1•ÂDïóæh|Ù&šH–G¢76T$0‘X3uw"6ô5Ô{¥ÓÈWUÉʨqNáùWÏâ[ß:êí¿ŒêÚÍ¥ÿ_zºšZ¸5øÈGÞkº•f³ªa¢Ô¢5·4rÆKHV2`sž”ˆÇpìàntÆ«¸»Z)îj®¥J3È~iZï@<ìáÛ@óq­wä¤ñ‘~ô÷´áoÿäA.Âî¸ûv¼õ'~U\4QÛ­ÙÀ9†<ŸÅœªîã B@~“Òo›¸+Hn'ªCÀ!àXn–lBÕCTIÖ4/ŸFyde‘$Bqš ©é!)‹r¤MÑâ‚¥ ÑŒ©×8+Ç©Î+UÄ…ÅÅÔº±·¿RÜ5cB1Omv¦•žLŠ çÏ×ÕÕQŽ* -œüèδ b¸˜;L¤ÐÞ]Ž·§ª 4blraãìêb€ß ʸÂ1äVO‘8]»NŒrÕê8ÿ<ÇwѤQZ[F-S·˜dÈ‘šj«8F­påK…bI2O:#¢‡N0«6+JèF Ü™3mt3 ™] B ѡզچª¿ƒ¡X¸x¢wrÇ““øNrk-­Æ†é—·e'V7Uc3w²è8SÆm«úÿ\mU1Š8·]âKÆÚI©D~ƒ4Û¦âÔŽRS%yáŽ>vŒ“§s\ñYÀ~‚ eÅÍŠÐã'Nccc1Üiòå*Q’çý‘&»ÑÞÕSc¸í\%'™ìÀþ½hf<;j)K9‡Š·¢Ì(íå¥A®œ F2"7*KŸœ)jAGhÞîgŒ¶ÍÆ$ÙE4;+Žß •ºÃãÔ¼hÞO‘ͧ¼6N®ÛA}ÄÆ‡1ÚßF_¹ÓX½n-®ß¶›·n¥l$„Œ<å[ÜV‹ d\Z¹H§ßpÞ}ÍqL¿ÝWîÀœä‡€Cà<ÈÚ³ЈîµmV€ŽLz†*¬†ªÑ‚1l À‘ĵy[©©*BCmö%:I Ë‘Sô=ó4,ª*H˜6¢[ù‰ä…IL³€êOìCŽë œkó½Ô®Ð«²^ö»4ŠF>„$¯£7€Þá"¬ÍÐXÁkÖ÷c×ÙrúëC+${I–ÒÃ\ÚlúÑ–[Ú^+ISæpß~s.6–2+HïØQ‹ ®Ü —40±éÆ\×?ú¨:*Ù­½$§R}Ã"«?ŒÖíᆎ⇟û}¼Ì@¾Éàÿ2Ar'ÂCr?ó‰çÐÖ1€ ÷ý\wãVl®bo㽈¾X‰poWá^ìç®qüØ |å ŸÅ¯¹–{Í ¯é)HLJ 4pCguVs” 5Ò†âBQæ`¤mÛPànM¸ùÖÑÆp/_ý¿¿„ø/ÿ¯©@¾Ã 4Üßׇï|ó«Øóê~³ªóŽ;¶ ªœ¾…m1»?ÇÉ#}øøW÷âCoÙŒ5eF˜ÞcOãôîÅsO¾Ä­ÕZqÝÚjÄùDÕ.áöïâ¹Ç+1jÀÚÍ¿mõ½h.ET¤˜›Ü‡JÌN #ç¸*2ˆ¦õ-beŒ 
GEÓ•Y¦sKë›g|¥!àŸgR•„4÷+³ka5š×lÆM·Œr÷‰AÄ—ìé‡>ƒÞµ£Cœá3z‘(Ù‚ú-e¸}[ Zë©äÆïM«Z±sÛ*.Z8„¿óÏØC­dMx 'h«cÛ=ïCYU½s}ô(JCY?/i¨´°¤(¹E8P)ý ©É™KÒ„M“æêÕMØõƒ/¡àöÛPyÓvÜyûZ0.ä¾°ÜÚj÷£8{d:ž˜­¾Æ©¹M?ãYT¢ašî"ÜÓ´)€a.ž@Å&4„Op±Âz»¹­V¬š«0#Ô& 2š}ìh>ì~•d”š-jàÂÑZþ²/"k¥ùµE‘"Ô­ññ\2IsžHý§H<%ƒ^ ½T¨4A}3Ò™ŒH\°M«×“Ì2^ZûY´ÑŒúÒ“_ÃÕɤ­oým4¯Ý‚[·„QUÃq³žâ¾n_‹ÚôjüÍ¿f´Nª¢vK«ðþŸüTÔ5Û1Ô—œaŒ¶Í©\ÁJ2›"©HrL“‡¨m@eÓvÆÞ+a»®Ì­ä–V«°ióZ<óÔ÷ÐÂ#‰»ïÃí7¯GGs'Î%Iž^4DîÕŒ|Ä¥¨îv\s÷ÝxÝ;îGkE „¨ÜŒ{ïÜŒC‡O⥯ÿTo\C!Íá;x¶\·×®öü3Mšƒ+—­½­­ ñ‘>ç¿óyÆÔorÓúZÜrÏ]X]æã(ãì™ã8zbÛH4‹iâ.f¥¥åè¶ï¼õ ØyÃV"rdç¶+1uÔ6UZÁeíyž6‰<Á˜U#ú·1ŒÈÿø‡—¸¨¢µá~W݆‰‘tìú2öž S~*‚wß׊·ÜU„ú*¸—¦\EÝNCàÔ¨Hza«ßôý¹šp²~vTméµ’R 'M€Êó/_"—ò°"á•©59ÞÁzgSáPŒO_ãñúEHî{/ÕGœè¯È¥°É®-*fÈîz¡,8žxœ²µãV[#SC’i¸¦–Ú¾(}ËŠ¸J”â‰(ê¼üЄ{œaN$_\« ˜JÊë¸À¢žSYL_C®úÍѾs3$ÇÚ ™7»vtvc(X‡Qð-U)úáÒŸ’»ÛsŒÔÅòaY:…Ðs7 îAŸ80\KL»pLI;=#Òª1º½P§ã²Ò?é{§SÚÙ%‡€CÀ!°ÒHNôð]™ÆÖk·™w«Ïøø8Ú¸•¦’Pj¡ç‡?¾û{‰dúÚEiàôÒ–JÚ§ êmÎÍàË«›°†$«ªœ1Öè„~vxã‰ãè9¸cg"èa¼·¾E4޼Œp&n×T:IÙõ4¿®Fy%p»*>• ³½õЙ“¶KòHÓcäá+\p;ŽEea7"U(o¡&*d,±nýDMCt%›7aO<¢¡Îdúœm#,%‘j )aœ5Câx1ÅØl$"ìKäHýâHY, S;¹I¦L£E$Ù E —¤EFFùljHk ’­Þœ–H<´SÃáRóTˆƒÐ†ŠªˆITa˜2±%Ö­a€;Njã:i2<„Q¾«õ>M ŠŽ2Y¶¬b”|Fýß~ÃíŒÇFÿ(2‡4Íù’úR²ýéUNºfȇ˜”´Zij—D úÏAºxe$š•:Ç—~ÏÛ¦¿×ö¡‘¦Ô3lmÌÈ!íŽ\|¼›&Ø8 R3bô›T@a¦4÷-Hk!€g‚” s’&TIc&eŸ"3³%iµ¼Ë”ž„ɦâr)²i¢´8D/£üÍxõ‚¬[Éš!í–TÂ$·žÎ)åž7ì+è-ñJˆ²TÎDçKº‰Œ?’)H"Ź‹–柿|myýòJfÎyKpÓzn{ÆS’›SJ±ü}äkÅ»šðüNÛÓÂÙûþjÆÄÝ!à¸zXó¯B¥ ̘ܤõ²)¨¤–©‚û˜Öá†riµ§û q#ôQ:¤×O5Gå'Ï¢˜~a5Møæ^§RiåIæeÎ>HûŒ)ÕëÏæ¥í¡&Ž&V™¥zÓ?€É¾£ˆÆÛðºÍ<ܵñ=8°ïƸ;ÀZ®B­Œ’€ÑL[^{=M¬g ykX/ŠJjè3U‡¡ž—¦\€¾|P+ûaÐZCÖÓF iլʚòÓ®zŬ¬%‰“ªÉ¿¢5§hÞÆ IÜrëIS¨kj[ø­„$5›æÂÕ–qG‡ÀÔó÷¼ÛvË݇À•Ž€Þï6-ŠÀ™Šª¬·k†èœù¨Œ†Ž¤…„,(Ÿ.ÆÿJr…jˆÄ®8Xêx5Š‚ÕÌ× ‚‹"$72ΗÌËÜt+“‰´~ùɈ`>ËKŠ¡Ѻª‰ÒqN04HI„uR¿f†‰O`b‚+)Ëc¦‡¸A«iv¤V+-ç\qZXŽÿKÖÓ¬çù€Éo®€~aJëTß¶ss%ÿ?¦ˆ+iܦ°b3.9(ÓnþfŒ¦ÎÖ³2˜™ÔêëšÎë³´W q–/Íi‹Ç¥éÝõºR°÷î}ûýX©cqr;…"°(gL$gV³CÎàiÏø•cþ´Dß&í‘YÕPËP»LòUIV¡:¥?˜4hë ɘQoZ#Ù"$¶¬?/â¢$þÉÉ0+P¡¿lªnÜ€íÛ¹30†®ƒŸ£†í(††½-£š6߯÷¡³§#“4·öqeä‡I=ú3ľRc]$F Ṟ<úÇ-”Y™çÂÊšC³gs¹õ4QãÂi3IáL¼&”͇•½îŽ+{Ï»m·®äYvcs\Ýè9ç·æ-ŠÀy„aҘ猦‡¤lÁIL«°’Å=•×Ë)¨´!Y,™1>d<7WêJÐÌJG};àP¸k®{3æ7ãloµsžv{`"íÅ2Ó.Ò&zIשÕ"Q•yRã—u¾$%Ÿä´I9µç;e/ÍyÌ­§v-i³¢»è¸ 0ß;Ž;û½½ ApCv8®H¤¼Yò^¨"$Z±(­úœ)œ†þ”D,r Š%fïP(›üõì¹mƒ*¬<ÿÒŒ1§öô1·Û&#Mð:÷b ”rÛ&Æ|# 3V\,VË䥽³&G’CyÔ³EÏI_„“ s郿äøÍ J;pË$•4fs/‹ô-%ùëI†¼cdÓæüÒºXŠX®ŽCà²C@ßm»e¾¢’N_H—‡Àˆ€UH-hhÆI˜äF&Tƒ‰>lÆœš! 
[binary PNG image data omitted]
trove-5.0.0/apidocs/src/images/Choose_Image_CCP.png0000664000567000056710000005140512701410316023207 0ustar jenkinsjenkins00000000000000
[binary PNG image data omitted]
trove-5.0.0/apidocs/src/cdb-devguide.xml0000664000567000056710000012560112701410316021271 0ustar jenkinsjenkins00000000000000 GET'> PUT'> POST'> DELETE'> '> '> ]>
OpenStack Cloud Databases Developer Guide
OpenStack LLC 2010 2011 2012 2013 OpenStack LLC API v1.0 OpenStack Cloud Databases
Copyright details are filled in by the template.
This document is intended for software developers interested in developing applications using the OpenStack Cloud Databases Application Programming Interface (API).
2013-05-02 This document is for the initial OpenStack review.
this is a placeholder for the front cover
this is a placeholder for the back cover
API Developer Guide cdb 20
þ:aü/sFu“ä"M”m¢hÉ›Ñzæ™g¢'-›nq ‹]5ÅZ¡Ž1E˜BcóäðüqÒ.õY)…Ú!,‘gËòâýâ¹'\š2úB,=£x¥†‰:IX#í§¤(MµÃvúŒ€.rYJ–‘€$  ´6\°dÚA±k)·Hˆ(æq+SÉqÑ.÷Ùç¾¥zˆ²!ú&‰¸´¾’wwµP߸ɪäÚY¨®ì:ˆ Ù¿B!ml®\¡ýŠ­#zI“€Ú>lˆ2^š$  H@¨g Æ"ÙƒÕõÌÙc—@[#Ðâ‚­­³¿€$  H Ä®Uë”@Û'Ð"Y"Û>6@€$  H@@í (ØjÏØ$  H@€$  TD@ÁV6w’€$  H@€$P{ ¶Ú3¶ H@€$  H@P°U„Í$  H@€$  Ôž€‚­öŒmA€$  H@@Elas' H@€$  H@µ' `«=c[€$  H@€$P[EØÜI€$  H@@í (ØjÏØ$  H@€$  TD@ÁV6w’€$  H@€$P{ ¶Ú3¶ H@€$  H@P°U„Í$  H@€$  Ôž€‚­öŒmA€$  H@@Elas' H@€$  H@µ'0Q¹Môïß¿Ü],/ H@€$  H@([°õêÕ«‚fÜE€$  H@ÀøA _¿~Ív †D6j’€$  H@€$P[y¼,- H@€$  H Ù(Øš µ I@€$  H@(€‚­<^––€$  H@€$Ðll͆چ$  H@€$  ”Gàÿ¿»ANÍÄÇ£IEND®B`‚trove-5.0.0/apidocs/src/cdb-devguide.xml0000664000567000056710000012560112701410316021271 0ustar jenkinsjenkins00000000000000 GET'> PUT'> POST'> DELETE'> '> '> ]> OpenStack Cloud Databases Developer Guide OpenStack Cloud Databases Developer Guide OpenStack LLC 2010 2011 2012 2013 OpenStack LLC API v1.0 OpenStack Cloud Databases Copyright details are filled in by the template. This document is intended for software developers interested in developing applications using the OpenStack Cloud Databases Application Programming Interface (API). 2013-05-02 This document is for the initial OpenStack review. this is a placeholder for the front cover this is a placeholder for the back cover API Developer Guide cdb 20 Overview OpenStack Cloud Databases is an OpenStack-based MySQL relational database service that allows customers to easily provision database instances of varying virtual resource sizes without the need to maintain and/or update MySQL. Interactions with Cloud Databases occur programmatically via the Cloud Databases API as described in this developer guide. OpenStack recommends that Cloud Databases users back up their data using mysqldump until backups are supported in Cloud Databases. The following figure shows an overview of Cloud Databases Infrastructure: Reviewer: need to edit graphic above so it says "The Cloud" rather than "The Rackspace Cloud".
Intended Audience This Guide is intended to assist software developers who want to develop applications using the Cloud Databases API. It assumes the reader has a general understanding of databases and is familiar with:
ReSTful web services
HTTP/1.1 conventions
JSON data serialization format
Document Change History This version of the Developer Guide replaces and obsoletes all previous versions. The most recent changes are described in the table below:
Additional Resources You can download the most current versions of this and other API-related documents from http://docs.openstack.org/. We welcome feedback, comments, and bug reports at https://bugs.launchpad.net/reddwarf. This API uses standard HTTP 1.1 response codes as documented at: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html.
Concepts To use the Cloud Databases API effectively, you should understand several key concepts:
Database Instance A database instance is an isolated MySQL instance in a single tenant environment on a shared physical host machine. Writer: once we support MSSQL, we need to describe here what is used for MSSQL in place of database instance.
Database A MySQL database within a database instance. Writer: once we support MSSQL, we need to modify the wording here, such as: The actual database, whether it is in MySQL or MSSQL.
Flavor A flavor is an available hardware configuration for a database instance. Each flavor has a unique combination of memory capacity and priority for CPU time.
Volume A volume is user-specified storage that contains the MySQL data directory. Volumes are automatically provisioned on shared Internet Small Computer System Interface (iSCSI) storage area networks (SAN) that provide for increased performance, scalability, availability and manageability. Applications with high I/O demands are performance optimized and data is protected through both local and network RAID-10. Additionally, network RAID provides synchronous replication of volumes with automatic failover and load balancing across available storage clusters.
General API Information The Cloud Databases API is implemented using a ReSTful web service interface. Like other cloud products, the Database Service shares a common token-based authentication system that allows seamless access between products and services. All requests to authenticate against and operate the service are performed using SSL over HTTP (HTTPS) on TCP port 443.
Authentication Each HTTP request against the Cloud Database service requires the inclusion of specific authentication credentials. A single deployment may support multiple authentication schemes (OAuth, Basic Auth, Token). The authentication scheme used is determined by the provider of the Cloud Database service. Please contact your provider to determine the best way to authenticate against this API. Some authentication schemes may require that the API operate using SSL over HTTP (HTTPS).
Cloud Databases Service Versions The Cloud Databases version defines the contract and build information for the API.
Contract Version The contract version denotes the data model and behavior that the API supports. The requested contract version is included in all request URLs. Different contract versions of the API may be available at any given time and are not guaranteed to be compatible with one another. Example Request URL (the contract version is the v1.0 path segment) https://ord.databases.api.rackspacecloud.com/v1.0/1234 This document pertains to contract version 1.0.
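For illustration, a complete request against this contract version might look like the following. The X-Auth-Token header shown here is an assumption; the actual credential mechanism depends on the authentication scheme your provider uses, as described in the Authentication section.
Example Versioned Request (illustrative sketch)
GET /v1.0/1234/instances HTTP/1.1
Host: ord.databases.api.rackspacecloud.com
X-Auth-Token: [your authentication token]
Accept: application/json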
API Version The API List Versions call is available to show the current API version as well as information about all versions of the API. Refer to the API Versions section for details.
Date/Time Format The Database Service uses an ISO-8601 compliant date format for the display and consumption of date/time values. The system timezone is UTC. MySQL converts TIMESTAMP values from the current time zone to UTC for storage, and back from UTC to the current time zone for retrieval. This does not occur for other types, such as DATETIME. DB Service Date/Time Format yyyy-MM-dd'T'HH:mm:ss.SSSZ See the table below for a description of the date/time format codes. May 19th, 2011 at 8:07:08 AM, GMT-5 would have the following format: 2011-05-19T08:07:08-05:00
Explanation of Date/Time Format Codes
Code Description
yyyy Four digit year
MM Two digit month
dd Two digit day of month
T Separator for date/time
HH Two digit hour of day (00-23)
mm Two digit minutes of hour
ss Two digit seconds of the minute
SSS Three digit milliseconds of the second
Z RFC-822 timezone
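Applied to a response body, a timestamp in this format appears as follows. This is an illustrative JSON fragment; the created field name is an example drawn from instance representations, not a normative part of the format itself:
{
    "created": "2011-05-19T08:07:08-05:00"
}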
Pagination To reduce load on the service, list operations return a maximum of 20 items at a time. This is referred to as pagination. Cloud Databases has separate paging limits for instances, databases, and users, which are currently all set to 20. If a request supplies no limit or one that exceeds the configured default limit, the default is used instead. Pagination provides the ability to limit the size of the returned data as well as retrieve a specified subset of a large data set. Pagination has two key concepts: limit and marker. Limit is the restriction on the maximum number of items for that type that can be returned. Marker is the ID of the last item in the previous list returned. The ID is the UUID in the case of instances, and the name in the case of databases and users. For example, a query could request the next 10 instances after the instance "1234" as follows: ?limit=10&marker=1234. Items are displayed sorted by ID. Pagination applies only to the calls listed in the following table: Verb URI Description &GET; /instances/ Lists the status and information for all database instances. &GET; /instances/{instanceId}/databases Lists databases for the specified instance. &GET; /instances/{instanceId}/users Lists the users in the specified database instance. If the content returned by a call is paginated, the response includes a structured link much like an instance item's links, with the basic structure {"href": "<url>", "rel": "next"}. Any response that is truncated by pagination will have a next link, which points to the next item in the collection. If there are no more items, no next link is returned. See the examples of paged List Instances calls that follow. Reviewer: Need new examples that show OpenStack host. List Instances Paged Request: JSON Notice that the paged request examples above set the limit to 2 (?limit=2), so the responses that follow each show 2 instances and return a marker set to the UUID of the last item in the returned list (?marker=4137d6a4-03b7-4b66-b0ef-8c7c35c470d3). Also a link is provided to retrieve the next 2 results (limit=2) in the link element identified by "rel":"next": List Instances Paged Response: JSON HTTP/1.1 200 OK Content-Type: application/json Via: 1.1 Repose (Repose/2.6.7) Content-Length: 1172 Date: Mon, 18 Mar 2013 19:09:17 GMT Server: Jetty(8.0.y.z-SNAPSHOT)
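The paged request and response samples referenced above are pulled in from external files in this source tree and are not inlined here. The following is a rough sketch built from the limit and marker values described in this section; the two instance objects themselves are elided, and the host is reused from the earlier request URL example:
List Instances Paged Request (sketch)
GET /v1.0/1234/instances?limit=2&marker=4137d6a4-03b7-4b66-b0ef-8c7c35c470d3 HTTP/1.1
Host: ord.databases.api.rackspacecloud.com
Accept: application/json
List Instances Paged Response (sketch)
{
    "instances": [
        { "...": "first instance object" },
        { "...": "second instance object" }
    ],
    "links": [
        {
            "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/instances?limit=2&marker=[UUID of the last returned instance]",
            "rel": "next"
        }
    ]
}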
Faults When an error occurs, the Database Service returns a fault object containing an HTTP error response code that denotes the type of error. In the body of the response, the system will return additional information about the fault. The following table lists possible fault types with their associated error codes and descriptions.
Fault Type Associated Error Code Description
badRequest 400 There were one or more errors in the user request.
unauthorized 401 The supplied token is not authorized to access the resources; either it is expired or invalid.
forbidden 403 Access to the requested resource was denied.
itemNotFound 404 The back-end services did not find anything matching the Request-URI.
badMethod 405 The request method is not allowed for this resource.
overLimit 413 Either the number of entities in the request is larger than allowed limits, or the user has exceeded allowable request rate limits. See the details element for more specifics. Contact support if you think you need higher request rate limits.
badMediaType 415 The requested content type is not supported by this service.
unprocessableEntity 422 The requested resource could not be processed at the moment.
instanceFault 500 This is a generic server error and the message contains the reason for the error. This error could wrap several error messages and is a catch-all.
notImplemented 501 The requested method or resource is not implemented.
serviceUnavailable 503 The Database Service is not available.
The following two instanceFault examples show errors when the server has erred or cannot perform the requested operation: Example Fault Response: JSON HTTP/1.1 500 Internal Server Error Content-Length: 120 Content-Type: application/json; charset=UTF-8 Date: Tue, 29 Nov 2011 00:33:48 GMT The error code (code) is returned in the body of the response for convenience. The message element returns a human-readable message that is appropriate for display to the end user. The details element is optional and may contain information that is useful for tracking down an error, such as a stack trace. The details element may or may not be appropriate for display to an end user, depending on the role and experience of the end user. The fault's root element (for example, instanceFault) may change depending on the type of error. The following two badRequest examples show errors when the volume size is invalid: Example badRequest Fault on Volume Size Errors: JSON HTTP/1.1 400 None Content-Length: 120 Content-Type: application/json; charset=UTF-8 Date: Tue, 29 Nov 2011 00:33:48 GMT The next two examples show itemNotFound errors: Example itemNotFound Fault: JSON HTTP/1.1 404 Not Found Content-Length: 78 Content-Type: application/json; charset=UTF-8 Date: Tue, 29 Nov 2011 00:35:24 GMT
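The fault bodies themselves live in external sample files. Based on the code, message, and details structure described above, a 500 instanceFault body would look roughly like this; the message and details text are illustrative:
{
    "instanceFault": {
        "code": 500,
        "message": "The server has either erred or is incapable of performing the requested operation.",
        "details": "Optional debugging information, such as a stack trace."
    }
}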
Synchronous Versus Asynchronous Faults Synchronous faults occur at request time. When a synchronous fault occurs, the fault contains an HTTP error response code, a human-readable message, and optional details about the error. The following Database API calls are synchronous and may produce synchronous faults:
List Users
List Instances
List Instance Details by ID
List Databases
Enable Root User
List Root-Enabled Status
List Flavors
List Versions
List Version Details
Asynchronous faults occur in the background while an instance, database, or user is being built or an instance is executing an action. When an asynchronous fault occurs, the system places the instance, database, or user in an ERROR state and embeds the fault in the offending instance, database, or user. The fault contains an HTTP error response code, a human-readable message, and optional details about the error. The following Database API calls are asynchronous and may produce asynchronous faults:
Create Instance
Delete Instance
Create Database
Delete Database
Create User
Delete User
Resize Volume
Resize Instance
Restart Instance
Note that an asynchronous operation, if it fails, may not give the user an error, and the operation can error out without a failure notification.
Database Instance Status When making an API call to create, list, or delete database instance(s), the following database instance status values are possible:
BUILD – The database instance is being provisioned.
REBOOT – The database instance is rebooting.
ACTIVE – The database instance is online and available to take requests.
BLOCKED – The database instance is unresponsive at the moment.
RESIZE – The database instance is being resized at the moment.
SHUTDOWN – The database instance is terminating services. Also, SHUTDOWN is returned if for any reason the MySQL instance is shut down but not the actual server. If MySQL has crashed (causing the SHUTDOWN status), please call support for assistance.
ERROR – The last operation for the database instance failed due to an error.
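The status appears as a field on each instance in list and detail responses, for example (an illustrative fragment; the id value is reused from the pagination sketch above, and the field names are assumptions based on the list operations described later):
{
    "instance": {
        "id": "4137d6a4-03b7-4b66-b0ef-8c7c35c470d3",
        "status": "ACTIVE"
    }
}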
API Operations Do not use trailing slashes (/) at the end of calls to API operations, since this may cause the call to fail. For example, do not use &GET; /instances/detail/ (with the trailing slash at the end). Rather, use &GET; /instances/detail instead.
API Versions This section describes the versions that are supported for the Cloud Databases API.
Database Instances This section describes the operations that are supported for database instances.
Database Instance Actions This section describes the actions that are supported for database instances.
Databases This section describes the operations that are supported for databases.
Users This section describes the operations that are supported for managing database users. In this section, "user has access to a database" means that the user has full create, read, update, and delete access to the given database. In other words, on a cloud database instance, a user named USER and a database named DATABASE exist, and within MySQL, a GRANT ALL ON DATABASE.* TO USER has been issued on the instance. There is a bug in a Python library used by the service that may cause incorrect user deletions if a period (.) is used in the user name: the user name is truncated at the period, leaving only the portion from the beginning up to the period. For example, a user named "my.userA" would be truncated to "my", and if the user "my" exists, that user is incorrectly deleted. To avoid the problem, do not use periods in user names.
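In API responses, the access described at the beginning of this section shows up as the list of databases attached to a user. For example, a user representation equivalent to GRANT ALL ON exampledb.* TO exampleuser would look roughly like this (the names are illustrative):
{
    "user": {
        "name": "exampleuser",
        "databases": [
            {
                "name": "exampledb"
            }
        ]
    }
}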
Flavors This section describes the operations that are supported for flavors.
Glossary database A MySQL database within a database instance. database instance A database instance is an isolated MySQL instance in a single tenant environment on a shared physical host machine. Also referred to as instance. flavor A flavor is an available hardware configuration for a database instance. Each flavor has a unique combination of memory capacity and priority for CPU time. volume A volume is user-specified storage that contains the MySQL data directory. Volumes are automatically provisioned on shared Internet Small Computer System Interface (iSCSI) storage area networks (SAN) that provide for increased performance, scalability, availability and manageability. Applications with high I/O demands are performance optimized and data is protected through both local and network RAID-10. Additionally, network RAID provides synchronous replication of volumes with automatic failover and load balancing across available storage clusters. trove-5.0.0/apidocs/src/xslts/0000775000567000056710000000000012701410521017373 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/xslts/js/0000775000567000056710000000000012701410521020007 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/xslts/js/trc/0000775000567000056710000000000012701410521020577 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/xslts/js/trc/schema/0000775000567000056710000000000012701410521022037 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/xslts/js/trc/schema/controller.js0000664000567000056710000001174712701410316024574 0ustar jenkinsjenkins00000000000000/** controller.js (C) 2009 Rackspace Hosting, All Rights Reserved This file definas a single object in global scope: trc.schema.controller The controller object is responsible for displaying a menu that allows users to view schema source and jump to various definitions in the schema. **/ // // Initialization code... // (function() { // // Make sure dependencies are defined in the global scope, throw // an error if they are not. // if ((!window.trc) || (!trc.util)) { throw new Error("Require trc/util.js to be loaded."); } // // We use YUI to build our controller menu make sure we have the // proper dependencies loaded, call init when we do... // function InitController() { trc.schema.controller._init(); } trc.util.yui.loadYUIDeps (["menu"], InitController); })(); if (!trc.schema) { trc.schema = new Object(); } trc.schema.controller = { // // Internal and external links by type: // // type --> array of links // // possible types include: import, include, element, // attribute, complextype, simpleType // // each link contains the following properties: // name : the name of the link // href : the link itself // title : a description of the link links : new Object(), // // A single link that points to the schema index document. // index : null, // // Our initialization function // _init : function() { // // Load the menu... // var controllerDiv = document.getElementById("Controller"); var mainMenu = this._menuMarkup("mainmenu"); for (var linkType in this.links) { var subItem = this._menuItemMarkup(mainMenu, linkType, "#", null); var subMenu = this._menuMarkup (linkType+"_subMenu"); var items = this.links[linkType]; for (var i=0;i Array of sample ids. // samples : new Object(), // // An array of code data.. // // Code data is defined as an object with the following // properties: // // type: The mimetype of the code...href: The location of the code // or null if it's inline // // id: The id of the pre that contains the code. 
// // The initial object is the source code for the current document. // codes : new Array({ id : "SrcContentCode", type : "application/xml", href : (function() { var ret = location.href; if (location.hash && (location.hash.length != 0)) { ret = ret.replace (location.hash, ""); } return ret; })() }), // // Sets up the manager, begins the loading process... // _init : function() { // // Setup an array to hold data items to load, this is used by // the loadSample method. // this._toLoad = new Array(); for (var i=0;i -1) && (ieVersion < 8)) { code = trc.util.text.unix2dos (code); } var pre = document.getElementById(codeData.id); var preNodes = pre.childNodes; // // Remove placeholder data... // while (preNodes.length != 0) { pre.removeChild (preNodes[0]); } // // Set the correct class type... // switch (codeData.type) { /* Javascript mimetypes */ case 'application/json': case 'application/javascript': case 'application/x-javascript': case 'application/ecmascript': case 'text/ecmascript': case 'text/javascript': trc.util.dom.setClassName (pre, "sh_javascript"); break; /* Not real mimetypes but this is what we'll use for Java. */ case 'application/java': case 'text/java': trc.util.dom.setClassName (pre, "sh_java"); break; default: trc.util.dom.setClassName (pre, "sh_xml"); break; } // // Add new code... // pre.appendChild (document.createTextNode (code)); }, // // Retrieves source code text // _getCodeText : function (codeData /* Info for the code to get*/) { var pre = document.getElementById(codeData.id); pre.normalize(); // // Should be a single text node after pre... // return pre.firstChild.nodeValue; }, // // Normalizes text by ensuring that top, bottom, right indent // levels are equal for all samples. // _normalizeCodeText : function (top, /* integer, top indent in lines */ bottom, /* integer, bottom indent in lines */ right /* integer, right indent in spaces */ ) { for (var i=0;i -1) && (ieVersion < 7)) { element.className = name; } else { element.setAttribute ("class",name); } } }; trc.util.text = { // // Useful RegExps // blank : new RegExp ("^\\s*$"), /* A blank string */ indent : new RegExp ("^\\s+"), /* Line indent */ lines : new RegExp ("$","m"), /* All lines */ linechars : new RegExp ("(\n|\r)"), /* EOL line characters */ tabs : new RegExp ("\t","g"), /* All tabs */ // // We need this because microsoft browsers before IE 7, cannot // display pre-formatted text correctly win unix style line // endings. // unix2dos : function(txt /* String */) { //if already DOS... if (txt.search(/\r\n/) != -1) { return txt; } return txt.replace (/\n/g, "\r\n"); }, // // Useful to normalize text. // dos2unix : function(txt /* String */) { //if already unix... if (txt.search(/\r\n/) == -1) { return txt; } return txt.replace(/\r/g, ""); }, // // Create a string with a character repeated x times. // repString : function (length, /* integer, size of the string to create */ ch /* string, The character to set the string to */ ) { var ret = new String(); for (var i=0;idep. // _deps : new Object(), // // An array of callback functions, these should be called when all // dependencies are loaded. // _callbacks : new Array(), // // The init function simply calls the YUI loader... // _init : function() { var yuiUtil = this; // // It takes safari a while to load the YUI Loader if it hasn't // loaded yet keep trying at 1/4 second intervals // if (!window.YAHOO) { window.setTimeout (function() { yuiUtil._init(); }, 250); return; } // // Collect requirements... 
// var required = new Array(); for (var req in this._deps) { required.push (req); } // // Load YUI dependencies... // var loader = new YAHOO.util.YUILoader({ require: required, loadOptional: true, filter: "RAW", onSuccess: function() { yuiUtil._depsLoaded(); }, timeout: 10000, combine: true }); loader.insert(); }, // // Called after all dependencies have been loaded // _depsLoaded : function() { // // Dependencies are loaded let everyone know. // for (var i=0;i|\|/g, 'sh_symbol', -1 ], [ /\{|\}/g, 'sh_cbracket', -1 ], [ /(?:[A-Za-z]|_)[A-Za-z0-9_]*(?=[ \t]*\()/g, 'sh_function', -1 ], [ /([A-Za-z](?:[^`~!@#$%&*()_=+{}|;:",<.>\/?'\\[\]\^\-\s]|[_])*)((?:<.*>)?)(\s+(?=[*&]*[A-Za-z][^`~!@#$%&*()_=+{}|;:",<.>\/?'\\[\]\^\-\s]*\s*[`~!@#$%&*()_=+{}|;:",<.>\/?'\\[\]\^\-\[\]]+))/g, ['sh_usertype', 'sh_usertype', 'sh_normal'], -1 ] ], [ [ /$/g, null, -2 ], [ /(?:?)|(?:?)/g, 'sh_url', -1 ], [ /<\?xml/g, 'sh_preproc', 2, 1 ], [ //g, 'sh_keyword', -1 ], [ /<(?:\/)?[A-Za-z](?:[A-Za-z0-9_:.-]*)/g, 'sh_keyword', 6, 1 ], [ /&(?:[A-Za-z0-9]+);/g, 'sh_preproc', -1 ], [ /<(?:\/)?[A-Za-z][A-Za-z0-9]*(?:\/)?>/g, 'sh_keyword', -1 ], [ /<(?:\/)?[A-Za-z][A-Za-z0-9]*/g, 'sh_keyword', 6, 1 ], [ /@[A-Za-z]+/g, 'sh_type', -1 ], [ /(?:TODO|FIXME|BUG)(?:[:]?)/g, 'sh_todo', -1 ] ], [ [ /\?>/g, 'sh_preproc', -2 ], [ /([^=" \t>]+)([ \t]*)(=?)/g, ['sh_type', 'sh_normal', 'sh_symbol'], -1 ], [ /"/g, 'sh_string', 3 ] ], [ [ /\\(?:\\|")/g, null, -1 ], [ /"/g, 'sh_string', -2 ] ], [ [ />/g, 'sh_preproc', -2 ], [ /([^=" \t>]+)([ \t]*)(=?)/g, ['sh_type', 'sh_normal', 'sh_symbol'], -1 ], [ /"/g, 'sh_string', 3 ] ], [ [ /-->/g, 'sh_comment', -2 ], [ //g, 'sh_comment', -2 ], [ //g, 'sh_comment', -2 ], [ / XML Schema Documentation application/xhtml+xml http://www.w3.org/2001/XMLSchema http://web4.w3.org/TR/2001/REC-xmlschema-2-20010502/# " ' element_ attrib_ attgrp_ grp_ type_ http://yui.yahooapis.com/2.7.0/build/ stylesheet text/css <xslt:value-of select="xsd:annotation/xsd:appinfo/xsdxt:title"/> <xslt:value-of select="$defaultTitle"/>
Loading...

Namespaces

Your browser does not seem to have support for namespace nodes in XPath. If you're a Firefox user, please consider voting to get this issue resolved: https://bugzilla.mozilla.org/show_bug.cgi?id=94270

trc.schema.controller.links[' ']=[ , ]; trc.schema.controller.index = index Index Schema Document ; trc.schema.controller.links[' ']=[ # See definition of , ]; { href : , name : , title : }

Imports

Visit

Includes

Visit

Elements

trc.schema.sampleManager.showSample( );
Sample
Loading...

Complex Types

Simple Types

#

extends: ,
restricts: ,
SubAttributes Attributes
SubDocumentation Documentation
Sequence
<?> (Any Element) @? (Any Attribute)
restriction
enum values
(id = )
(id = ) (fixed)
< > @
trove-5.0.0/apidocs/src/xslts/xslt/.htaccess0000664000567000056710000000013012701410316022157 0ustar jenkinsjenkins00000000000000AddType application/xml wadl AddType application/xml xsd AddType application/xml xslt trove-5.0.0/apidocs/src/xsd/0000775000567000056710000000000012701410521017014 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/xsd/dbaas.wadl0000664000567000056710000021676112701410316020756 0ustar jenkinsjenkins00000000000000 %common; ]> The account ID of the owner of the specified instance. The instance ID for the specified database instance. The name for the specified database. The name for the specified user. The name for the specified database. The flavor ID for the specified flavor. Returns detailed information about the specified version of the API. This operation returns detailed information about the specified version of the API. The following example shows the List Version Details requests: The following example shows the List Version Details responses: &commonFaults; &getFaults; Lists information about all versions of the API. This operation lists information about all versions of the API. The following example shows the List Versions requests: The following example shows the List Versions responses: &commonFaults; &getFaults; Creates a new database instance. This operation asynchronously provisions a new database instance. This call requires the user to specify a flavor and a volume size. The service then provisions the instance with the requested flavor and sets up a volume of the specified size, which is the storage for the database instance. Notes You can create only one database instance per POST request. You can create a database instance with one or more databases, and users associated to those databases. The default binding for the MySQL instance is port 3306. The following table lists the required and optional attributes for Create Instance:
Required and Optional Attributes for Create Instance
Applies To Name Description Required
Instance flavorRef Reference (href) to a flavor as specified in the response from the List Flavors API call. This is the actual URI as specified by the href field in the link. Refer to the List Flavors response examples that follow for an example of the flavorRef. Rather than the flavor URI, you can also pass the flavor id (integer) as the value for flavorRef; refer to the List Flavor By ID operation for details. Yes
(volume) size Specifies the volume size in gigabytes (GB). The value specified must be between 1 and 50. Yes
name Name of the instance to create. The length of the name is limited to 255 characters and any characters are permitted. No
Database name Specifies database names for creating databases on instance creation. Refer to the request examples for the required json format. No
character_set Set of symbols and encodings. The default character set is utf8. No
collate Set of rules for comparing characters in a character set. The default value for collate is utf8_general_ci. No
User name Specifies the user name for the database on instance creation. Refer to the request examples for the required json format. No
password Specifies the password for those users on instance creation. Refer to the request examples for the required json format. No
(database) name Specifies the names of databases that those users can access on instance creation. Refer to the request examples for the required json format. No
Refer to the Database Instance Status section for a list of possible database instance statuses that may be returned.
The following example shows the Create Database Instance requests and responses: For convenience, notice in the response examples above that resources contain links to themselves. This allows a client to easily obtain resource URIs rather than to construct them. There are two kinds of link relations associated with resources. A self link contains a versioned link to the resource. These links should be used in cases where the link will be followed immediately. A bookmark link provides a permanent link to a resource that is appropriate for long term storage. &commonFaults; &getFaults;
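The request and response samples referenced above are included from external files in this source tree. A representative Create Instance request body, assembled from the attribute table above, would look like this; the flavorRef, names, and sizes are illustrative:
{
    "instance": {
        "name": "json_rack_instance",
        "flavorRef": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1",
        "volume": {
            "size": 2
        },
        "databases": [
            {
                "name": "sampledb",
                "character_set": "utf8",
                "collate": "utf8_general_ci"
            }
        ],
        "users": [
            {
                "name": "demouser",
                "password": "demopassword",
                "databases": [
                    {
                        "name": "sampledb"
                    }
                ]
            }
        ]
    }
}
As noted in the attribute table, an integer flavor id (for example, "flavorRef": "1") may be passed in place of the full flavor URI.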
Deletes the specified database instance. This operation deletes the specified database instance, including any associated data. Refer to the Database Instance Status section for a list of possible database instance statuses that may be returned. This operation is not allowed when the instance status is BUILD. The following example shows the Delete Database Instance requests: &commonFaults; The following example shows the Delete Database Instance responses: &getFaults; Lists the status and information for all database instances. This operation lists the status and information for all database instances. Refer to the Database Instance Status section for a list of possible database instance statuses that may be returned. The following example shows the List All Database Instances Detail requests: The following example shows the List All Database Instances responses: &commonFaults; &getFaults; Lists status and details for a specified database instance. This operation lists the status and details of the specified database instance. This operation lists the volume size in gigabytes (GB) and the approximate GB used. After instance creation, the used size of your volume will be greater than 0. This is expected and due to the automatic creation of non-empty transaction logs for MySQL optimization. The used attribute is not returned in the response when the status for the instance is BUILD, REBOOT, RESIZE, or ERROR. Refer to the Database Instance Status section for a list of possible database instance statuses that may be returned. The list operations return a DNS-resolvable hostname associated with the database instance instead of an IP address. Since the hostname always resolves to the correct IP address of the database instance, this relieves the user from the task of maintaining the mapping. Note that although the IP address may likely change on resizing, migrating, and so forth, the hostname always resolves to the correct database instance. The following example shows the List Database Instance Status and Details requests: The following example shows the List Database Instance Status and Details responses: &commonFaults; &getFaults; Restart the database service on the instance. The restart operation restarts only the MySQL instance. Restarting MySQL erases any dynamic configuration settings that you have made within MySQL. The MySQL service is unavailable until the instance restarts. This operation returns a 202 Accepted response. The following example shows the Restart Instance requests: The following example shows the Restart Instance responses: &commonFaults; &getFaults; &postPutFaults; Resize the memory of the instance. This operation changes the memory size of the instance, assuming a valid flavorRef is provided. Restarts MySQL in the process. The following example shows the Resize Instance requests: The following example shows the Resize Instance responses: &commonFaults; &getFaults; &postPutFaults; Resize the volume attached to the instance. This operation supports resizing the attached volume for an instance. It supports only increasing the volume size and does not support decreasing the size. The volume size is in gigabytes (GB) and must be an integer. You cannot increase the volume to a size larger than the API volume size limit specifies. This operation returns a 202 Accepted response. The following example shows the Resize Instance Volume requests: The following example shows the Resize Instance Volume responses: &commonFaults; &getFaults; &postPutFaults; Creates a new database within the specified instance. This operation creates a new database within the specified instance.
The name of the database is a required attribute. The following additional attributes can be specified for each database: collate and character_set.
Required and Optional Attributes for Create Database
Name Description Required
name Specifies the database name for creating the database. Refer to the request examples for the required json format. Yes
character_set Set of symbols and encodings. The default character set is utf8. No
collate Set of rules for comparing characters in a character set. The default value for collate is utf8_general_ci. No
See the MySQL documentation for information about supported character sets and collations at http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html. The following database names are reserved and cannot be used for creating databases: lost+found, information_schema, and mysql. Refer to the following tables for information about characters that are valid/invalid for creating database names.
Valid Characters That Can Be Used in a Database Name
Character
Letters (upper and lower cases allowed)
Numbers
'@', '?', '#', and spaces are allowed, but not at the beginning and end of the database name
'_' is allowed anywhere in the database name
Characters That Cannot Be Used in a Database Name
Character
Single quotes
Double quotes
Back quotes
Semicolons
Commas
Backslashes
Forward slashes
Length Restrictions for Database Name
Restriction Value
Database name maximum length 64
The following example shows the Create Database requests: The following example shows the Create Database responses: &commonFaults; &getFaults;
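The request and response samples referenced above are included from external files. A representative Create Database request body, assembled from the attribute table above, would look like this; the database name is illustrative, and the two optional attributes are shown with their documented defaults:
{
    "databases": [
        {
            "name": "testingdb",
            "character_set": "utf8",
            "collate": "utf8_general_ci"
        }
    ]
}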
Lists databases for the specified instance. This operation lists the databases for the specified instance. This operation returns only the user-defined databases, not the system databases. The system databases (mysql, information_schema, lost+found) can only be viewed by a database administrator. The following example shows the List Databases for Instance requests: The following example shows the paginated List Databases for Instance requests: The following example shows the List Databases for Instance responses: The following example shows the paginated List Databases for Instance responses: &commonFaults; &getFaults; Deletes the specified database. This operation deletes the requested database within the specified database instance. Note that all data associated with the database is also deleted. The following example shows the Delete Database requests: The following example shows the Delete Database responses: &commonFaults; &getFaults; Creates a user for the specified database instance. This operation asynchronously provisions a new user for the specified database instance based on the configuration defined in the request object. Once the request is validated and progress has started on the provisioning process, a 202 Accepted response object is returned. Writer: please add the following note back into the doc once the List User Details call is added back into the API: Using the identifier, the caller can check on the progress of the operation by performing a GET on users/name (for more details on this operation see the "List User Details" section of this document). If the corresponding request cannot be fulfilled due to insufficient or invalid data, an HTTP 400 "Bad Request" error response is returned with information regarding the nature of the failure. Failures in the validation process are non-recoverable and require the caller to correct the cause of the failure and POST the request again. The following table lists the required attributes for Create User. Refer to the request examples for the required json format:
Required Attributes for Create User
Applies To Name Description Required
User name Name of the user for the database. Yes
password User password for database access. Yes
(database) name Name of the database that the user can access. One or more database names must be specified. No
Notes A user is granted all privileges on the specified databases. The following user name is reserved and cannot be used for creating users: root. Refer to the following tables for information about characters that are valid/invalid for creating database names, user names, and passwords.
Valid Characters That Can Be Used in a Database Name, User Name, and Password
Character
Letters (upper and lower cases allowed)
Numbers
'@', '?', '#', and spaces are allowed, but not at the beginning and end of the database name, user name, and password
"_" is allowed anywhere in the database name, user name, and password
Characters That Cannot Be Used in a Database Name, User Name, and Password
Character
Single quotes
Double quotes
Back quotes
Semicolons
Commas
Backslashes
Forward slashes
Spaces at the front or end of the user name or password
Length Restrictions for Database Name, User Name, and Password
Restriction Value
Database name maximum length 64
User name maximum length 16
Password maximum length unlimited (no restrictions)
The following example shows the Create User requests: The following example shows the Create User responses: &commonFaults; &getFaults;
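The request and response samples referenced above are included from external files. A representative Create User request body, assembled from the attribute table above, would look like this; the user name, password, and database name are illustrative:
{
    "users": [
        {
            "name": "dbuser1",
            "password": "password",
            "databases": [
                {
                    "name": "databaseA"
                }
            ]
        }
    ]
}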
Lists the users in the specified database instance. This operation lists the users in the specified database instance, along with the associated databases for that user. This operation does not return the system users (database administrators that administer the health of the database). Also, this operation returns the "root" user only if "root" user has been enabled. The following notes apply to MySQL users: User names can be up to 16 characters long. When you create accounts with INSERT, you must use FLUSH PRIVILEGES to tell the server to reload the grant tables. For additional information, refer to: http://dev.mysql.com/doc/refman/5.1/en/user-account-management.html The following example shows the List Users in Database Instance requests: The following example shows the paginated List Users in Database Instance requests: The following examples show the List Users in Database Instance responses: The following example shows the paginated List Users in Database Instance responses: &commonFaults; &getFaults; Changes the MySQL password of one or more users. This operation changes the MySQL password of one or more users. For information about choosing a valid password, please refer to the Create User operation for details. The following example shows the Change User(s) Password requests: The following examples show the Change User(s) Password responses: &commonFaults; &getFaults; Deletes the user identified by {name} for the specified database instance. This operation deletes the specified user for the specified database instance. There is a bug in a Python library used by the service that may cause incorrect user deletions if a period (.) is used in the user name: the user name is truncated at the period, leaving only the portion from the beginning up to the period. For example, a user named "my.userA" would be truncated to "my", and if the user "my" exists, that user is incorrectly deleted. To avoid the problem, do not use periods in user names. The following example shows the Delete User requests: The following example shows the Delete User responses: &commonFaults; &getFaults; Lists the specified user's name and a list of databases that the user can access. This operation lists the specified user's name and a list of databases that the user can access. The following example shows the List User requests: The following examples show the List User responses: &commonFaults; &getFaults; Shows a list of all databases a user has access to. This operation shows a list of all databases a user has access to. The following example shows the List User Access requests: The following example shows the List User Access responses: &commonFaults; &getFaults; Grant access for the specified user to one or more databases for the specified instance. This operation grants access for the specified user to one or more databases for the specified instance. The user is granted ALL privileges on the database. Refer to the information at the beginning of the Users section for more details on access. The following example shows the Grant User Access requests: The following example shows the Grant User Access responses: &commonFaults; &getFaults; Remove access to the specified database for the specified user. This operation removes access to the specified database for the specified user.
The following example shows the Revoke User Access requests: The following example shows the Revoke User Access responses: &commonFaults; &getFaults;

Enables the root user for the specified database instance and returns the root password. This operation enables login from any host for the root user and provides the user with a generated root password. Changes you make as a root user may have detrimental effects on the database instance and cause unpredictable behavior for API operations. When you enable the root user, you accept the possibility that we will not be able to support your database instance. While enabling root does not prevent us from making a “best effort” attempt to help you if something goes wrong with your instance, we cannot ensure that we will be able to assist you if you change core MySQL settings. Such changes include (but are not limited to) turning off binlogs, removing the users that we use to access your instance, and so forth. The following example shows the Enable Root User requests: The following example shows the Enable Root User responses: &commonFaults; &getFaults;

Returns true if the root user is enabled for the specified database instance, or false otherwise. This operation checks the specified active database instance to see if root access is enabled. It returns true if the root user is enabled for the specified database instance, or false otherwise. The following example shows the Check Root User Access requests: The following example shows the Check Root User Access responses: &commonFaults; &getFaults;

Lists information for all available flavors. This operation lists information for all available flavors. This resource is identical to the flavors found in the OpenStack Nova API, but without the disk property. The following example shows the List Flavors requests: The following example shows the List Flavors responses: &commonFaults; &getFaults;

Lists all flavor information about the specified flavor ID. This operation lists all information for the specified flavor ID, including details of the RAM. This resource is identical to the flavors found in the OpenStack Nova API, but without the disk property. The flavorId parameter must be an integer. If a floating-point value is used for the flavorId parameter, the decimal portion is truncated and the integer portion is used as the value of the flavorId. The following example shows the List Flavor By ID requests: The following example shows the List Flavor By ID responses: &commonFaults; &getFaults;
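For the Enable Root User and Check Root User Access operations above, a hedged sketch using the /root sub-resource shown in the db-enable-root-user and db-check-root-user samples; the "rootEnabled" key in the final check is an assumption, since the sample response body is not reproduced in this archive:

import requests

TROVE = "https://troveapi.org/v1.0/1234"
INSTANCE = "44b277eb-39be-4921-be31-3d61b43651d7"
HEADERS = {"X-Auth-Token": "87c6033c-9ff6-405f-943e-2deb73f278b7",
           "Accept": "application/json",
           "Content-Type": "application/json"}

# Enable root; the response carries the generated password,
# e.g. {"user": {"name": "root", "password": "12345"}} per the sample.
enabled = requests.post(f"{TROVE}/instances/{INSTANCE}/root", headers=HEADERS)
enabled.raise_for_status()
print(enabled.json()["user"]["password"])

# Check root access with GET on the same resource; the boolean key name
# ("rootEnabled") is assumed here, not shown in the bundled samples.
check = requests.get(f"{TROVE}/instances/{INSTANCE}/root", headers=HEADERS)
print(check.json().get("rootEnabled"))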
trove-5.0.0/apidocs/src/xsd/common.ent0000664000567000056710000000647112701410316021026 0ustar jenkinsjenkins00000000000000 GET'> PUT'> POST'> DELETE'> '> '> '> trove-5.0.0/apidocs/src/xsd/dbaas.xsd0000664000567000056710000005251312701410316020616 0ustar jenkinsjenkins00000000000000 DBaaS

This is the main index XML Schema document for DBaaS API Schema Types Version 1.0.

A MySQL User.

A MySQL User.

A Database Instance.

Database Instances.

Settings such as memory used to create a database instance.

A list of flavors.

A Database.

List of Databases.

Restart the Database.

Resize an Instance.

A list of database user names.

A list of databases.

The name for the user.

The password for the user.

The status of the user.

A list of database instances.

A list of optional databases.

A list of optional users.

The links for the type of instance.

The volume attached to the instance.

A unique database instance id.

Date/Time the instance was created.

Date/Time the instance was last updated.

The name for the instance.

The flavor reference of the instance.

The status of the instance.

The hostname attached to the instance.

Whether or not root is enabled for the instance.

The type of link.

The URL.

The type of link.

A list of flavors.

A list of links.

A unique flavor id.

The name for the flavor.

The RAM in megabytes.

A list of links.

A list of databases.

The Database character set.

The name for the database.

The Collation type of the database.

Whether or not root is enabled for the given instance.

Size of the volume in GB.

Used space on the attached volume in GB.

A unique flavor id.

The volume attached to the instance.

The new flavorRef used to resize the instance.

The instance status.

Healthy status.

Down status.

Unavailable status.

The MySQL user status.

Enabled status.

A human-readable message that is appropriate for display to the end user.

The optional <details> element may contain useful information for tracking down errors (e.g., a stack trace). This information may or may not be appropriate for display to an end user.

The HTTP status code associated with the current fault.

An optional dateTime denoting when an operation should be retried.
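The fault samples bundled later in this archive (db-faults-badRequest.json and db-faults-itemNotFound.json) wrap the code and message fields described above in a single top-level key named for the fault type. A small Python sketch of unpacking such a body; the helper name is illustrative, not part of the API:

def unpack_fault(body):
    """Return (fault_type, code, message) from a DBaaS fault body."""
    fault_type, fault = next(iter(body.items()))  # e.g. "itemNotFound"
    return fault_type, fault["code"], fault["message"]

print(unpack_fault({"itemNotFound": {"code": 404,
                                     "message": "The resource could not be found."}}))
# -> ('itemNotFound', 404, 'The resource could not be found.')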

trove-5.0.0/apidocs/src/samples/0000775000567000056710000000000012701410521017662 5ustar jenkinsjenkins00000000000000trove-5.0.0/apidocs/src/samples/db-datastore-by-id-request-json.txt0000664000567000056710000000034612701410316026440 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-backup-create-response-json.txt0000664000567000056710000000015612701410316026323 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 404 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-version-response-json.txt0000664000567000056710000000014612701410316025301 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 150 Date: Wed, 25 Jan 2012 21:53:04 GMTtrove-5.0.0/apidocs/src/samples/db-datastores-list-request-json.txt0000664000567000056710000000030112701410316026561 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-resize-flavor-request.json0000664000567000056710000000006212701410316027220 0ustar jenkinsjenkins00000000000000{ "resize": { "flavorRef": 3 } } trove-5.0.0/apidocs/src/samples/db-datastores-list-response.json0000664000567000056710000000236512701410316026126 0ustar jenkinsjenkins00000000000000{ "datastores": [ { "default_version": "b00000b0-00b0-0b00-00b0-000b000000bb", "id": "a00000a0-00a0-0a00-00a0-000a000000aa", "links": [ { "href": "https://troveapi.org/v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa", "rel": "self" }, { "href": "https://troveapi.org/datastores/a00000a0-00a0-0a00-00a0-000a000000aa", "rel": "bookmark" } ], "name": "mysql", "versions": [ { "id": "b00000b0-00b0-0b00-00b0-000b000000bb", "links": [ { "href": "https://troveapi.org/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "self" }, { "href": "https://troveapi.org/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "bookmark" } ], "name": "5.5" } ] } ] } trove-5.0.0/apidocs/src/samples/db-get-default-instance-configuration-request-json.txt0000664000567000056710000000036312701410316032317 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/configuration HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-instance-diagnostics-response-json.txt0000664000567000056710000000015012701410316030662 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 125 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-detach-from-instance-response-json.txt0000664000567000056710000000015412701410316032453 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-list-response.json0000664000567000056710000000252612701410316025221 0ustar jenkinsjenkins00000000000000{ "backups": [ { "created": "2014-10-30T12:30:00", "datastore": { 
"type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Backup", "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": "http://localhost/path/to/backup", "name": "snapshot", "parent_id": null, "size": 0.14, "status": "COMPLETED", "updated": "2014-10-30T12:30:00" }, { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Incremental Backup", "id": "2e351a71-dd28-4bcb-a7d6-d36a5b487173", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": "http://localhost/path/to/backup", "name": "Incremental Snapshot", "parent_id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "size": 0.14, "status": "COMPLETED", "updated": "2014-10-30T12:30:00" } ] } trove-5.0.0/apidocs/src/samples/db-change-users-password-response-json.txt0000664000567000056710000000015412701410316030037 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 152 Date: Wed, 21 Mar 2012 17:46:46 GMTtrove-5.0.0/apidocs/src/samples/db-configuration-parameter-for-datastore-version-request-json.txt0000664000567000056710000000046012701410316034525 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa/versions/b00000b0-00b0-0b00-00b0-000b000000bb/parameters/collation_server HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-create-databases-request.json0000664000567000056710000000041312701410316026016 0ustar jenkinsjenkins00000000000000{ "databases": [ { "character_set": "utf8", "collate": "utf8_general_ci", "name": "testingdb" }, { "name": "anotherdb" }, { "name": "oneMoreDB" } ] } trove-5.0.0/apidocs/src/samples/db-enable-root-user-response.json0000664000567000056710000000011512701410316026156 0ustar jenkinsjenkins00000000000000{ "user": { "name": "root", "password": "12345" } } trove-5.0.0/apidocs/src/samples/db-grant-user-access-response-json.txt0000664000567000056710000000015212701410316027137 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Wed, 27 Jun 2012 23:11:19 GMTtrove-5.0.0/apidocs/src/samples/db-backup-create-request-json.txt0000664000567000056710000000027712701410316026161 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/backups HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-create-instance-response-json.txt0000664000567000056710000000015012701410316026654 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 697 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-get-host-detail-response-json.txt0000664000567000056710000000015012701410316027543 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 302 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-flavors-by-id-response-json.txt0000664000567000056710000000015012701410316026265 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 214 Date: Mon, 18 Mar 2013 19:09:17 GMT 
trove-5.0.0/apidocs/src/samples/db-instance-restart-request.json0000664000567000056710000000002712701410316026115 0ustar jenkinsjenkins00000000000000{ "restart": {} } trove-5.0.0/apidocs/src/samples/db-instance-restart-response-json.txt0000664000567000056710000000015412701410316027101 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-datastore-versions-list-response.json0000664000567000056710000000114112701410316027600 0ustar jenkinsjenkins00000000000000{ "versions": [ { "datastore": "a00000a0-00a0-0a00-00a0-000a000000aa", "id": "b00000b0-00b0-0b00-00b0-000b000000bb", "links": [ { "href": "https://troveapi.org/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "self" }, { "href": "https://troveapi.org/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "bookmark" } ], "name": "5.5" } ] } trove-5.0.0/apidocs/src/samples/db-revoke-user-access-response-json.txt0000664000567000056710000000015212701410316027317 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Wed, 27 Jun 2012 23:11:19 GMTtrove-5.0.0/apidocs/src/samples/db-configuration-parameter-for-datastore-version-response-json.txt0000664000567000056710000000015012701410316034667 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 147 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-detach-from-instance-request.json0000664000567000056710000000007112701410316031466 0ustar jenkinsjenkins00000000000000{ "instance": { "configuration": "" } } trove-5.0.0/apidocs/src/samples/db-backup-create-incremental-response.json0000664000567000056710000000114212701410316030001 0ustar jenkinsjenkins00000000000000{ "backup": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Incremental Backup", "id": "2e351a71-dd28-4bcb-a7d6-d36a5b487173", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": null, "name": "Incremental Snapshot", "parent_id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "size": null, "status": "NEW", "updated": "2014-10-30T12:30:00" } } trove-5.0.0/apidocs/src/samples/db-mgmt-instance-index-response.json0000664000567000056710000000360312701410316026653 0ustar jenkinsjenkins00000000000000{ "instances": [ { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5" }, "deleted": false, "deleted_at": null, "flavor": { "id": "3", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/3", "rel": "self" }, { "href": "https://troveapi.org/flavors/3", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "self" }, { "href": "https://troveapi.org/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "bookmark" } ], "name": "json_rack_instance", "server": { "deleted": false, "deleted_at": null, "host": "hostname_1", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "local_id": 0, "name": "44b277eb-39be-4921-be31-3d61b43651d7-lay", "status": "ACTIVE", "tenant_id": "3000" }, "service_status": "ACTIVE", "status": "ACTIVE", "task_description": "No tasks for the instance.", "tenant_id": "3000", "updated": 
"2014-10-30T12:30:00", "volume": { "size": 4 } } ] } trove-5.0.0/apidocs/src/samples/db-flavors-response.json0000664000567000056710000001326312701410316024457 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "id": 1, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "str_id": "1" }, { "id": 2, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/2", "rel": "self" }, { "href": "https://troveapi.org/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "str_id": "2" }, { "id": 3, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/3", "rel": "self" }, { "href": "https://troveapi.org/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "str_id": "3" }, { "id": 4, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/4", "rel": "self" }, { "href": "https://troveapi.org/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "str_id": "4" }, { "id": 5, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/5", "rel": "self" }, { "href": "https://troveapi.org/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "str_id": "5" }, { "id": 6, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/6", "rel": "self" }, { "href": "https://troveapi.org/flavors/6", "rel": "bookmark" } ], "name": "m1.nano", "ram": 64, "str_id": "6" }, { "id": 7, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/7", "rel": "self" }, { "href": "https://troveapi.org/flavors/7", "rel": "bookmark" } ], "name": "m1.micro", "ram": 128, "str_id": "7" }, { "id": 8, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/8", "rel": "self" }, { "href": "https://troveapi.org/flavors/8", "rel": "bookmark" } ], "name": "m1.rd-smaller", "ram": 768, "str_id": "8" }, { "id": 9, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/9", "rel": "self" }, { "href": "https://troveapi.org/flavors/9", "rel": "bookmark" } ], "name": "tinier", "ram": 506, "str_id": "9" }, { "id": 10, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/10", "rel": "self" }, { "href": "https://troveapi.org/flavors/10", "rel": "bookmark" } ], "name": "m1.rd-tiny", "ram": 512, "str_id": "10" }, { "id": 11, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/11", "rel": "self" }, { "href": "https://troveapi.org/flavors/11", "rel": "bookmark" } ], "name": "eph.rd-tiny", "ram": 512, "str_id": "11" }, { "id": 12, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/12", "rel": "self" }, { "href": "https://troveapi.org/flavors/12", "rel": "bookmark" } ], "name": "eph.rd-smaller", "ram": 768, "str_id": "12" }, { "id": null, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/custom", "rel": "self" }, { "href": "https://troveapi.org/flavors/custom", "rel": "bookmark" } ], "name": "custom.small", "ram": 512, "str_id": "custom" } ] } trove-5.0.0/apidocs/src/samples/db-create-databases-response-json.txt0000664000567000056710000000015412701410316027003 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-edit-parameters-request.json0000664000567000056710000000014312701410316030561 0ustar jenkinsjenkins00000000000000{ "configuration": { "values": { "connect_timeout": 300 } } } 
trove-5.0.0/apidocs/src/samples/db-mgmt-get-root-details-request-json.txt0000664000567000056710000000035712701410316027577 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/instances/44b277eb-39be-4921-be31-3d61b43651d7/root HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instances-index-pagination-response-json.txt0000664000567000056710000000015112701410316031033 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 1251 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-instances-index-request-json.txt0000664000567000056710000000030012701410316026532 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-restore-delete-request-json.txt0000664000567000056710000000035012701410316026366 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/instances/d5a9db64-7ef7-41c5-8e1e-4013166874bc HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-resize-instance-request.json0000664000567000056710000000016112701410316027533 0ustar jenkinsjenkins00000000000000{ "resize": { "flavorRef": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/2" } }trove-5.0.0/apidocs/src/samples/db-backup-delete-response-json.txt0000664000567000056710000000015412701410316026320 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-create-response.json0000664000567000056710000000105712701410316027111 0ustar jenkinsjenkins00000000000000{ "configuration": { "created": "2014-10-30T12:30:00", "datastore_name": "mysql", "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "datastore_version_name": "5.5", "description": "example description", "id": "43a6ea86-e959-4735-9e46-a6a5d4a2d80f", "instance_count": 0, "name": "example-configuration-name", "updated": "2014-10-30T12:30:00", "values": { "collation_server": "latin1_swedish_ci", "connect_timeout": 120 } } } trove-5.0.0/apidocs/src/samples/db-list-user-dbs-response-json.txt0000664000567000056710000000014412701410316026307 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 0 Date: Wed, 27 Jun 2012 23:11:19 GMTtrove-5.0.0/apidocs/src/samples/db-get-default-instance-configuration-response.json0000664000567000056710000000307512701410316031653 0ustar jenkinsjenkins00000000000000{ "instance": { "configuration": { "basedir": "/usr", "connect_timeout": "15", "datadir": "/var/lib/mysql/data", "default_storage_engine": "innodb", "innodb_buffer_pool_size": "150M", "innodb_data_file_path": "ibdata1:10M:autoextend", "innodb_file_per_table": "1", "innodb_log_buffer_size": "25M", "innodb_log_file_size": "50M", "innodb_log_files_in_group": "2", "join_buffer_size": "1M", "key_buffer_size": "50M", "local-infile": "0", "max_allowed_packet": "1024K", "max_connections": "100", "max_heap_table_size": "16M", "max_user_connections": "100", "myisam-recover": "BACKUP", "open_files_limit": "512", "pid_file": 
"/var/run/mysqld/mysqld.pid", "port": "3306", "query_cache_limit": "1M", "query_cache_size": "8M", "query_cache_type": "1", "read_buffer_size": "512K", "read_rnd_buffer_size": "512K", "server_id": "271898715", "skip-external-locking": "1", "sort_buffer_size": "1M", "table_definition_cache": "256", "table_open_cache": "256", "thread_cache_size": "4", "thread_stack": "192K", "tmp_table_size": "16M", "tmpdir": "/var/tmp", "user": "mysql", "wait_timeout": "120", "performance_schema": "ON" } } } trove-5.0.0/apidocs/src/samples/db-instance-resize-instance-response-json.txt0000664000567000056710000000016412701410316030521 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: text/plain; charset=UTF-8 Content-Length: 58 Date: Mon, 06 Feb 2012 21:28:10 GMTtrove-5.0.0/apidocs/src/samples/db-delete-databases-response-json.txt0000664000567000056710000000015412701410316027002 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-instance-diagnostics-response.json0000664000567000056710000000030312701410316030045 0ustar jenkinsjenkins00000000000000{ "diagnostics": { "fdSize": 64, "threads": 2, "version": "1", "vmHwm": 2872, "vmPeak": 29160, "vmRss": 2872, "vmSize": 29096 } } trove-5.0.0/apidocs/src/samples/db-mgmt-get-storage-response-json.txt0000664000567000056710000000015012701410316026772 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 177 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-list-hosts-request-json.txt0000664000567000056710000000030112701410316026512 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/hosts HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-list-databases-pagination-response-json.txt0000664000567000056710000000015012701410316030636 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 192 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-get-request-json.txt0000664000567000056710000000034312701410316025467 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/backups/a9832168-7541-4536-b8d9-a8a9b79cf1b4 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-get-instance-details-request-json.txt0000664000567000056710000000035212701410316030413 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/instances/44b277eb-39be-4921-be31-3d61b43651d7 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-backups-by-instance-response.json0000664000567000056710000000252612701410316026645 0ustar jenkinsjenkins00000000000000{ "backups": [ { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Backup", "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": "http://localhost/path/to/backup", "name": "snapshot", "parent_id": null, "size": 0.14, "status": "COMPLETED", "updated": 
"2014-10-30T12:30:00" }, { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Incremental Backup", "id": "2e351a71-dd28-4bcb-a7d6-d36a5b487173", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": "http://localhost/path/to/backup", "name": "Incremental Snapshot", "parent_id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "size": 0.14, "status": "COMPLETED", "updated": "2014-10-30T12:30:00" } ] } trove-5.0.0/apidocs/src/samples/db-grant-user-access-request-json.txt0000664000567000056710000000043212701410316026772 0ustar jenkinsjenkins00000000000000PUT /v1.0/1234/instances/692d8418-7a8f-47f1-8060-59846c6e024f/users/exampleuser/databases HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-mgmt-list-hosts-response-json.txt0000664000567000056710000000015012701410316026662 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 101 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-create-request.json0000664000567000056710000000062012701410316026736 0ustar jenkinsjenkins00000000000000{ "configuration": { "datastore": { "type": "a00000a0-00a0-0a00-00a0-000a000000aa", "version": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "example description", "name": "example-configuration-name", "values": { "collation_server": "latin1_swedish_ci", "connect_timeout": 120 } } } trove-5.0.0/apidocs/src/samples/db-configuration-parameter-without-datastore-version-response.json0000664000567000056710000000025212701410316034772 0ustar jenkinsjenkins00000000000000{ "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "min": 0, "name": "collation_server", "restart_required": false, "type": "string" } trove-5.0.0/apidocs/src/samples/db-configuration-parameters-without-datastore-version-response.json0000664000567000056710000000301512701410316035155 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "min": 0, "name": "collation_server", "restart_required": false, "type": "string" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 65535, "min": 0, "name": "connect_timeout", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 57671680, "min": 0, "name": "innodb_buffer_pool_size", "restart_required": true, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 4294967296, "min": 0, "name": "join_buffer_size", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 4294967296, "min": 0, "name": "key_buffer_size", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 1, "min": 0, "name": "local_infile", "restart_required": false, "type": "integer" } ] } trove-5.0.0/apidocs/src/samples/db-datastore-by-id-response-json.txt0000664000567000056710000000015012701410316026577 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 678 Date: Mon, 18 Mar 2013 19:09:17 GMT ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 
00000000000000trove-5.0.0/apidocs/src/samples/db-configuration-parameter-without-datastore-version-request-json.txttrove-5.0.0/apidocs/src/samples/db-configuration-parameter-without-datastore-version-request-json.tx0000664000567000056710000000041312701410316035254 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb/parameters/collation_server HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-get-default-instance-configuration-response-json.txt0000664000567000056710000000015112701410316032460 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 1110 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-instance-reboot-response-json.txt0000664000567000056710000000015412701410316026707 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-get-storage-request-json.txt0000664000567000056710000000030312701410316026624 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/storage HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-delete-request-json.txt0000664000567000056710000000035512701410316027557 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/configurations/43a6ea86-e959-4735-9e46-a6a5d4a2d80f HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-resize-flavor-response-json.txt0000664000567000056710000000015412701410316030205 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-create-incremental-response-json.txt0000664000567000056710000000015612701410316030622 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 462 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-instance-status-detail-request-json.txt0000664000567000056710000000034512701410316030034 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-instance-index-response-json.txt0000664000567000056710000000015112701410316027463 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 1082 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-users-pagination-response.json0000664000567000056710000000127212701410316027241 0ustar jenkinsjenkins00000000000000{ "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/users?limit=2&marker=dbuser2%2540%2525", "rel": "next" } ], "users": [ { "databases": [ { "name": "databaseA" } ], "host": "%", "name": "dbuser1" }, { "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ], "host": "%", "name": "dbuser2" } ] } 
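The db-list-users-pagination samples above page through users with limit and marker query parameters, returning a "next" link while more results remain. A small Python sketch of walking every page by following that link, with the placeholder token and IDs from the samples:

import requests

HEADERS = {"X-Auth-Token": "87c6033c-9ff6-405f-943e-2deb73f278b7",
           "Accept": "application/json"}

url = ("https://troveapi.org/v1.0/1234/instances/"
       "44b277eb-39be-4921-be31-3d61b43651d7/users?limit=2")
while url:
    page = requests.get(url, headers=HEADERS).json()
    for user in page["users"]:
        print(user["name"], user["host"])
    # the server omits the "next" link on the last page
    url = next((link["href"] for link in page.get("links", [])
                if link["rel"] == "next"), None)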
trove-5.0.0/apidocs/src/samples/db-create-databases-request-json.txt0000664000567000056710000000036012701410316026634 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/databases HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000trove-5.0.0/apidocs/src/samples/db-configuration-parameter-without-datastore-version-response-json.txttrove-5.0.0/apidocs/src/samples/db-configuration-parameter-without-datastore-version-response-json.t0000664000567000056710000000015012701410316035230 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 147 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-list-response-json.txt0000664000567000056710000000015012701410316026025 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 929 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-users-pagination-response-json.txt0000664000567000056710000000015012701410316030050 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 336 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-parameters-for-datastore-version-response.json0000664000567000056710000000301512701410316034240 0ustar jenkinsjenkins00000000000000{ "configuration-parameters": [ { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "min": 0, "name": "collation_server", "restart_required": false, "type": "string" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 65535, "min": 0, "name": "connect_timeout", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 57671680, "min": 0, "name": "innodb_buffer_pool_size", "restart_required": true, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 4294967296, "min": 0, "name": "join_buffer_size", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 4294967296, "min": 0, "name": "key_buffer_size", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "max": 1, "min": 0, "name": "local_infile", "restart_required": false, "type": "integer" } ] } trove-5.0.0/apidocs/src/samples/db-list-users-request-json.txt0000664000567000056710000000035312701410316025560 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/users HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-restart-request-json.txt0000664000567000056710000000035512701410316026736 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/action HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-list-instances-request-json.txt0000664000567000056710000000036412701410316031255 0ustar jenkinsjenkins00000000000000GET 
/v1.0/1234/configurations/43a6ea86-e959-4735-9e46-a6a5d4a2d80f/instances HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-get-account-details-request-json.txt0000664000567000056710000000031112701410316030236 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/accounts/3000 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-delete-instance-response-json.txt0000664000567000056710000000015412701410316026657 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-restore-response.json0000664000567000056710000000223112701410316025722 0ustar jenkinsjenkins00000000000000{ "instance": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5" }, "flavor": { "id": "1", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "d5a9db64-7ef7-41c5-8e1e-4013166874bc", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/d5a9db64-7ef7-41c5-8e1e-4013166874bc", "rel": "self" }, { "href": "https://troveapi.org/instances/d5a9db64-7ef7-41c5-8e1e-4013166874bc", "rel": "bookmark" } ], "name": "backup_instance", "status": "BUILD", "updated": "2014-10-30T12:30:00", "volume": { "size": 2 } } } trove-5.0.0/apidocs/src/samples/db-configuration-list-request-json.txt0000664000567000056710000000030512701410316027263 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/configurations HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-change-user-attributes-request.json0000664000567000056710000000013412701410316027213 0ustar jenkinsjenkins00000000000000{ "user": { "name": "new_username", "password": "new_password" } } trove-5.0.0/apidocs/src/samples/db-grant-user-access-request.json0000664000567000056710000000020512701410316026153 0ustar jenkinsjenkins00000000000000{ "databases": [ { "name": "databaseC" }, { "name": "databaseD" } ] }trove-5.0.0/apidocs/src/samples/db-instances-index-pagination-response.json0000664000567000056710000000465712701410316030235 0ustar jenkinsjenkins00000000000000{ "instances": [ { "datastore": { "type": "mysql", "version": "5.5" }, "flavor": { "id": "1", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "43a6ea86-e959-4735-9e46-a6a5d4a2d80f", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/43a6ea86-e959-4735-9e46-a6a5d4a2d80f", "rel": "self" }, { "href": "https://troveapi.org/instances/43a6ea86-e959-4735-9e46-a6a5d4a2d80f", "rel": "bookmark" } ], "name": "The Third Instance", "status": "ACTIVE", "volume": { "size": 2 } }, { "datastore": { "type": "mysql", "version": "5.5" }, "flavor": { "id": "1", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": 
"https://troveapi.org/flavors/1", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "self" }, { "href": "https://troveapi.org/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "bookmark" } ], "name": "json_rack_instance", "status": "ACTIVE", "volume": { "size": 2 } } ] } trove-5.0.0/apidocs/src/samples/db-mgmt-list-accounts-response-json.txt0000664000567000056710000000014712701410316027347 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 50 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-instances-index-pagination-request-json.txt0000664000567000056710000000031012701410316030662 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances?limit=2 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-list-user-access-response-json.txt0000664000567000056710000000014412701410316027000 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 0 Date: Wed, 27 Jun 2012 23:11:19 GMTtrove-5.0.0/apidocs/src/samples/db-faults-badRequest.json0000664000567000056710000000022412701410316024533 0ustar jenkinsjenkins00000000000000{ "badRequest": { "code": 400, "message": "Volume 'size' needs to be a positive integer value, -1.0 cannot be accepted." } }trove-5.0.0/apidocs/src/samples/db-backups-by-instance-response-json.txt0000664000567000056710000000015012701410316027451 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 929 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backups-by-instance-request-json.txt0000664000567000056710000000035512701410316027312 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/backups HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-list-users-pagination-request-json.txt0000664000567000056710000000036312701410316027710 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/users?limit=2 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-create-response-json.txt0000664000567000056710000000015012701410316027717 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 431 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-faults-itemNotFound.json0000664000567000056710000000015212701410316025047 0ustar jenkinsjenkins00000000000000{ "itemNotFound": { "code": 404, "message": "The resource could not be found." 
} }trove-5.0.0/apidocs/src/samples/db-instance-resize-flavor-request-json.txt0000664000567000056710000000035512701410316030042 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/action HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-resize-volume-request-json.txt0000664000567000056710000000035512701410316030060 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/action HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instances-index-response-json.txt0000664000567000056710000000015012701410316026703 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 633 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-flavors-by-id-response.json0000664000567000056710000000060512701410316025455 0ustar jenkinsjenkins00000000000000{ "flavor": { "id": 1, "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "str_id": "1" } } trove-5.0.0/apidocs/src/samples/db-delete-databases-request-json.txt0000664000567000056710000000037412701410316026640 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/databases/testingdb HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-backup-create-incremental-request-json.txt0000664000567000056710000000027712701410316030460 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/backups HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-edit-parameters-response-json.txt0000664000567000056710000000014612701410316031547 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-get-root-details-response.json0000664000567000056710000000023012701410316027116 0ustar jenkinsjenkins00000000000000{ "root_history": { "enabled": "2014-10-30T12:30:00", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "user": "3000" } } trove-5.0.0/apidocs/src/samples/db-backup-get-response.json0000664000567000056710000000111312701410316025014 0ustar jenkinsjenkins00000000000000{ "backup": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Backup", "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": "http://localhost/path/to/backup", "name": "snapshot", "parent_id": null, "size": 0.14, "status": "COMPLETED", "updated": "2014-10-30T12:30:00" } } trove-5.0.0/apidocs/src/samples/db-flavors-response-json.txt0000664000567000056710000000015112701410316025264 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 2730 Date: Mon, 18 Mar 2013 19:09:17 GMT 
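The db-backup-create-incremental-request headers above pair with the JSON body shown nearby (db-backup-create-incremental-request.json), which chains the new backup to a parent via parent_id. A tentative Python sketch of that call, with the placeholder token and IDs from the samples:

import requests

resp = requests.post(
    "https://troveapi.org/v1.0/1234/backups",
    headers={"X-Auth-Token": "87c6033c-9ff6-405f-943e-2deb73f278b7",
             "Accept": "application/json",
             "Content-Type": "application/json"},
    json={"backup": {"description": "My Incremental Backup",
                     "instance": "44b277eb-39be-4921-be31-3d61b43651d7",
                     "name": "Incremental Snapshot",
                     # parent_id links this backup to the full backup it increments
                     "parent_id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4"}})
resp.raise_for_status()  # 202 Accepted; per the sample response, the backup starts in status "NEW"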
trove-5.0.0/apidocs/src/samples/db-create-instance-request-json.txt0000664000567000056710000000030112701410316026504 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-resize-volume-request.json0000664000567000056710000000011712701410316027237 0ustar jenkinsjenkins00000000000000{ "resize": { "volume": { "size": 4 } } } trove-5.0.0/apidocs/src/samples/db-list-databases-pagination-request-json.txt0000664000567000056710000000036712701410316030502 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/databases?limit=1 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-change-user-attributes-response-json.txt0000664000567000056710000000015412701410316030200 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-create-users-request.json0000664000567000056710000000131512701410316025232 0ustar jenkinsjenkins00000000000000{ "users": [ { "databases": [ { "name": "databaseA" } ], "name": "dbuser1", "password": "password" }, { "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ], "name": "dbuser2", "password": "password" }, { "databases": [ { "name": "databaseD" } ], "name": "dbuser3", "password": "password" } ] } trove-5.0.0/apidocs/src/samples/db-instance-reboot-request.json0000664000567000056710000000002612701410316025722 0ustar jenkinsjenkins00000000000000{ "reboot": {} } trove-5.0.0/apidocs/src/samples/db-mgmt-get-instance-details-response-json.txt0000664000567000056710000000015112701410316030556 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 1533 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-restore-delete-response-json.txt0000664000567000056710000000015412701410316026536 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-list-hosts-response.json0000664000567000056710000000030512701410316026047 0ustar jenkinsjenkins00000000000000{ "hosts": [ { "instanceCount": 1, "name": "hostname_1" }, { "instanceCount": 0, "name": "hostname_2" } ] } trove-5.0.0/apidocs/src/samples/db-backup-create-incremental-request.json0000664000567000056710000000035312701410316027636 0ustar jenkinsjenkins00000000000000{ "backup": { "description": "My Incremental Backup", "instance": "44b277eb-39be-4921-be31-3d61b43651d7", "name": "Incremental Snapshot", "parent_id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4" } } trove-5.0.0/apidocs/src/samples/db-configuration-attach-to-instance-response-json.txt0000664000567000056710000000015412701410316032146 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-instance-resize-volume-response-json.txt0000664000567000056710000000015412701410316030223 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT 
trove-5.0.0/apidocs/src/samples/db-mgmt-get-account-details-response-json.txt0000664000567000056710000000014712701410316030413 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 85 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-list-accounts-response.json0000664000567000056710000000015212701410316026526 0ustar jenkinsjenkins00000000000000{ "accounts": [ { "id": "3000", "num_instances": 1 } ] } trove-5.0.0/apidocs/src/samples/db-disable-root-user-response-json.txt0000664000567000056710000000014612701410316027154 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-delete-users-response-json.txt0000664000567000056710000000015412701410316026214 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-instance-reboot-request-json.txt0000664000567000056710000000036212701410316026542 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/mgmt/instances/44b277eb-39be-4921-be31-3d61b43651d7/action HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-backup-create-request.json0000664000567000056710000000022612701410316025336 0ustar jenkinsjenkins00000000000000{ "backup": { "description": "My Backup", "instance": "44b277eb-39be-4921-be31-3d61b43651d7", "name": "snapshot" } } trove-5.0.0/apidocs/src/samples/db-flavors-request-json.txt0000664000567000056710000000027612701410316025126 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/flavors HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-list-users-response-json.txt0000664000567000056710000000015012701410316025721 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 322 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-delete-response-json.txt0000664000567000056710000000015412701410316027722 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-create-response.json0000664000567000056710000000105012701410316025500 0ustar jenkinsjenkins00000000000000{ "backup": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5", "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb" }, "description": "My Backup", "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4", "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7", "locationRef": null, "name": "snapshot", "parent_id": null, "size": null, "status": "NEW", "updated": "2014-10-30T12:30:00" } } trove-5.0.0/apidocs/src/samples/db-delete-users-request-json.txt0000664000567000056710000000036712701410316026054 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/users/demouser HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json 
trove-5.0.0/apidocs/src/samples/db-create-users-request-json.txt0000664000567000056710000000035412701410316026051 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/users HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-update-parameters-request-json.txt0000664000567000056710000000035212701410316031735 0ustar jenkinsjenkins00000000000000PUT /v1.0/1234/configurations/43a6ea86-e959-4735-9e46-a6a5d4a2d80f HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-enable-root-user-response-json.txt0000664000567000056710000000014712701410316027000 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 47 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-datastore-versions-list-request-json.txt0000664000567000056710000000035712701410316030257 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa/versions HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-details-request-json.txt0000664000567000056710000000035212701410316027737 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/configurations/43a6ea86-e959-4735-9e46-a6a5d4a2d80f HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-check-root-user-response-json.txt0000664000567000056710000000014712701410316026627 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 21 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-disable-root-user-request-json.txt0000664000567000056710000000035512701410316027010 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/root HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-revoke-user-access-request-json.txt0000664000567000056710000000044712701410316027160 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/instances/692d8418-7a8f-47f1-8060-59846c6e024f/users/exampleuser/databases/databaseC HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-datastore-version-by-id-response.json0000664000567000056710000000103012701410316027443 0ustar jenkinsjenkins00000000000000{ "version": { "datastore": "a00000a0-00a0-0a00-00a0-000a000000aa", "id": "b00000b0-00b0-0b00-00b0-000b000000bb", "links": [ { "href": "https://troveapi.org/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "self" }, { "href": "https://troveapi.org/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "bookmark" } ], "name": "5.5" } } 
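The Update Configuration Parameters sample above shows a PUT against the configuration resource, and its JSON body appears earlier in this directory (db-configuration-update-parameters-request.json). A sketch combining the two with Python's requests library, using the placeholder token and configuration ID from the samples:

import requests

CFG = "43a6ea86-e959-4735-9e46-a6a5d4a2d80f"  # sample configuration ID
resp = requests.put(  # full update, per the db-configuration-update-parameters sample
    f"https://troveapi.org/v1.0/1234/configurations/{CFG}",
    headers={"X-Auth-Token": "87c6033c-9ff6-405f-943e-2deb73f278b7",
             "Accept": "application/json",
             "Content-Type": "application/json"},
    json={"configuration": {"description": "example updated description",
                            "name": "example-updated-name",
                            "values": {"collation_server": "utf8_unicode_ci",
                                       "connect_timeout": 150}}})
resp.raise_for_status()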
trove-5.0.0/apidocs/src/samples/db-list-users-response.json0000664000567000056710000000155612701410316025117 0ustar jenkinsjenkins00000000000000{ "users": [ { "databases": [ { "name": "databaseA" } ], "host": "%", "name": "dbuser1" }, { "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ], "host": "%", "name": "dbuser2" }, { "databases": [ { "name": "databaseD" } ], "host": "%", "name": "dbuser3" }, { "databases": [ { "name": "sampledb" } ], "host": "%", "name": "demouser" } ] } trove-5.0.0/apidocs/src/samples/db-datastore-by-id-response.json0000664000567000056710000000215412701410316025770 0ustar jenkinsjenkins00000000000000{ "datastore": { "default_version": "b00000b0-00b0-0b00-00b0-000b000000bb", "id": "a00000a0-00a0-0a00-00a0-000a000000aa", "links": [ { "href": "https://troveapi.org/v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa", "rel": "self" }, { "href": "https://troveapi.org/datastores/a00000a0-00a0-0a00-00a0-000a000000aa", "rel": "bookmark" } ], "name": "mysql", "versions": [ { "id": "b00000b0-00b0-0b00-00b0-000b000000bb", "links": [ { "href": "https://troveapi.org/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "self" }, { "href": "https://troveapi.org/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", "rel": "bookmark" } ], "name": "5.5" } ] } } trove-5.0.0/apidocs/src/samples/db-change-users-password-request.json0000664000567000056710000000031712701410316027055 0ustar jenkinsjenkins00000000000000{ "users": [ { "name": "dbuser1", "password": "newpassword" }, { "name": "dbuser2", "password": "anotherpassword" } ] }trove-5.0.0/apidocs/src/samples/db-mgmt-instance-diagnostics-request-json.txt0000664000567000056710000000036612701410316030525 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/instances/44b277eb-39be-4921-be31-3d61b43651d7/diagnostics HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-create-instance-response.json0000664000567000056710000000223412701410316026044 0ustar jenkinsjenkins00000000000000{ "instance": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5" }, "flavor": { "id": "1", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "self" }, { "href": "https://troveapi.org/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "bookmark" } ], "name": "json_rack_instance", "status": "BUILD", "updated": "2014-10-30T12:30:00", "volume": { "size": 2 } } } trove-5.0.0/apidocs/src/samples/db-configuration-attach-to-instance-request.json0000664000567000056710000000013512701410316031162 0ustar jenkinsjenkins00000000000000{ "instance": { "configuration": "43a6ea86-e959-4735-9e46-a6a5d4a2d80f" } } trove-5.0.0/apidocs/src/samples/db-create-users-response-json.txt0000664000567000056710000000015412701410316026215 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-databases-pagination-response.json0000664000567000056710000000043612701410316030030 0ustar 
jenkinsjenkins00000000000000{ "databases": [ { "name": "anotherdb" } ], "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/databases?limit=1&marker=anotherdb", "rel": "next" } ] } trove-5.0.0/apidocs/src/samples/db-check-root-user-request-json.txt0000664000567000056710000000035212701410316026457 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/root HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-status-detail-response-json.txt0000664000567000056710000000015012701410316030174 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 712 Date: Mon, 18 Mar 2013 19:09:17 GMT ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000trove-5.0.0/apidocs/src/samples/db-configuration-parameters-without-datastore-version-request-json.txttrove-5.0.0/apidocs/src/samples/db-configuration-parameters-without-datastore-version-request-json.t0000664000567000056710000000037212701410316035253 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb/parameters HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instance-resize-instance-request-json.txt0000664000567000056710000000040612701410316030352 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/23a3d4fb-3731-497b-afd4-bf25bde2b5fc/action HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 2eeb3252-0164-40f5-8fb7-85df5faa2698 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-configuration-update-parameters-request.json0000664000567000056710000000036412701410316031123 0ustar jenkinsjenkins00000000000000{ "configuration": { "description": "example updated description", "name": "example-updated-name", "values": { "collation_server": "utf8_unicode_ci", "connect_timeout": 150 } } } trove-5.0.0/apidocs/src/samples/db-list-databases-response-json.txt0000664000567000056710000000015012701410316026507 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 129 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-backup-delete-request-json.txt0000664000567000056710000000034612701410316026155 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/backups/a9832168-7541-4536-b8d9-a8a9b79cf1b4 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-datastore-version-by-id-request-json.txt0000664000567000056710000000042412701410316030120 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa/versions/b00000b0-00b0-0b00-00b0-000b000000bb HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-instances-index-response.json0000664000567000056710000000234612701410316026077 0ustar jenkinsjenkins00000000000000{ "instances": [ { "datastore": { "type": "mysql", "version": "5.5" }, 
"flavor": { "id": "1", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "self" }, { "href": "https://troveapi.org/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "bookmark" } ], "name": "json_rack_instance", "status": "ACTIVE", "volume": { "size": 2 } } ] } trove-5.0.0/apidocs/src/samples/db-mgmt-get-account-details-response.json0000664000567000056710000000021112701410316027566 0ustar jenkinsjenkins00000000000000{ "account": { "id": "3000", "instance_ids": [ "44b277eb-39be-4921-be31-3d61b43651d7" ] } } trove-5.0.0/apidocs/src/samples/db-versions-request-json.txt0000664000567000056710000000030612701410316025314 0ustar jenkinsjenkins00000000000000GET / HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-backup-restore-request.json0000664000567000056710000000036112701410316025556 0ustar jenkinsjenkins00000000000000{ "instance": { "flavorRef": 1, "name": "backup_instance", "restorePoint": { "backupRef": "a9832168-7541-4536-b8d9-a8a9b79cf1b4" }, "volume": { "size": 2 } } } trove-5.0.0/apidocs/src/samples/db-flavors-by-id-request-json.txt0000664000567000056710000000030012701410316026114 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/flavors/1 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-list-instances-response-json.txt0000664000567000056710000000014712701410316031422 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 93 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-versions-response-json.txt0000664000567000056710000000014612701410316025464 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 153 Date: Wed, 25 Jan 2012 21:53:04 GMTtrove-5.0.0/apidocs/src/samples/db-mgmt-get-storage-response.json0000664000567000056710000000057112701410316026164 0ustar jenkinsjenkins00000000000000{ "devices": [ { "capacity": { "available": 90, "total": 100 }, "name": "fake_storage", "provision": { "available": 40, "percent": 10, "total": 50 }, "type": "test_type", "used": 10 } ] } trove-5.0.0/apidocs/src/samples/db-backup-list-request-json.txt0000664000567000056710000000027612701410316025670 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/backups HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-get-host-detail-response.json0000664000567000056710000000072112701410316026732 0ustar jenkinsjenkins00000000000000{ "host": { "instances": [ { "id": "44b277eb-39be-4921-be31-3d61b43651d7", "name": "44b277eb-39be-4921-be31-3d61b43651d7-lay", "server_id": "44b277eb-39be-4921-be31-3d61b43651d7", "status": "ACTIVE", "tenant_id": "3000" } ], "name": "hostname_1", "percentUsed": 12, "totalRAM": 32000, "usedRAM": 4096 } } 
trove-5.0.0/apidocs/src/samples/db-version-response.json0000664000567000056710000000043512701410316024465 0ustar jenkinsjenkins00000000000000{ "version": { "id": "v1.0", "links": [ { "href": "https://ord.databases.api.rackspacecloud.com/v1.0/", "rel": "self" } ], "status": "CURRENT", "updated": "2012-01-01T00:00:00Z" } }trove-5.0.0/apidocs/src/samples/db-backup-restore-request-json.txt0000664000567000056710000000030112701410316026365 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-get-root-details-response-json.txt0000664000567000056710000000015012701410316027734 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 114 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-mgmt-instance-index-request-json.txt0000664000567000056710000000032312701410316027316 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/instances?deleted=false HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-create-request-json.txt0000664000567000056710000000030612701410316027554 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/configurations HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-delete-instance-request-json.txt0000664000567000056710000000035012701410316026507 0ustar jenkinsjenkins00000000000000DELETE /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-list-user-access-response.json0000664000567000056710000000020512701410316026161 0ustar jenkinsjenkins00000000000000{ "databases": [ { "name": "databaseA" }, { "name": "databaseB" } ] }trove-5.0.0/apidocs/src/samples/db-backup-restore-response-json.txt0000664000567000056710000000015012701410316026535 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 694 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-user-dbs-response.json0000664000567000056710000000033012701410316025467 0ustar jenkinsjenkins00000000000000{ "user": { "name": "exampleuser", "databases": [ { "name": "databaseA" }, { "name": "databaseB" } ] } }trove-5.0.0/apidocs/src/samples/db-configuration-update-parameters-response-json.txt0000664000567000056710000000015412701410316032103 0ustar jenkinsjenkins00000000000000HTTP/1.1 202 Accepted Content-Type: application/json Content-Length: 0 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-list-instances-response.json0000664000567000056710000000022512701410316030602 0ustar jenkinsjenkins00000000000000{ "instances": [ { "id": "44b277eb-39be-4921-be31-3d61b43651d7", "name": "json_rack_instance" } ] } trove-5.0.0/apidocs/src/samples/db-configuration-edit-parameters-request-json.txt0000664000567000056710000000035412701410316031402 0ustar jenkinsjenkins00000000000000PATCH /v1.0/1234/configurations/43a6ea86-e959-4735-9e46-a6a5d4a2d80f HTTP/1.1 User-Agent: 
python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-list-databases-response.json0000664000567000056710000000044512701410316025701 0ustar jenkinsjenkins00000000000000{ "databases": [ { "name": "anotherdb" }, { "name": "nextround" }, { "name": "oneMoreDB" }, { "name": "sampledb" }, { "name": "testingdb" } ] } trove-5.0.0/apidocs/src/samples/db-configuration-parameters-for-datastore-version-response-json.txt0000664000567000056710000000015112701410316035053 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 1008 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-change-user-attributes-request-json.txt0000664000567000056710000000036312701410316030034 0ustar jenkinsjenkins00000000000000PUT /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/users/dbuser1 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-datastore-versions-list-response-json.txt0000664000567000056710000000015012701410316030414 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 371 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-user-dbs-request-json.txt0000664000567000056710000000042012701410316026136 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/692d8418-7a8f-47f1-8060-59846c6e024f/users/exampleuser HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-create-instance-request.json0000664000567000056710000000123212701410316025673 0ustar jenkinsjenkins00000000000000{ "instance": { "databases": [ { "character_set": "utf8", "collate": "utf8_general_ci", "name": "sampledb" }, { "name": "nextround" } ], "flavorRef": 1, "name": "json_rack_instance", "users": [ { "databases": [ { "name": "sampledb" } ], "name": "demouser", "password": "demopassword" } ], "volume": { "size": 2 } } } trove-5.0.0/apidocs/src/samples/db-configuration-list-response.json0000664000567000056710000000072012701410316026615 0ustar jenkinsjenkins00000000000000{ "configurations": [ { "created": "2014-10-30T12:30:00", "datastore_name": "mysql", "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "datastore_version_name": "5.5", "description": "example description", "id": "43a6ea86-e959-4735-9e46-a6a5d4a2d80f", "name": "example-configuration-name", "updated": "2014-10-30T12:30:00" } ] } trove-5.0.0/apidocs/src/samples/db-mgmt-get-host-detail-request-json.txt0000664000567000056710000000031412701410316027377 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/hosts/hostname_1 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-mgmt-get-instance-details-response.json0000664000567000056710000000466612701410316027760 0ustar jenkinsjenkins00000000000000{ "instance": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5" }, "deleted": false, "deleted_at": null, "flavor": { "id": "3", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/3", "rel": "self" }, { "href": 
"https://troveapi.org/flavors/3", "rel": "bookmark" } ] }, "guest_status": { "state_description": "running" }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "self" }, { "href": "https://troveapi.org/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "bookmark" } ], "name": "json_rack_instance", "root_enabled": "2014-10-30T12:30:00", "root_enabled_by": "3000", "server": { "addresses": { "private": [ { "addr": "123.123.123.123" } ] }, "deleted": false, "deleted_at": null, "host": "hostname_1", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "local_id": 0, "name": "44b277eb-39be-4921-be31-3d61b43651d7-lay", "status": "ACTIVE", "tenant_id": "3000" }, "service_status": "ACTIVE", "status": "ACTIVE", "task_description": "No tasks for the instance.", "tenant_id": "3000", "updated": "2014-10-30T12:30:00", "volume": { "attachments": [ { "device": "vdb", "server_id": "44b277eb-39be-4921-be31-3d61b43651d7" } ], "availability_zone": "fake-availability-zone", "created_at": "2001-01-01-12:30:30", "id": "VOL_44b277eb-39be-4921-be31-3d61b43651d7", "size": 4, "status": "in-use", "total": 4.0, "used": 0.16 } } } trove-5.0.0/apidocs/src/samples/db-configuration-list-response-json.txt0000664000567000056710000000015012701410316027427 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 336 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-databases-request-json.txt0000664000567000056710000000035712701410316026352 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/databases HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000trove-5.0.0/apidocs/src/samples/db-configuration-parameters-without-datastore-version-response-json.txttrove-5.0.0/apidocs/src/samples/db-configuration-parameters-without-datastore-version-response-json.0000664000567000056710000000015112701410316035230 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 1008 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-attach-to-instance-request-json.txt0000664000567000056710000000034512701410316032002 0ustar jenkinsjenkins00000000000000PUT /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-configuration-parameter-for-datastore-version-response.json0000664000567000056710000000025212701410316034055 0ustar jenkinsjenkins00000000000000{ "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "min": 0, "name": "collation_server", "restart_required": false, "type": "string" } trove-5.0.0/apidocs/src/samples/db-mgmt-list-accounts-request-json.txt0000664000567000056710000000030412701410316027174 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/mgmt/accounts HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json 
trove-5.0.0/apidocs/src/samples/db-configuration-details-response-json.txt0000664000567000056710000000015012701410316030101 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 431 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-change-users-password-request-json.txt0000664000567000056710000000040412701410316027667 0ustar jenkinsjenkins00000000000000PUT /v1.0/1234/instances/692d8418-7a8f-47f1-8060-59846c6e024f/users HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-configuration-details-response.json0000664000567000056710000000105712701410316027273 0ustar jenkinsjenkins00000000000000{ "configuration": { "created": "2014-10-30T12:30:00", "datastore_name": "mysql", "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", "datastore_version_name": "5.5", "description": "example description", "id": "43a6ea86-e959-4735-9e46-a6a5d4a2d80f", "instance_count": 0, "name": "example-configuration-name", "updated": "2014-10-30T12:30:00", "values": { "collation_server": "latin1_swedish_ci", "connect_timeout": 120 } } } trove-5.0.0/apidocs/src/samples/db-enable-root-user-request-json.txt0000664000567000056710000000035312701410316026631 0ustar jenkinsjenkins00000000000000POST /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7/root HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-datastores-list-response-json.txt0000664000567000056710000000015012701410316026731 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 681 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-parameters-for-datastore-version-request-json.txt0000664000567000056710000000043712701410316034714 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/datastores/a00000a0-00a0-0a00-00a0-000a000000aa/versions/b00000b0-00b0-0b00-00b0-000b000000bb/parameters HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-datastore-version-by-id-response-json.txt0000664000567000056710000000015012701410316030262 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 368 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-configuration-detach-from-instance-request-json.txt0000664000567000056710000000034512701410316032307 0ustar jenkinsjenkins00000000000000PUT /v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7 HTTP/1.1 User-Agent: python-troveclient Host: troveapi.org X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/json trove-5.0.0/apidocs/src/samples/db-faults-instanceFault.json0000664000567000056710000000023512701410316025236 0ustar jenkinsjenkins00000000000000{ "instanceFault": { "code": 500, "message": "The server has either erred or is incapable of performing the requested operation." 
} }trove-5.0.0/apidocs/src/samples/db-version-request-json.txt0000664000567000056710000000031312701410316025127 0ustar jenkinsjenkins00000000000000GET /v1.0/ HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/apidocs/src/samples/db-versions-response.json0000664000567000056710000000052612701410316024651 0ustar jenkinsjenkins00000000000000{ "versions": [ { "id": "v1.0", "links": [ { "href": "https://ord.databases.api.rackspacecloud.com/v1.0/", "rel": "self" } ], "status": "CURRENT", "updated": "2012-01-01T00:00:00Z" } ] }trove-5.0.0/apidocs/src/samples/db-check-root-user-response.json0000664000567000056710000000003512701410316026006 0ustar jenkinsjenkins00000000000000{ "rootEnabled": true } trove-5.0.0/apidocs/src/samples/db-instance-status-detail-response.json0000664000567000056710000000226712701410316027372 0ustar jenkinsjenkins00000000000000{ "instance": { "created": "2014-10-30T12:30:00", "datastore": { "type": "mysql", "version": "5.5" }, "flavor": { "id": "1", "links": [ { "href": "https://troveapi.org/v1.0/1234/flavors/1", "rel": "self" }, { "href": "https://troveapi.org/flavors/1", "rel": "bookmark" } ] }, "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com", "id": "44b277eb-39be-4921-be31-3d61b43651d7", "links": [ { "href": "https://troveapi.org/v1.0/1234/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "self" }, { "href": "https://troveapi.org/instances/44b277eb-39be-4921-be31-3d61b43651d7", "rel": "bookmark" } ], "name": "json_rack_instance", "status": "ACTIVE", "updated": "2014-10-30T12:30:00", "volume": { "size": 2, "used": 0.16 } } } trove-5.0.0/apidocs/src/samples/db-backup-get-response-json.txt0000664000567000056710000000015012701410316025631 0ustar jenkinsjenkins00000000000000HTTP/1.1 200 OK Content-Type: application/json Content-Length: 439 Date: Mon, 18 Mar 2013 19:09:17 GMT trove-5.0.0/apidocs/src/samples/db-list-user-access-request-json.txt0000664000567000056710000000043212701410316026632 0ustar jenkinsjenkins00000000000000GET /v1.0/1234/instances/692d8418-7a8f-47f1-8060-59846c6e024f/users/exampleuser/databases HTTP/1.1 User-Agent: python-example-client Host: ord.databases.api.rackspacecloud.com X-Auth-Token: 87c6033c-9ff6-405f-943e-2deb73f278b7 Accept: application/json Content-Type: application/jsontrove-5.0.0/test-requirements.txt0000664000567000056710000000164512701410316020256 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Hacking already pins down pep8, pyflakes and flake8 hacking<0.11,>=0.10.0 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2 coverage>=3.6 # Apache-2.0 nose # LGPL nosexcover # BSD openstack-doc-tools>=0.23 # Apache-2.0 openstack.nose-plugin>=0.7 # Apache-2.0 WebTest>=2.0 # MIT wsgi-intercept>=0.6.1 # MIT License proboscis>=1.2.5.3 # Apache-2.0 python-troveclient!=2.1.0,>=1.2.0 # Apache-2.0 mock>=1.2 # BSD mox3>=0.7.0 # Apache-2.0 testtools>=1.4.0 # MIT testrepository>=0.0.18 # Apache-2.0/BSD pymongo!=3.1,>=3.0.2 # Apache-2.0 redis>=2.10.0 # MIT psycopg2>=2.5 # LGPL/ZPL cassandra-driver>=2.1.4 # Apache-2.0 pycrypto>=2.6 # Public Domain couchdb>=0.8 # Apache-2.0