manila-2013.2.dev175.gbf1a399/0000775000175000017500000000000012301410516015507 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/run_tests.sh0000775000175000017500000001324412301410454020101 0ustar chuckchuck00000000000000#!/bin/bash set -u function usage { echo "Usage: $0 [OPTION]..." echo "Run Manila's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -x, --stop Stop running tests after the first error or failure." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -X, --coverage-xml Generate XML coverage report." echo " -h, --help Print this usage message" echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
exit } function process_option { case "$1" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -m|--patch-migrate) patch_migrate=1;; -w|--no-patch-migrate) patch_migrate=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -X|--coverage-xml) coverage_xml=1;; -*) noseopts="$noseopts $1";; *) noseargs="$noseargs $1" esac } venv=.venv with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= noseargs= noseopts= wrapper="" just_pep8=0 no_pep8=0 coverage=0 coverage_xml=0 recreate_db=1 patch_migrate=1 update=0 export NOSE_WITH_OPENSTACK=true export NOSE_OPENSTACK_COLOR=true export NOSE_OPENSTACK_SHOW_ELAPSED=true for arg in "$@"; do process_option $arg done # If enabled, tell nose to collect coverage data if [ $coverage -eq 1 ]; then noseopts="$noseopts --with-coverage --cover-package=manila" fi if [ $coverage_xml -eq 1 ]; then noseopts="$noseopts --with-xcoverage --cover-package=manila --xcoverage-file=`pwd`/coverage.xml" fi if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete # Just run the test suites in current environment ${wrapper} $NOSETESTS RESULT=$? return $RESULT } # Files of interest # NOTE(lzyeval): Avoid selecting manila-api-paste.ini and manila.conf in manila/bin # when running on devstack. # NOTE(lzyeval): Avoid selecting *.pyc files to reduce pep8 check-up time # when running on devstack. # NOTE(dprince): Exclude xenapi plugins. They are Python 2.4 code and as such # cannot be expected to work with tools/hacking checks. xen_net_path="plugins/xenserver/networking/etc/xensource/scripts" srcfiles=`find manila -type f -name "*.py" ! 
-path "manila/openstack/common/*"` srcfiles+=" `find bin -type f ! -name "manila.conf*" ! -name "*api-paste.ini*"`" srcfiles+=" `find tools -type f -name "*.py"`" srcfiles+=" setup.py" function run_pep8 { echo "Running PEP8 and HACKING compliance check..." bash -c "${wrapper} flake8 manila* bin" } NOSETESTS="nosetests $noseopts $noseargs" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi run_tests RET=$? # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (noseopts), which begin with a '-', and # arguments (noseargs). 
if [ -z "$noseargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage html --include='manila/*' --omit='manila/openstack/common/*' -d covhtml -i fi exit $RET manila-2013.2.dev175.gbf1a399/openstack-common.conf0000664000175000017500000000104512301410454021634 0ustar chuckchuck00000000000000[DEFAULT] # The list of modules to copy from openstack-common module=context module=exception module=excutils module=fileutils module=flakes module=gettextutils module=importutils module=install_venv_common module=jsonutils module=local module=lockutils module=log module=network_utils module=notifier module=policy module=processutils module=rootwrap module=rpc module=scheduler module=scheduler.filters module=scheduler.weights module=strutils module=timeutils module=uuidutils # The base module to hold the copy of openstack.common base=manila manila-2013.2.dev175.gbf1a399/HACKING.rst0000664000175000017500000001755712301410454017325 0ustar chuckchuck00000000000000Manila Style Commandments ======================= - Step 1: Read http://www.python.org/dev/peps/pep-0008/ - Step 2: Read http://www.python.org/dev/peps/pep-0008/ again - Step 3: Read on General ------- - Put two newlines between top-level code (funcs, classes, etc) - Put one newline between methods in classes and anywhere else - Long lines should be wrapped in parentheses in preference to using a backslash for line continuation. - Do not write "except:", use "except Exception:" at the very least - Include your name with TODOs as in "#TODO(termie)" - Do not shadow a built-in or reserved word. Example:: def list(): return [1, 2, 3] mylist = list() # BAD, shadows `list` built-in class Foo(object): def list(self): return [1, 2, 3] mylist = Foo().list() # OKAY, does not shadow built-in - Use the "is not" operator when testing for unequal identities. 
Example:: if not X is Y: # BAD, intended behavior is ambiguous pass if X is not Y: # OKAY, intuitive pass - Use the "not in" operator for evaluating membership in a collection. Example:: if not X in Y: # BAD, intended behavior is ambiguous pass if X not in Y: # OKAY, intuitive pass if not (X in Y or X in Z): # OKAY, still better than all those 'not's pass Imports ------- - Do not import objects, only modules (*) - Do not import more than one module per line (*) - Do not make relative imports - Order your imports by the full module path - Organize your imports according to the following template (*) exceptions are: - imports from ``migrate`` package - imports from ``sqlalchemy`` package - imports from ``manila.db.sqlalchemy.session`` module Example:: # vim: tabstop=4 shiftwidth=4 softtabstop=4 {{stdlib imports in human alphabetical order}} \n {{third-party lib imports in human alphabetical order}} \n {{manila imports in human alphabetical order}} \n \n {{begin your code}} Human Alphabetical Order Examples --------------------------------- Example:: import httplib import logging import random import StringIO import time import unittest import eventlet import webob.exc import manila.api.ec2 from manila.api import openstack from manila.auth import users from manila.endpoint import cloud import manila.flags from manila import test Docstrings ---------- Example:: """A one line docstring looks like this and ends in a period.""" """A multi line docstring has a one-line summary, less than 80 characters. Then a new paragraph after a newline that explains in more detail any general information about the function, class or method. Example usages are also great to have here if it is a complex class for function. When writing the docstring for a class, an extra line should be placed after the closing quotations. 
For more in-depth explanations for these decisions see http://www.python.org/dev/peps/pep-0257/ If you are going to describe parameters and return values, use Sphinx, the appropriate syntax is as follows. :param foo: the foo parameter :param bar: the bar parameter :returns: return_type -- description of the return value :returns: description of the return value :raises: AttributeError, KeyError """ Dictionaries/Lists ------------------ If a dictionary (dict) or list object is longer than 80 characters, its items should be split with newlines. Embedded iterables should have their items indented. Additionally, the last item in the dictionary should have a trailing comma. This increases readability and simplifies future diffs. Example:: my_dictionary = { "image": { "name": "Just a Snapshot", "size": 2749573, "properties": { "user_id": 12, "arch": "x86_64", }, "things": [ "thing_one", "thing_two", ], "status": "ACTIVE", }, } Calling Methods --------------- Calls to methods 80 characters or longer should format each argument with newlines. This is not a requirement, but a guideline:: unnecessarily_long_function_name('string one', 'string two', kwarg1=constants.ACTIVE, kwarg2=['a', 'b', 'c']) Rather than constructing parameters inline, it is better to break things up:: list_of_strings = [ 'what_a_long_string', 'not as long', ] dict_of_numbers = { 'one': 1, 'two': 2, 'twenty four': 24, } object_one.call_a_method('string three', 'string four', kwarg1=list_of_strings, kwarg2=dict_of_numbers) Internationalization (i18n) Strings ----------------------------------- In order to support multiple languages, we have a mechanism to support automatic translations of exception and log strings. Example:: msg = _("An error occurred") raise HTTPBadRequest(explanation=msg) If you have a variable to place within the string, first internationalize the template string then do the replacement. 
Example:: msg = _("Missing parameter: %s") % ("flavor",) LOG.error(msg) If you have multiple variables to place in the string, use keyword parameters. This helps our translators reorder parameters when needed. Example:: msg = _("The server with id %(s_id)s has no key %(m_key)s") LOG.error(msg % {"s_id": "1234", "m_key": "imageId"}) Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. For more information on creating unit tests and utilizing the testing infrastructure in OpenStack Manila, please read manila/testing/README.rst. openstack-common ---------------- A number of modules from openstack-common are imported into the project. These modules are "incubating" in openstack-common and are kept in sync with the help of openstack-common's update.py script. See: http://wiki.openstack.org/CommonLibrary#Incubation The copy of the code should never be directly modified here. Please always update openstack-common first and then run the script to copy the changes across. OpenStack Trademark ------------------- OpenStack is a registered trademark of OpenStack, LLC, and uses the following capitalization: OpenStack Commit Messages --------------- Using a common format for commit messages will help keep our git history readable. Follow these guidelines: First, provide a brief summary (it is recommended to keep the commit title under 50 chars). The first line of the commit message should provide an accurate description of the change, not just a reference to a bug or blueprint. It must be followed by a single blank line. 
If the change relates to a specific driver (libvirt, xenapi, qpid, etc...), begin the first line of the commit message with the driver name, lowercased, followed by a colon. Following your brief summary, provide a more detailed description of the patch, manually wrapping the text at 72 characters. This description should provide enough detail that one does not have to refer to external resources to determine its high-level functionality. Once you use 'git review', two lines will be appended to the commit message: a blank line followed by a 'Change-Id'. This is important to correlate this commit with a specific review in Gerrit, and it should not be modified. For further information on constructing high quality commit messages, and how to split up commits into a series of changes, consult the project wiki: http://wiki.openstack.org/GitCommitMessages manila-2013.2.dev175.gbf1a399/bin/0000775000175000017500000000000012301410516016257 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/bin/manila-api0000775000175000017500000000352112301410454020217 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for manila OS API.""" # NOTE(jdg): If we port over multi worker code from Nova # we'll need to set monkey_patch(os=False), unless # eventlet is updated/released to fix the root issue import eventlet eventlet.monkey_patch() import os import sys from oslo.config import cfg possible_topdir = os.path.normpath(os.path.join(os.path.abspath( sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, "manila", "__init__.py")): sys.path.insert(0, possible_topdir) from manila.openstack.common import gettextutils gettextutils.install('manila') from manila.common import config # Need to register global_opts from manila.openstack.common import log as logging from manila import service from manila import utils from manila import version CONF = cfg.CONF if __name__ == '__main__': CONF(sys.argv[1:], project='manila', version=version.version_string()) logging.setup("manila") utils.monkey_patch() server = service.WSGIService('osapi_share') service.serve(server) service.wait() manila-2013.2.dev175.gbf1a399/bin/manila-share0000775000175000017500000000430012301410454020544 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for manila Share.""" import eventlet eventlet.monkey_patch() import os import sys from oslo.config import cfg # If ../manila/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'manila', '__init__.py')): sys.path.insert(0, possible_topdir) from manila.openstack.common import gettextutils gettextutils.install('manila') from manila.common import config # Need to register global_opts from manila.openstack.common import log as logging from manila import service from manila import utils from manila import version CONF = cfg.CONF if __name__ == '__main__': args = CONF(sys.argv[1:], project='manila', version=version.version_string()) logging.setup("manila") utils.monkey_patch() launcher = service.ProcessLauncher() if CONF.enabled_share_backends: for backend in CONF.enabled_share_backends: host = "%s@%s" % (CONF.host, backend) server = service.Service.create( host=host, service_name=backend) launcher.launch_server(server) else: server = service.Service.create(binary='manila-share') launcher.launch_server(server) launcher.wait() manila-2013.2.dev175.gbf1a399/bin/manila-rpc-zmq-receiver0000775000175000017500000000325312301410454022643 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import eventlet eventlet.monkey_patch() import contextlib import os import sys # If ../manila/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'manila', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from oslo.config import cfg from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila.openstack.common.rpc import impl_zmq CONF = cfg.CONF CONF.register_opts(rpc.rpc_opts) CONF.register_opts(impl_zmq.zmq_opts) def main(): CONF(sys.argv[1:], project='manila') logging.setup("manila") with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: reactor.consume_in_thread() reactor.wait() if __name__ == '__main__': main() manila-2013.2.dev175.gbf1a399/bin/manila-all0000775000175000017500000000464412301410454020225 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack, LLC # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for All manila services. 
This script attempts to start all the manila services in one process. Each service is started in its own greenthread. Please note that exceptions and sys.exit() on the starting of a service are logged and the script will continue attempting to launch the rest of the services. """ import eventlet eventlet.monkey_patch() import os import sys from oslo.config import cfg possible_topdir = os.path.normpath(os.path.join(os.path.abspath( sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, "manila", "__init__.py")): sys.path.insert(0, possible_topdir) from manila.openstack.common import gettextutils gettextutils.install('manila') from manila.common import config # Need to register global_opts from manila.openstack.common import log as logging from manila import service from manila import utils from manila import version CONF = cfg.CONF if __name__ == '__main__': CONF(sys.argv[1:], project='manila', version=version.version_string()) logging.setup("manila") LOG = logging.getLogger('manila.all') utils.monkey_patch() servers = [] # manila-api try: servers.append(service.WSGIService('osapi_share')) except (Exception, SystemExit): LOG.exception(_('Failed to load osapi_share')) for binary in ['manila-share', 'manila-scheduler', 'manila-api']: try: servers.append(service.Service.create(binary=binary)) except (Exception, SystemExit): LOG.exception(_('Failed to load %s'), binary) service.serve(*servers) service.wait() manila-2013.2.dev175.gbf1a399/bin/manila-clear-rabbit-queues0000775000175000017500000000466012301410454023307 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Admin/debug script to wipe rabbitMQ (AMQP) queues manila uses. This can be used if you need to change durable options on queues, or to wipe all messages in the queue system if things are in a serious bad way. """ import datetime import os import sys import time from oslo.config import cfg # If ../manila/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'manila', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from manila.openstack.common import gettextutils gettextutils.install('manila') from manila.common import config # Need to register global_opts from manila import context from manila import exception from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila import version delete_exchange_opt = \ cfg.BoolOpt('delete_exchange', default=False, help='delete manila exchange too.') CONF = cfg.CONF CONF.register_cli_opt(delete_exchange_opt) def delete_exchange(exch): conn = rpc.create_connection() x = conn.get_channel() x.exchange_delete(exch) def delete_queues(queues): conn = rpc.create_connection() x = conn.get_channel() for q in queues: x.queue_delete(q) if __name__ == '__main__': args = CONF(sys.argv[1:], project='manila', version=version.version_string()) logging.setup("manila") delete_queues(args[1:]) if CONF.delete_exchange: delete_exchange(CONF.control_exchange) 
manila-2013.2.dev175.gbf1a399/bin/manila-manage0000775000175000017500000003410512301410454020700 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ CLI interface for manila management. """ import os import sys import uuid from sqlalchemy import create_engine, MetaData, Table from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker # If ../manila/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'manila', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from manila.openstack.common import gettextutils gettextutils.install('manila') from oslo.config import cfg from manila.common import config # Need to register global_opts from manila import context from manila import db from manila.db import migration from manila import exception from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila.openstack.common import uuidutils from manila import utils from manila import version CONF = cfg.CONF # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator def param2id(object_id): """Helper function to convert various id types to internal id. args: [object_id], e.g. 
'vol-0000000a' or 'volume-0000000a' or '10' """ if uuidutils.is_uuid_like(object_id): return object_id elif '-' in object_id: # FIXME(ja): mapping occurs in nova? pass else: return int(object_id) class ShellCommands(object): def bpython(self): """Runs a bpython shell. Falls back to Ipython/python shell if unavailable""" self.run('bpython') def ipython(self): """Runs an Ipython shell. Falls back to Python shell if unavailable""" self.run('ipython') def python(self): """Runs a python shell. Falls back to Python shell if unavailable""" self.run('python') @args('--shell', dest="shell", metavar='', help='Python shell') def run(self, shell=None): """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' if shell == 'bpython': try: import bpython bpython.embed() except ImportError: shell = 'ipython' if shell == 'ipython': try: import IPython # Explicitly pass an empty list as arguments, because # otherwise IPython would use sys.argv from this script. shell = IPython.Shell.IPShell(argv=[]) shell.mainloop() except ImportError: shell = 'python' if shell == 'python': import code try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', # because we already know 'readline' was imported successfully. import rlcompleter readline.parse_and_bind("tab:complete") code.interact() @args('--path', required=True, help='Script path') def script(self, path): """Runs the script from the specifed path with flags set properly. 
arguments: path""" exec(compile(open(path).read(), path, 'exec'), locals(), globals()) def _db_error(caught_exception): print caught_exception print _("The above error may show that the database has not " "been created.\nPlease create a database using " "'manila-manage db sync' before running this command.") exit(1) class HostCommands(object): """List hosts.""" @args('zone', nargs='?', default=None, help='Availability Zone (default: %(default)s)') def list(self, zone=None): """Show a list of all physical hosts. Filter by zone. args: [zone]""" print "%-25s\t%-15s" % (_('host'), _('zone')) ctxt = context.get_admin_context() services = db.service_get_all(ctxt) if zone: services = [s for s in services if s['availability_zone'] == zone] hosts = [] for srv in services: if not [h for h in hosts if h['host'] == srv['host']]: hosts.append(srv) for h in hosts: print "%-25s\t%-15s" % (h['host'], h['availability_zone']) class DbCommands(object): """Class for managing the database.""" def __init__(self): pass @args('version', nargs='?', default=None, help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return migration.db_sync(version) def version(self): """Print the current database version.""" print migration.db_version() class VersionCommands(object): """Class for exposing the codebase version.""" def __init__(self): pass def list(self): print(version.version_string()) def __call__(self): self.list() class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" def __init__(self): pass def list(self): for key, value in CONF.iteritems(): if value is not None: print '%s = %s' % (key, value) class GetLogCommands(object): """Get logging information.""" def errors(self): """Get all of the errors from the log files.""" error_found = 0 if CONF.log_dir: logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')] for file in logs: log_file = os.path.join(CONF.log_dir, file) lines = 
[line.strip() for line in open(log_file, "r")] lines.reverse() print_name = 0 for index, line in enumerate(lines): if line.find(" ERROR ") > 0: error_found += 1 if print_name == 0: print log_file + ":-" print_name = 1 print "Line %d : %s" % (len(lines) - index, line) if error_found == 0: print "No errors in logfiles!" @args('num_entries', nargs='?', type=int, default=10, help='Number of entries to list (default: %(default)d)') def syslog(self, num_entries=10): """Get of the manila syslog events.""" entries = int(num_entries) count = 0 log_file = '' if os.path.exists('/var/log/syslog'): log_file = '/var/log/syslog' elif os.path.exists('/var/log/messages'): log_file = '/var/log/messages' else: print "Unable to find system log file!" sys.exit(1) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print "Last %s manila syslog entries:-" % (entries) for line in lines: if line.find("manila") > 0: count += 1 print "%s" % (line) if count == entries: break if count == 0: print "No manila entries in syslog!" 
class ServiceCommands(object): """Methods for managing services.""" def list(self): """Show a list of all manila services.""" ctxt = context.get_admin_context() services = db.service_get_all(ctxt) print_format = "%-16s %-36s %-16s %-10s %-5s %-10s" print print_format % ( _('Binary'), _('Host'), _('Zone'), _('Status'), _('State'), _('Updated At')) for svc in services: alive = utils.service_is_up(svc) art = ":-)" if alive else "XXX" status = 'enabled' if svc['disabled']: status = 'disabled' print print_format % (svc['binary'], svc['host'].partition('.')[0], svc['availability_zone'], status, art, svc['updated_at']) CATEGORIES = { 'config': ConfigCommands, 'db': DbCommands, 'host': HostCommands, 'logs': GetLogCommands, 'service': ServiceCommands, 'shell': ShellCommands, 'version': VersionCommands } def methods_of(obj): """Get all callable methods of an object that don't start with underscore returns a list of tuples of the form (method_name, method)""" result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers): for category in CATEGORIES: command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) category_opt = cfg.SubCommandOpt('category', title='Command categories', handler=add_command_parsers) def get_arg_string(args): arg = None if args[0] == '-': # (Note)zhiteng: args starts with CONF.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. 
if args[1] == '-': # This is long optional arg arg = args[2:] else: arg = args[3:] else: arg = args return arg def fetch_func_args(func): fn_args = [] for args, kwargs in getattr(func, 'args', []): arg = get_arg_string(args[0]) fn_args.append(getattr(CONF.category, arg)) return fn_args def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) script_name = sys.argv[0] if len(sys.argv) < 2: print(_("\nOpenStack manila version: %(version)s\n") % {'version': version.version_string()}) print script_name + " category action []" print _("Available categories:") for category in CATEGORIES: print "\t%s" % category sys.exit(2) try: CONF(sys.argv[1:], project='manila', version=version.version_string()) logging.setup("manila") except cfg.ConfigFilesNotFoundError: cfgfile = CONF.config_file[-1] if CONF.config_file else None if cfgfile and not os.access(cfgfile, os.R_OK): st = os.stat(cfgfile) print _("Could not read %s. Re-running with sudo") % cfgfile try: os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) except Exception: print _('sudo failed, continuing as if nothing happened') print _('Please re-run manila-manage as root.') sys.exit(2) fn = CONF.category.action_fn fn_args = fetch_func_args(fn) fn(*fn_args) if __name__ == '__main__': main() manila-2013.2.dev175.gbf1a399/bin/manila-rootwrap0000775000175000017500000001106012301410454021320 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Root wrapper for OpenStack services Filters which commands a service is allowed to run as another user. To use this with manila, you should set the following in manila.conf: rootwrap_config=/etc/manila/rootwrap.conf You also need to let the manila user run manila-rootwrap as root in sudoers: manila ALL = (root) NOPASSWD: /usr/bin/manila-rootwrap /etc/manila/rootwrap.conf * Service packaging should deploy .filters files only on nodes where they are needed, to avoid allowing more than is necessary. """ import ConfigParser import logging import os import pwd import signal import subprocess import sys RC_UNAUTHORIZED = 99 RC_NOCOMMAND = 98 RC_BADCONFIG = 97 RC_NOEXECFOUND = 96 def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. 
signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _exit_error(execname, message, errorcode, log=True): print "%s: %s" % (execname, message) if log: logging.error(message) sys.exit(errorcode) if __name__ == '__main__': # Split arguments, require at least a command execname = sys.argv.pop(0) if len(sys.argv) < 2: _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) configfile = sys.argv.pop(0) userargs = sys.argv[:] # Add ../ to sys.path to allow running from branch possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, "manila", "__init__.py")): sys.path.insert(0, possible_topdir) from manila.openstack.common.rootwrap import wrapper # Load configuration try: rawconfig = ConfigParser.RawConfigParser() rawconfig.read(configfile) config = wrapper.RootwrapConfig(rawconfig) except ValueError as exc: msg = "Incorrect value in %s: %s" % (configfile, exc.message) _exit_error(execname, msg, RC_BADCONFIG, log=False) except ConfigParser.Error: _exit_error(execname, "Incorrect configuration file: %s" % configfile, RC_BADCONFIG, log=False) if config.use_syslog: wrapper.setup_syslog(execname, config.syslog_log_facility, config.syslog_log_level) # Execute command if it matches any of the loaded filters filters = wrapper.load_filters(config.filters_path) try: filtermatch = wrapper.match_filter(filters, userargs, exec_dirs=config.exec_dirs) if filtermatch: command = filtermatch.get_command(userargs, exec_dirs=config.exec_dirs) if config.use_syslog: logging.info("(%s > %s) Executing %s (filter match = %s)" % ( os.getlogin(), pwd.getpwuid(os.getuid())[0], command, filtermatch.name)) obj = subprocess.Popen(command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr, preexec_fn=_subprocess_setup, env=filtermatch.get_environment(userargs)) obj.wait() sys.exit(obj.returncode) except wrapper.FilterMatchNotExecutable as exc: msg = ("Executable not found: %s (filter match = %s)" % 
(exc.match.exec_path, exc.match.name)) _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) except wrapper.NoFilterMatched: msg = ("Unauthorized command: %s (no filter matched)" % ' '.join(userargs)) _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) manila-2013.2.dev175.gbf1a399/bin/manila-scheduler0000775000175000017500000000362712301410454021433 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for manila Scheduler.""" import eventlet eventlet.monkey_patch() import os import sys from oslo.config import cfg # If ../manila/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'manila', '__init__.py')): sys.path.insert(0, possible_topdir) from manila.openstack.common import gettextutils gettextutils.install('manila') from manila.common import config # Need to register global_opts from manila.openstack.common import log as logging from manila import service from manila import utils from manila import version CONF = cfg.CONF if __name__ == '__main__': CONF(sys.argv[1:], project='manila', version=version.version_string()) logging.setup("manila") utils.monkey_patch() server = service.Service.create(binary='manila-scheduler') service.serve(server) service.wait() manila-2013.2.dev175.gbf1a399/LICENSE0000664000175000017500000002363712301410454016530 0ustar chuckchuck00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
manila-2013.2.dev175.gbf1a399/requirements.txt0000664000175000017500000000076712301410454021006 0ustar chuckchuck00000000000000pbr>=0.5.21,<1.0 amqplib>=0.6.1 anyjson>=0.3.3 argparse Babel>=1.3 eventlet>=0.13.0 greenlet>=0.3.2 iso8601>=0.1.8 kombu>=2.4.8 lockfile>=0.8 lxml>=2.3 oslo.config>=1.2.0 paramiko>=1.8.0 Paste PasteDeploy>=1.5.0 python-neutronclient>=2.3.0,<3 python-glanceclient>=0.9.0 python-keystoneclient>=0.3.2 python-swiftclient>=1.5 Routes>=1.12.3 SQLAlchemy>=0.7.8,<=0.7.99 sqlalchemy-migrate>=0.7.2 stevedore>=0.10 python-cinderclient>=1.0.6 python-novaclient>=2.15.0 suds>=0.4 WebOb>=1.2.3,<1.3 wsgiref>=0.1.2 manila-2013.2.dev175.gbf1a399/README.rst0000664000175000017500000000132312301410454017176 0ustar chuckchuck00000000000000The Choose Your Own Adventure README for Manila =============================================== You have come across a storage service for an open cloud computing service. It has identified itself as "Manila." It was abstracted from the Nova project. To monitor it from a distance: follow `@openstack `_ on twitter. 
To tame it for use in your own cloud: read http://docs.openstack.org To dissect it in detail: visit http://github.com/stackforge/manila To taunt it with its weaknesses: use http://bugs.launchpad.net/manila To watch it: http://jenkins.openstack.org To hack at it: read HACKING To cry over its pylint problems: http://jenkins.openstack.org/job/manila-pylint/violations manila-2013.2.dev175.gbf1a399/doc/0000775000175000017500000000000012301410516016254 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/.gitignore0000664000175000017500000000004512301410454020244 0ustar chuckchuck00000000000000_build/* source/api/* .autogenerated manila-2013.2.dev175.gbf1a399/doc/source/0000775000175000017500000000000012301410516017554 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/conf.py0000664000175000017500000001747112301410454021066 0ustar chuckchuck00000000000000# -*- coding: utf-8 -*- # # manila documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set # to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. 
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'ext.manila_todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz'] # autodoc generation is a bit aggressive and a nuisance # when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" # in your terminal to disable if not os.getenv('SPHINX_DEBUG'): extensions += ['ext.manila_autodoc'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. # Changing the path so that the Hudson build output contains GA code # and the source docs do not contain the code so local, offline sphinx builds # are "clean." templates_path = [] if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'manila' copyright = u'2010-present, OpenStack, LLC' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from manila.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. 
unused_docs = [ 'api_ext/rst_extension_template', 'installer', ] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['manila.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/manila-manage', 'manila-manage', u'Cloud controller fabric', [u'OpenStack'], 1) ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme_path = ["."] html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" html_last_updated_fmt = os.popen(git_cmd).read() # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'maniladoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). 
#latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Manila.tex', u'Manila Documentation', u'Anso Labs, LLC', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'python': ('http://docs.python.org/', None), 'swift': ('http://swift.openstack.org', None)} manila-2013.2.dev175.gbf1a399/doc/source/devref/0000775000175000017500000000000012301410516021027 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/devref/manila.rst0000664000175000017500000001016012301410454023021 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Common and Misc Libraries ========================= Libraries common throughout Manila or just ones that haven't been categorized very well yet. 
The :mod:`manila.adminclient` Module ---------------------------------- .. automodule:: manila.adminclient :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.context` Module ------------------------------ .. automodule:: manila.context :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.exception` Module -------------------------------- .. automodule:: manila.exception :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.flags` Module ---------------------------- .. automodule:: manila.flags :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.process` Module ------------------------------ .. automodule:: manila.process :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.rpc` Module -------------------------- .. automodule:: manila.rpc :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.server` Module ----------------------------- .. automodule:: manila.server :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.test` Module --------------------------- .. automodule:: manila.test :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.utils` Module ---------------------------- .. automodule:: manila.utils :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.validate` Module ------------------------------- .. automodule:: manila.validate :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.wsgi` Module --------------------------- .. automodule:: manila.wsgi :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`declare_conf` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.declare_conf :noindex: :members: :undoc-members: :show-inheritance: The :mod:`conf_fixture` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.tests.conf_fixture :noindex: :members: :undoc-members: :show-inheritance: The :mod:`flags_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.flags_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`process_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.process_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`real_flags` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.real_flags :noindex: :members: :undoc-members: :show-inheritance: The :mod:`rpc_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.rpc_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`runtime_conf` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.runtime_conf :noindex: :members: :undoc-members: :show-inheritance: The :mod:`validator_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.validator_unittest :noindex: :members: :undoc-members: :show-inheritance: manila-2013.2.dev175.gbf1a399/doc/source/devref/addmethod.openstackapi.rst0000664000175000017500000000535212301410454026200 0ustar chuckchuck00000000000000.. Copyright 2010-2011 OpenStack LLC All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Adding a Method to the OpenStack API ==================================== The interface is a mostly RESTful API. 
REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Routing ------- To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. URLs are mapped to "action" methods on "controller" classes in ``manila/api/openstack/__init__/ApiRouter.__init__`` . See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``manila/api/openstack``, and inherit from manila.wsgi.Controller. See ``manila/api/openstack/servers.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Serialization ------------- Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML based on the request's content-type. If you define a new controller, you'll need to define a ``_serialization_metadata`` attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. ```` list contains ```` tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. ```` instead of ``4``). See `manila/api/openstack/servers.py` for an example. Faults ------ If you need to return a non-200, you should return faults.Fault(webob.exc.HTTPNotFound()) replacing the exception as appropriate. 
manila-2013.2.dev175.gbf1a399/doc/source/devref/threading.rst0000664000175000017500000000435612301410454023537 0ustar chuckchuck00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@utils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) MySQL access and eventlet ------------------------- Queries to the MySQL database will block the main thread of a service. This is because OpenStack services use an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, the resulting database query blocks the thread. 
The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/manila/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html manila-2013.2.dev175.gbf1a399/doc/source/devref/gerrit.rst0000664000175000017500000000125212301410454023056 0ustar chuckchuck00000000000000Code Reviews with Gerrit ======================== Manila uses the `Gerrit`_ tool to review proposed code changes. The review site is http://review.openstack.org. Gerrit is a complete replacement for Github pull requests. `All Github pull requests to the Manila repository will be ignored`. See `Gerrit Workflow Quick Reference`_ for information about how to get started using Gerrit. See `Gerrit, Jenkins and Github`_ for more detailed documentation on how to work with Gerrit. .. _Gerrit: http://code.google.com/p/gerrit .. _Gerrit, Jenkins and Github: http://wiki.openstack.org/GerritJenkinsGithub .. _Gerrit Workflow Quick Reference: http://wiki.openstack.org/GerritWorkflow manila-2013.2.dev175.gbf1a399/doc/source/devref/scheduler.rst0000664000175000017500000000305012301410454023536 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scheduler ========= The :mod:`manila.scheduler.manager` Module ---------------------------------------- .. automodule:: manila.scheduler.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.driver` Module --------------------------------------- .. automodule:: manila.scheduler.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.simple` Driver --------------------------------------- .. automodule:: manila.scheduler.simple :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`scheduler_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.scheduler_unittest :noindex: :members: :undoc-members: :show-inheritance: manila-2013.2.dev175.gbf1a399/doc/source/devref/api.rst0000664000175000017500000000716312301410454022342 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
API Endpoint ============ Manila has a system for managing multiple APIs on different subdomains. Currently there is support for the OpenStack API, as well as the Amazon EC2 API. Common Components ----------------- The :mod:`manila.api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.api.cloud` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.cloud :noindex: :members: :undoc-members: :show-inheritance: OpenStack API ------------- The :mod:`openstack` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.openstack :noindex: :members: :undoc-members: :show-inheritance: The :mod:`auth` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.openstack.auth :noindex: :members: :undoc-members: :show-inheritance: EC2 API ------- The :mod:`manila.api.ec2` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.ec2 :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cloud` Module ~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.ec2.cloud :noindex: :members: :undoc-members: :show-inheritance: The :mod:`metadatarequesthandler` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.ec2.metadatarequesthandler :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`api_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`api_integration` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api_integration :noindex: :members: :undoc-members: :show-inheritance: The :mod:`cloud_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.cloud_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`api.fakes` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.tests.api.fakes :noindex: :members: :undoc-members: :show-inheritance: The :mod:`api.test_wsgi` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api.test_wsgi :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api.openstack.test_api :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_auth` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api.openstack.test_auth :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_faults` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api.openstack.test_faults :noindex: :members: :undoc-members: :show-inheritance: manila-2013.2.dev175.gbf1a399/doc/source/devref/rpc.rst0000664000175000017500000003212112301410454022345 0ustar chuckchuck00000000000000.. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and Manila ============= AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two Manila components and allows them to communicate in a loosely coupled fashion. 
More precisely, Manila components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). * Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Manila uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. image:: /images/rpc/arch.png :width: 60% .. Manila implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each Manila service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Manila-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only. Manila RPC Mappings ----------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. 
Every Manila component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute, Volume or Network). Invokers and Workers do not actually exist in the Manila object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rcp.call operations. Figure 2 shows the following internal elements: * Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. 
Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Manila. * Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: /images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rp.call operation: 1. a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. 
once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: /images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below the message flow during an rp.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after Manila components' bootstrap in a test environment. Exchanges and queues being created by Manila components are: * Exchanges 1. manila (topic exchange) * Queues 1. compute.phantom (phantom is hostname) 2. compute 3. network.phantom (phantom is hostname) 4. network 5. share.phantom (phantom is hostname) 6. share 7. scheduler.phantom (phantom is hostname) 8. scheduler .. image:: /images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Manila uses Kombu to connect to the RabbitMQ environment. 
Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. * Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. * SSL: use SSL to connect to the server. The default is False. More precisely Consumers need the following parameters: * Connection: the above mentioned Connection object. * Queue: name of the queue. * Exchange: name of the exchange the queue binds to. * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute. * Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound the exchange, even if the binding does not have a key. 
* Topic exchange: if the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example ".stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. * Auto_ack: acknowledgement is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: it disable acknowledgement on the server-side. This is different from auto_ack in that acknowledgement is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. * Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. 
Publishers specify most the parameters of Consumers (such as they do not specify a queue name), but they can also specify the following: * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: * 1 or "transient": the message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent": the message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. manila-2013.2.dev175.gbf1a399/doc/source/devref/services.rst0000664000175000017500000000456512301410454023417 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to manila. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. 
Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. The :mod:`manila.service` Module ------------------------------ .. automodule:: manila.service :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.manager` Module ------------------------------ .. automodule:: manila.manager :noindex: :members: :undoc-members: :show-inheritance: Implementation-Specific Drivers ------------------------------- A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. Drivers should minimize touching the database, although it is currently acceptable for implementation specific data. This may be reconsidered at some point. It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. manila-2013.2.dev175.gbf1a399/doc/source/devref/share.rst0000664000175000017500000000262312301410454022667 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Shared Filesystems ====================== .. todo:: rework The :mod:`manila.share.manager` Module ------------------------------------- .. automodule:: manila.share.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.driver` Module ------------------------------------- .. automodule:: manila.share.driver :noindex: :members: :undoc-members: :show-inheritance: :exclude-members: FakeAOEDriver Tests ----- The :mod:`share_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.share_unittest :noindex: :members: :undoc-members: :show-inheritance: manila-2013.2.dev175.gbf1a399/doc/source/devref/launchpad.rst0000664000175000017500000000321412301410454023521 0ustar chuckchuck00000000000000Project hosting with Launchpad ============================== `Launchpad`_ hosts the Manila project. The Manila project homepage on Launchpad is http://launchpad.net/manila. Launchpad credentials --------------------- Creating a login on Launchpad is important even if you don't use the Launchpad site itself, since Launchpad credentials are used for logging in on several OpenStack-related sites. These sites include: * `Wiki`_ * Gerrit (see :doc:`gerrit`) * Jenkins (see :doc:`jenkins`) Mailing list ------------ The mailing list email is ``openstack@lists.launchpad.net``. This is a common mailing list across the OpenStack projects. To participate in the mailing list: #. Join the `Manila Team`_ on Launchpad. #. Subscribe to the list on the `OpenStack Team`_ page on Launchpad. The mailing list archives are at https://lists.launchpad.net/openstack. 
Bug tracking ------------ Report Manila bugs at https://bugs.launchpad.net/manila Feature requests (Blueprints) ----------------------------- Manila uses Launchpad Blueprints to track feature requests. Blueprints are at https://blueprints.launchpad.net/manila. Technical support (Answers) --------------------------- Manila uses Launchpad Answers to track Manila technical support questions. The Manila Answers page is at https://answers.launchpad.net/manila. Note that the `OpenStack Forums`_ (which are not hosted on Launchpad) can also be used for technical support requests. .. _Launchpad: http://launchpad.net .. _Wiki: http://wiki.openstack.org .. _Manila Team: https://launchpad.net/~manila .. _OpenStack Team: https://launchpad.net/~openstack .. _OpenStack Forums: http://forums.openstack.org/manila-2013.2.dev175.gbf1a399/doc/source/devref/unit_tests.rst0000664000175000017500000001303512301410454023765 0ustar chuckchuck00000000000000Unit Tests ========== Manila contains a suite of unit tests, in the manila/tests directory. Any proposed code change will be automatically rejected by the OpenStack Jenkins server [#f1]_ if the change causes unit test failures. Running the tests ----------------- Run the unit tests by doing:: ./run_tests.sh This script is a wrapper around the `nose`_ testrunner and the `pep8`_ checker. .. _nose: http://code.google.com/p/python-nose/ .. _pep8: https://github.com/jcrocholl/pep8 Flags ----- The ``run_tests.sh`` script supports several flags. You can view a list of flags by doing:: run_tests.sh -h This will show the following help information:: Usage: ./run_tests.sh [OPTION]... Run Manila's test suite(s) -V, --virtual-env Always use virtualenv. Install automatically if not present -N, --no-virtual-env Don't use virtualenv. Run tests in local environment -s, --no-site-packages Isolate the virtualenv from the global Python environment -r, --recreate-db Recreate the test database (deprecated, as this is now the default). 
-n, --no-recreate-db Don't recreate the test database. -x, --stop Stop running tests after the first error or failure. -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added. -p, --pep8 Just run pep8 -P, --no-pep8 Don't run pep8 -c, --coverage Generate coverage report -h, --help Print this usage message --hide-elapsed Don't print the elapsed time for each test along with slow test list Because ``run_tests.sh`` is a wrapper around nose, it also accepts the same flags as nosetests. See the `nose options documentation`_ for details about these additional flags. .. _nose options documentation: http://readthedocs.org/docs/nose/en/latest/usage.html#options Running a subset of tests ------------------------- Instead of running all tests, you can specify an individual directory, file, class, or method that contains test code. To run the tests in the ``manila/tests/scheduler`` directory:: ./run_tests.sh scheduler To run the tests in the ``manila/tests/test_libvirt.py`` file:: ./run_tests.sh test_libvirt To run the tests in the `HostStateTestCase` class in ``manila/tests/test_libvirt.py``:: ./run_tests.sh test_libvirt:HostStateTestCase To run the `ToPrimitiveTestCase.test_dict` test method in ``manila/tests/test_utils.py``:: ./run_tests.sh test_utils:ToPrimitiveTestCase.test_dict Suppressing logging output when tests fail ------------------------------------------ By default, when one or more unit test fails, all of the data sent to the logger during the failed tests will appear on standard output, which typically consists of many lines of texts. The logging output can make it difficult to identify which specific tests have failed, unless your terminal has a large scrollback buffer or you have redirected output to a file. You can suppress the logging output by calling ``run_tests.sh`` with the nose flag:: --nologcapture Virtualenv ---------- By default, the tests use the Python packages installed inside a virtualenv [#f2]_. 
(This is equivalent to using the ``-V, --virtualenv`` flag). If the virtualenv does not exist, it will be created the first time the tests are run. If you wish to recreate the virtualenv, call ``run_tests.sh`` with the flag:: -f, --force Recreating the virtualenv is useful if the package dependencies have changed since the virtualenv was last created. If the ``requirements.txt`` or ``tools/install_venv.py`` files have changed, it's a good idea to recreate the virtualenv. By default, the unit tests will see both the packages in the virtualenv and the packages that have been installed in the Python global environment. In some cases, the packages in the Python global environment may cause a conflict with the packages in the virtualenv. If this occurs, you can isolate the virtualenv from the global environment by using the flag:: -s, --no-site packages If you do not wish to use a virtualenv at all, use the flag:: -N, --no-virtual-env Database -------- Some of the unit tests make queries against an sqlite database [#f3]_. By default, the test database (``tests.sqlite``) is deleted and recreated each time ``run_tests.sh`` is invoked (This is equivalent to using the ``-r, --recreate-db`` flag). To reduce testing time if a database already exists it can be reused by using the flag:: -n, --no-recreate-db Reusing an existing database may cause tests to fail if the schema has changed. If any files in the ``manila/db/sqlalchemy`` have changed, it's a good idea to recreate the test database. Gotchas ------- **Running Tests from Shared Folders** If you are running the unit tests from a shared folder, you may see tests start to fail or stop completely as a result of Python lockfile issues [#f4]_. You can get around this by manually setting or updating the following line in ``manila/tests/conf_fixture.py``:: FLAGS['lock_path'].SetDefault('/tmp') Note that you may use any location (not just ``/tmp``!) as long as it is not a shared folder. .. rubric:: Footnotes .. 
[#f1] See :doc:`jenkins`. .. [#f2] See :doc:`development.environment` for more details about the use of virtualenv. .. [#f3] There is an effort underway to use a fake DB implementation for the unit tests. See https://lists.launchpad.net/openstack/msg05604.html .. [#f4] See Vish's comment in this bug report: https://bugs.launchpad.net/manila/+bug/882933 manila-2013.2.dev175.gbf1a399/doc/source/devref/fakes.rst0000664000175000017500000000431612301410454022657 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Fake Drivers ============ .. todo:: document general info about fakes When the real thing isn't available and you have some development to do these fake implementations of various drivers let you get on with your day. The :mod:`manila.virt.fake` Module -------------------------------- .. automodule:: manila.virt.fake :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.auth.fakeldap` Module ------------------------------------ .. automodule:: manila.auth.fakeldap :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.testing.fake.rabbit` Module ------------------------------------------ .. 
automodule:: manila.testing.fake.rabbit :noindex: :members: :undoc-members: :show-inheritance: The :class:`manila.volume.driver.FakeAOEDriver` Class --------------------------------------------------- .. autoclass:: manila.volume.driver.FakeAOEDriver :noindex: :members: :undoc-members: :show-inheritance: The :class:`manila.tests.service_unittest.FakeManager` Class ---------------------------------------------------------- .. autoclass:: manila.tests.service_unittest.FakeManager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.tests.api.openstack.fakes` Module ------------------------------------------------ .. automodule:: manila.tests.api.openstack.fakes :noindex: :members: :undoc-members: :show-inheritance: manila-2013.2.dev175.gbf1a399/doc/source/devref/jenkins.rst0000664000175000017500000000273612301410454023233 0ustar chuckchuck00000000000000Continuous Integration with Jenkins =================================== Manila uses a `Jenkins`_ server to automate development tasks. The Jenkins front-end is at http://jenkins.openstack.org. You must have an account on `Launchpad`_ to be able to access the OpenStack Jenkins site. Jenkins performs tasks such as: `gate-manila-unittests`_ Run unit tests on proposed code changes that have been reviewed. `gate-manila-pep8`_ Run PEP8 checks on proposed code changes that have been reviewed. `gate-manila-merge`_ Merge reviewed code into the git repository. `manila-coverage`_ Calculate test coverage metrics. `manila-docs`_ Build this documentation and push it to http://manila.openstack.org. `manila-tarball`_ Do ``python setup.py sdist`` to create a tarball of the manila code and upload it to http://manila.openstack.org/tarballs .. _Jenkins: http://jenkins-ci.org .. _Launchpad: http://launchpad.net .. _gate-manila-merge: https://jenkins.openstack.org/view/Manila/job/gate-manila-merge .. _gate-manila-pep8: https://jenkins.openstack.org/view/Manila/job/gate-manila-pep8 .. 
_gate-manila-unittests: https://jenkins.openstack.org/view/Manila/job/gate-manila-unittests .. _manila-coverage: https://jenkins.openstack.org/view/Manila/job/manila-coverage .. _manila-docs: https://jenkins.openstack.org/view/Manila/job/manila-docs .. _manila-pylint: https://jenkins.openstack.org/job/manila-pylint .. _manila-tarball: https://jenkins.openstack.org/job/manila-tarball manila-2013.2.dev175.gbf1a399/doc/source/devref/development.environment.rst0000664000175000017500000001210512301410454026446 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Setting Up a Development Environment ==================================== This page describes how to setup a working Python development environment that can be used in developing manila on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with git. Refer to GettingTheCode_ for additional information. .. _GettingTheCode: http://wiki.openstack.org/GettingTheCode Following these instructions will allow you to run the manila unit tests. If you want to be able to run manila (i.e., launch VM instances), you will also need to install libvirt and at least one of the `supported hypervisors`_. Running manila is currently only supported on Linux, although you can run the unit tests on Mac OS X. 
See :doc:`../quickstart` for how to get a working version of OpenStack Compute running as quickly as possible. .. _supported hypervisors: http://wiki.openstack.org/HypervisorSupportMatrix Virtual environments -------------------- Manila development uses `virtualenv `__ to track and manage Python dependencies while in development and testing. This allows you to install all of the Python package dependencies in a virtual environment or "virtualenv" (a special subdirectory of your manila directory), instead of installing the packages at the system level. .. note:: Virtualenv is useful for running the unit tests, but is not typically used for full integration testing or production usage. Linux Systems ------------- .. note:: This section is tested for Manila on Ubuntu (12.04-64) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages. On Ubuntu:: sudo apt-get install python-dev libssl-dev python-pip git-core libmysqlclient-dev libpq-dev On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: sudo yum install python-devel openssl-devel python-pip git mysql-devel postgresql-devel Mac OS X Systems ---------------- Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version If you have installed OpenSSL 1.0.0a, which can happen when installing a MacPorts package for OpenSSL, you will see an error when running ``manila.tests.auth_unittest.AuthTestCase.test_209_can_generate_x509``. The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) works fine with manila. Getting the code ---------------- Grab the code from GitHub:: git clone https://github.com/openstack/manila.git cd manila Running unit tests ------------------ The unit tests will run by default inside a virtualenv in the ``.venv`` directory. 
Run the unit tests by doing:: ./run_tests.sh The first time you run them, you will be asked if you want to create a virtual environment (hit "y"):: No virtual environment found...create one? (Y/n) See :doc:`unit_tests` for more details. .. _virtualenv: Manually installing and using the virtualenv -------------------------------------------- You can manually install the virtual environment instead of having ``run_tests.sh`` do it for you:: python tools/install_venv.py This will install all of the Python packages listed in the ``requirements.txt`` file into your virtualenv. There will also be some additional packages (pip, distribute, greenlet) that are installed by the ``tools/install_venv.py`` file into the virtualenv. If all goes well, you should get a message something like this:: Manila development environment setup is complete. To activate the Manila virtualenv for the extent of your current shell session you can run:: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running:: $ tools/with_venv.sh Contributing Your Work ---------------------- Once your work is complete you may wish to contribute it to the project. Add your name and email address to the ``Authors`` file, and also to the ``.mailmap`` file if you use multiple email addresses. Your contributions cannot be merged into trunk unless you are listed in the Authors file. Manila uses the Gerrit code review system. For information on how to submit your branch to Gerrit, see GerritWorkflow_. .. _GerritWorkflow: http://wiki.openstack.org/GerritWorkflow manila-2013.2.dev175.gbf1a399/doc/source/devref/il8n.rst0000664000175000017500000000241012301410454022431 0ustar chuckchuck00000000000000Internationalization ==================== manila uses `gettext `_ so that user-facing strings such as log messages appear in the appropriate language in different locales. 
To use gettext, make sure that the strings passed to the logger are wrapped in a ``_()`` function call. For example:: LOG.debug(_("block_device_mapping %s"), block_device_mapping) If you have multiple arguments, the convention is to use named parameters. It's common to use the ``locals()`` dict (which contains the names and values of the local variables in the current scope) to do the string interpolation. For example:: label = ... sr_ref = ... LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals()) If you do not follow the project conventions, your code may cause the LocalizationTestCase.test_multiple_positional_format_placeholders test to fail in manila/tests/test_localization.py. The ``_()`` function is brought into the global scope by doing:: from manila.openstack.common import gettextutils gettextutils.install("manila") These lines are needed in any toplevel script before any manila modules are imported. If this code is missing, it may result in an error that looks like:: NameError: name '_' is not defined manila-2013.2.dev175.gbf1a399/doc/source/devref/database.rst0000664000175000017500000000327012301410454023330 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The Database Layer ================== The :mod:`manila.db.api` Module ----------------------------- .. 
automodule:: manila.db.api :noindex: :members: :undoc-members: :show-inheritance: The Sqlalchemy Driver --------------------- The :mod:`manila.db.sqlalchemy.api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.api :noindex: The :mod:`manila.db.sqlalchemy.models` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.models :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.db.sqlalchemy.session` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.session :noindex: :members: :undoc-members: :show-inheritance: Tests ----- Tests are lacking for the db api layer and for the sqlalchemy driver. Failures in the drivers would be detected in other test cases, though. manila-2013.2.dev175.gbf1a399/doc/source/devref/auth.rst0000664000175000017500000002014112301410454022521 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _auth: Authentication and Authorization ================================ The :mod:`manila.quota` Module ---------------------------- .. automodule:: manila.quota :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.auth.signer` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.auth.signer :noindex: :members: :undoc-members: :show-inheritance: Auth Manager ------------ The :mod:`manila.auth.manager` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.auth.manager :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`auth_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.auth_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`access_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.access_unittest :noindex: :members: :undoc-members: :show-inheritance: The :mod:`quota_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.quota_unittest :noindex: :members: :undoc-members: :show-inheritance: Legacy Docs ----------- Manila provides RBAC (Role-based access control) of the AWS-type APIs. We define the following roles: Roles-Based Access Control of AWS-style APIs using SAML Assertions “Achieving FIPS 199 Moderate certification of a hybrid cloud environment using CloudAudit and declarative C.I.A. classifications” Introduction ------------ We will investigate one method for integrating an AWS-style API with US eAuthentication-compatible federated authentication systems, to achieve access controls and limits based on traditional operational roles. Additionally, we will look at how combining this approach, with an implementation of the CloudAudit APIs, will allow us to achieve a certification under FIPS 199 Moderate classification for a hybrid cloud environment. 
Relationship of US eAuth to RBAC -------------------------------- Typical implementations of US eAuth authentication systems are structured as follows:: [ MS Active Directory or other federated LDAP user store ] --> backends to… [ SUN Identity Manager or other SAML Policy Controller ] --> maps URLs to groups… [ Apache Policy Agent in front of eAuth-secured Web Application ] In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion. .. _auth_roles: Roles ----- AWS API calls are traditionally secured via Access and Secret Keys, which are used to sign API calls, along with traditional timestamps to prevent replay attacks. The APIs can be logically grouped into sets that align with five typical roles: * Base User * System Administrator/Developer (currently have the same permissions) * Network Administrator * Project Manager * Cloud Administrator/IT-Security (currently have the same permissions) There is an additional, conceptual end-user that may or may not have API access: * (EXTERNAL) End-user / Third-party User Basic operations are available to any : * Describe Instances * Describe Images * Describe Volumes * Describe Keypairs * Create Keypair * Delete Keypair * Create, Upload, Delete: Buckets and Keys (Object Store) System Administrators/Developers/Project Manager: * Create, Attach, Delete Volume (Block Store) * Launch, Reboot, Terminate Instance * Register/Unregister Machine Image (project-wide) * Request / Review CloudAudit Scans Project Manager: * Add and remove other users (currently no api) * Set roles (currently no api) Network Administrator: * Change Machine Image properties (public / private) * Change Firewall Rules, define Security Groups * Allocate, Associate, Deassociate Public IP addresses Cloud Administrator/IT-Security: * All 
permissions Enhancements ------------ * SAML Token passing * REST interfaces * SOAP interfaces Wrapping the SAML token into the API calls. Then store the UID (fetched via backchannel) into the instance metadata, providing end-to-end auditability of ownership and responsibility, without PII. CloudAudit APIs --------------- * Request formats * Response formats * Stateless asynchronous queries CloudAudit queries may spawn long-running processes (similar to launching instances, etc.) They need to return a ReservationId in the same fashion, which can be returned in further queries for updates. RBAC of CloudAudit API calls is critical, since detailed system information is a system vulnerability. Type declarations ----------------- * Data declarations – Volumes and Objects * System declarations – Instances Existing API calls to launch instances specify a single, combined “type” flag. We propose to extend this with three additional type declarations, mapping to the “Confidentiality, Integrity, Availability” classifications of FIPS 199. An example API call would look like:: RunInstances type=m1.large number=1 secgroup=default key=mykey confidentiality=low integrity=low availability=low These additional parameters would also apply to creation of block storage volumes (along with the existing parameter of ‘size’), and creation of object storage ‘buckets’. (C.I.A. classifications on a bucket would be inherited by the keys within this bucket.) Request Brokering ----------------- * Cloud Interop * IMF Registration / PubSub * Digital C&A Establishing declarative semantics for individual API calls will allow the cloud environment to seamlessly proxy these API calls to external, third-party vendors – when the requested CIA levels match. See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials. 
Dirty Cloud – Hybrid Data Centers --------------------------------- * CloudAudit bridge interfaces * Anything in the ARP table A hybrid cloud environment provides dedicated, potentially co-located physical hardware with a network interconnect to the project or users’ cloud virtual network. This interconnect is typically a bridged VPN connection. Any machines that can be bridged into a hybrid environment in this fashion (at Layer 2) must implement a minimum version of the CloudAudit spec, such that they can be queried to provide a complete picture of the IT-sec runtime environment. Network discovery protocols (ARP, CDP) can be applied in this case, and existing protocols (SNMP location data, DNS LOC records) overloaded to provide CloudAudit information. The Details ----------- * Preliminary Roles Definitions * Categorization of available API calls * SAML assertion vocabulary System limits ------------- The following limits need to be defined and enforced: * Total number of instances allowed (user / project) * Total number of instances, per instance type (user / project) * Total number of volumes (user / project) * Maximum size of volume * Cumulative size of all volumes * Total use of object storage (GB) * Total number of Public IPs Further Challenges ------------------ * Prioritization of users / jobs in shared computing environments * Incident response planning * Limit launch of instances to specific security groups based on AMI * Store AMIs in LDAP for added property control manila-2013.2.dev175.gbf1a399/doc/source/devref/index.rst0000664000175000017500000000307212301410454022673 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Developer Guide =============== In this section you will find information on Manila's lower level programming APIs. Programming HowTos and Tutorials -------------------------------- .. toctree:: :maxdepth: 3 development.environment unit_tests addmethod.openstackapi Background Concepts for Manila ---------------------------- .. toctree:: :maxdepth: 3 architecture threading il8n rpc Other Resources --------------- .. toctree:: :maxdepth: 3 launchpad gerrit jenkins API Reference ------------- .. toctree:: :maxdepth: 3 ../api/autoindex Module Reference ---------------- .. toctree:: :maxdepth: 3 services database share auth api scheduler fakes manila Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` manila-2013.2.dev175.gbf1a399/doc/source/devref/architecture.rst0000664000175000017500000000501612301410454024246 0ustar chuckchuck00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Manila System Architecture ======================== The Manila Shared Filesystem Management Service is intended to be run on one or more nodes. Manila uses a sql-based central database that is shared by all Manila services in the system. The amount and depth of the data fits into a sql database quite well. For small deployments this seems like an optimal solution. For larger deployments, and especially if security is a concern, manila will be moving towards multiple data stores with some kind of aggregation system. Components ---------- Below you will find a brief explanation of the different components. :: /- ( LDAP ) [ Auth Manager ] --- | \- ( DB ) | | | [ Web Dashboard ]- manilaclient -[ api ] -- < AMQP > -- [ scheduler ] -- [ share ] -- ( shared filesystem ) | | | | | < REST > * DB: sql database for data storage. Used by all components (LINKS NOT SHOWN) * Web Dashboard: potential external component that talks to the api * api: component that receives http requests, converts commands and communicates with other components via the queue or http * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. * scheduler: decides which host gets each share * share: manages shared filesystems. 
manila-2013.2.dev175.gbf1a399/doc/source/man/0000775000175000017500000000000012301410516020327 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/man/manila-manage.rst0000664000175000017500000001765512301410454023567 0ustar chuckchuck00000000000000=========== manila-manage =========== ------------------------------------------------------ control and manage cloud computer instances and images ------------------------------------------------------ :Author: openstack@lists.launchpad.net :Date: 2012-04-05 :Copyright: OpenStack LLC :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== manila-manage [] DESCRIPTION =========== manila-manage controls cloud computing instances by managing manila users, manila projects, manila roles, shell selection, vpn connections, and floating IP address configuration. More information about OpenStack Manila is at http://manila.openstack.org. OPTIONS ======= The standard pattern for executing a manila-manage command is: ``manila-manage []`` For example, to obtain a list of all projects: ``manila-manage project list`` Run without arguments to see a list of available command categories: ``manila-manage`` Categories are user, project, role, shell, vpn, and floating. Detailed descriptions are below. You can also run with a category argument such as user to see a list of all commands in that category: ``manila-manage user`` These sections describe the available categories and arguments for manila-manage. Manila Db ~~~~~~~ ``manila-manage db version`` Print the current database version. ``manila-manage db sync`` Sync the database up to the most recent version. This is the standard way to create the db as well. Manila User ~~~~~~~~~ ``manila-manage user admin `` Create an admin user with the name . ``manila-manage user create `` Create a normal user with the name . ``manila-manage user delete `` Delete the user with the name . 
``manila-manage user exports `` Outputs a list of access key and secret keys for user to the screen ``manila-manage user list`` Outputs a list of all the user names to the screen. ``manila-manage user modify `` Updates the indicated user keys, indicating with T or F if the user is an admin user. Leave any argument blank if you do not want to update it. Manila Project ~~~~~~~~~~~~ ``manila-manage project add `` Add a manila project with the name to the database. ``manila-manage project create `` Create a new manila project with the name (you still need to do manila-manage project add to add it to the database). ``manila-manage project delete `` Delete a manila project with the name . ``manila-manage project environment `` Exports environment variables for the named project to a file named manilarc. ``manila-manage project list`` Outputs a list of all the projects to the screen. ``manila-manage project quota `` Outputs the size and specs of the project's instances including gigabytes, instances, floating IPs, volumes, and cores. ``manila-manage project remove `` Deletes the project with the name . ``manila-manage project zipfile`` Compresses all related files for a created project into a zip file manila.zip. Manila Role ~~~~~~~~~ ``manila-manage role add <(optional) projectname>`` Add a user to either a global or project-based role with the indicated assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. ``manila-manage role has `` Checks the user or project and responds with True if the user has a global role with a particular project. ``manila-manage role remove `` Remove the indicated role from the user. Manila Logs ~~~~~~~~~ ``manila-manage logs errors`` Displays manila errors from log files. 
``manila-manage logs syslog `` Displays manila alerts from syslog. Manila Shell ~~~~~~~~~~ ``manila-manage shell bpython`` Starts a new bpython shell. ``manila-manage shell ipython`` Starts a new ipython shell. ``manila-manage shell python`` Starts a new python shell. ``manila-manage shell run`` Starts a new shell using python. ``manila-manage shell script `` Runs the named script from the specified path with flags set. Manila VPN ~~~~~~~~ ``manila-manage vpn list`` Displays a list of projects, their IP prot numbers, and what state they're in. ``manila-manage vpn run `` Starts the VPN for the named project. ``manila-manage vpn spawn`` Runs all VPNs. Manila Floating IPs ~~~~~~~~~~~~~~~~~ ``manila-manage floating create [--pool ] [--interface ]`` Creates floating IP addresses for the given range, optionally specifying a floating pool and a network interface. ``manila-manage floating delete `` Deletes floating IP addresses in the range given. ``manila-manage floating list`` Displays a list of all floating IP addresses. Manila Flavor ~~~~~~~~~~~ ``manila-manage flavor list`` Outputs a list of all active flavors to the screen. ``manila-manage flavor list --all`` Outputs a list of all flavors (active and inactive) to the screen. ``manila-manage flavor create <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>`` creates a flavor with the following positional arguments: * memory (expressed in megabytes) * vcpu(s) (integer) * local storage (expressed in gigabytes) * flavorid (unique integer) * swap space (expressed in megabytes, defaults to zero, optional) * RXTX quotas (expressed in gigabytes, defaults to zero, optional) * RXTX cap (expressed in gigabytes, defaults to zero, optional) ``manila-manage flavor delete `` Delete the flavor with the name . This marks the flavor as inactive and cannot be launched. However, the record stays in the database for archival and billing purposes. ``manila-manage flavor delete --purge`` Purges the flavor with the name . 
This removes this flavor from the database. Manila Instance_type ~~~~~~~~~~~~~~~~~~ The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from manila-manage flavor can be used. Manila Images ~~~~~~~~~~~ ``manila-manage image image_register `` Registers an image with the image service. ``manila-manage image kernel_register `` Registers a kernel with the image service. ``manila-manage image ramdisk_register `` Registers a ramdisk with the image service. ``manila-manage image all_register `` Registers an image kernel and ramdisk with the image service. ``manila-manage image convert `` Converts all images in directory from the old (Bexar) format to the new format. Manila VM ~~~~~~~~~~~ ``manila-manage vm list [host]`` Show a list of all instances. Accepts optional hostname (to show only instances on specific host). ``manila-manage live-migration `` Live migrate instance from current host to destination host. Requires instance id (which comes from euca-describe-instance) and destination host name (which can be found from manila-manage service list). FILES ======== The manila-manage.conf file contains configuration information in the form of python-gflags. 
SEE ALSO ======== * `OpenStack Manila `__ * `OpenStack Swift `__ BUGS ==== * Manila is sourced in Launchpad so you can view current bugs at `OpenStack Manila `__ manila-2013.2.dev175.gbf1a399/doc/source/_theme/0000775000175000017500000000000012301410516021015 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_theme/theme.conf0000664000175000017500000000012212301410454022762 0ustar chuckchuck00000000000000[theme] inherit = sphinxdoc stylesheet = sphinxdoc.css pygments_style = friendly manila-2013.2.dev175.gbf1a399/doc/source/_theme/layout.html0000664000175000017500000000706612301410454023232 0ustar chuckchuck00000000000000{% extends "sphinxdoc/layout.html" %} {% set css_files = css_files + ['_static/tweaks.css'] %} {% set script_files = script_files + ['_static/jquery.tweet.js'] %} {% block extrahead %} {% endblock %} {%- macro sidebar() %} {%- if not embedded %}{% if not theme_nosidebar|tobool %}
{%- block sidebarlogo %} {%- if logo %} {%- endif %} {%- endblock %} {%- block sidebartoc %} {%- if display_toc %}

{{ _('Table Of Contents') }}

{{ toc }} {%- endif %} {%- endblock %} {%- block sidebarrel %} {%- if prev %}

{{ _('Previous topic') }}

{{ prev.title }}

{%- endif %} {%- if next %}

{{ _('Next topic') }}

{{ next.title }}

{%- endif %} {%- endblock %} {%- block sidebarsourcelink %} {%- if show_source and has_source and sourcename %}

{{ _('This Page') }}

{%- endif %} {%- endblock %} {%- if customsidebar %} {% include customsidebar %} {%- endif %} {%- block sidebarsearch %} {%- if pagename != "search" %}

Psst... hey. You're reading the latest content, but it's for the Block Storage project only. You can read all OpenStack docs too.

{%- endif %} {%- if pagename == "index" %}

{{ _('Twitter Feed') }}

{%- endif %} {%- endblock %}
{%- endif %}{% endif %} {%- endmacro %} manila-2013.2.dev175.gbf1a399/doc/source/images/0000775000175000017500000000000012301410516021021 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/0000775000175000017500000000000012301410516021605 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/rabt.svg0000664000175000017500000010200712301410454023257 0ustar chuckchuck00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.27 key: topic key: topic Sheet.28 key: topic.host key: topic.host Sheet.26 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.58 Direct Consumer DirectConsumer Sheet.59 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.56 Sheet.60 Sheet.62 RabbitMQ Node (single virtual host context) RabbitMQ Node(single virtual host context) manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/flow1.svg0000664000175000017500000010610212301410454023357 0ustar chuckchuck00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.56 Direct Consumer DirectConsumer Sheet.57 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.60 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.64 rpc.call (topic.host) rpc.call(topic.host) Sheet.63 Sheet.66 Sheet.67 Sheet.68 manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/arch.png0000664000175000017500000006410212301410454023234 0ustar chuckchuck00000000000000‰PNG  IHDRpó ß HsRGB@À}Å pHYsÄÄ•+tEXtSoftwareMicrosoft Officeí5qgÂIDATxÚí½ ”\gy 9gâ3¶q[j[’­à X×hqÂ()Xãœ)Ï´~ÂØ=òð³:3mV±ƒqhÛêV9ƒ•x³Ty2^´ò˜‡°)Ea| #p)SK43:vŽâÚ šìY¬9KHÛ0±w©½où{›·¿þî­¿[?·ê©sžÓU÷·ºïßÓï÷}ïû·~í×~ío@và€ÀÀT \ô*Š…¶!ë9à€À Gàe#qU¡Ëm, ½îŸ¸î®˜–˜qŸ æ}Î}Λågt¾ûœwËä¼í®Y×½oôõ@àÖÊXÃÉ–Fäê.ʶàÞëôŠ[§½sï+n^É-;ï¦Ûuõç¢ÛW‰“¸î®nhxbVñ–ñ>çMókÞ6‰ºÏu÷¾®9'‡‹º N@ຸלé7–ltÌ—-'e#pò¾é¶¹J’¨!p€Àõ&p…˜y½\Ý[¦³n nðWv}Ü ®ÿZÝ_Îô—+¸e+uËfzCûÉ p \Éökóæ­Ëïæä¬dÓ†DoÝ2 ëÎ3ˆ¸áK`¹×NàÜ|êŸ7lsÕê¦8ý‚ÖFuI{+¾À¹ù“Ü7ÇÁn°çSòæÏxŸó^¿9[ŠkÁ~@à#p÷¾ˆÈ5¼å뮹Ô~ð°ÊÁnç>KD­Þ¥À•Íü%¹¢À¹iUÓ¿­ìú·¬Üy˹mÌ»eVûÏ pƒ¸’íãæ¦å½~p‹ÚßÍL[Íç$N…nž p€À p€À@ÖÎ¥Yô¦uT¨>íböîs屺)Íù†À7«xÕJ^¸ŠKî[UÑsïuݪ·ì¼{_Ô* n›º Ÿw9æªn½’I LMU@à®àU_°"UÕ&V'dZ‰¡`ÞKõ…œB÷¾ìD¬d¦åÜòE· ­â7É€ÒŽîL¢ÀiI¬¼'p@áú‚'peI+º÷«bgÏÖ[Õm‘¿ºÛGޏsïç@% \«ô–'pyÓ ªÑ¼ymZíRà*NËœ@€Àµ8÷¹¢Òfä«èÞÏ89› ÈW+rfDlµ¨½Û¦­£Zuͤ…¸¾wv¿\²ÀÍxWp2UÕ蜛žóú»ULÄ­ 2ç žn£l–«Æ\‘~p0×Lj&Œ†™™Ë¾Äyt+pGÞÿÁÛ›ñ}†Ìg?÷‰æWÌžá<8@à8@à˜‹«=jò´-ºQ£:‚´¤©AÜrU?Õ‡[g‘ƒ p€À æáTôÓ}¸é¶"CÝT^(zùÝ~ª Ò pƒ@Õµ@½‘ºº‘1_ÐÌü†[¿‚À pÃ{@-zV1¶ÀUL"Þ†‰ÊÍ#p€ÀÜðR˜÷%—½a°•¦iµáª8 p€ÀÜRU'a æÐz’øi.Sˆ8@à†ð*š:¥ù^Î}®Ûz© p€À öA7¢´Ë»i p€À7„•ÈWΛ6c#ru 1Û™áà p£{€muM¬Ê6, p£áÈ}KÍ;-öµož­¯‘¤~·‡À7¸‡Ð¬°ey(]y啟߼yó™M›6u#Mƒlܸñ…BaEÙ²eË…¤åe{²ÝˆÇe?Ý÷VN @àzgÛ«_ÕºÎ|辞ÖYÛ½÷†5Ÿe›ÀN¢c{å¡ Ô“³³³ß–›ý¥—^ú#°C‡½xï½÷6}ôÑf­Vkž>}º™æK¶'Û}ì±Çš²A÷-2(ßçòË/ÿN$‚§Üí‹ØÎÉ\2"m"[ïÜ¿¯ù†7îèi²ž]²À¹¦Í‘¤ý®D¼DŒ$:výõׯˆ4?~¼yöìÙæ8¾Î;×|â‰'Zr·k×®g¯¹æšç4z'‘Áèý4Õ·‰œ "6r½ÈO¿iôÄãŸY·žNûò©êªÀé4+p"ˆò¹×è\üÃdçÅ_\¾âŠ+¾%Ѭýû÷_8vìX+â5 /‰ÞIdðÖ[oýȨDgff>&QEN&˜f;ýÔ—Z¢¥r¥‘8¿yÔ¦‰¨‰ìÉOY^æ "q"tºŽŠ Ë÷áCà{éáq‰DÙ¤?Y$n?¼îºëVxàæ™3g&BØÚ½$‚xôèѦD/ºè¢¥YØEçf9¹`šîÝï¹eœ…d­ÀÅ5¡Ê|aJÈ4<¸îÛ¥itffæ{e“þd+++S!mq¯çŸ¾Õ,,ѹ 6|ß5µîä$ƒi8?â¦9+^½ \¨œL 5Ç"p€À…¤˜ô “¦Ñi—¶$™“¦V‰Hº•»„&Qà´Ï›mæ 5u"pC8‰)#3%Ú–ö¨Ðihf•¨ÜÆ¿;33ó+œt0i'D¨D¶,Ò¬j3 
pC8I*#He$¦ŒÌäÕûëüùóÍ|Ïåµ#- LŒÀ©¼ÅÍÓ¼n!Óè’Àmذᖫ¯¾ú¯&e鸼$‚¹mÛ¶ïF"w˜².pÒÇMdJú¼…æëÈR™¯²¦ýâ4mˆ8æ…Òˆ pm.—˽÷ÆoükéãváÂ…æý÷ßß|æ™gÖɈLöK„RöK'ßyœ_ÒGîŽ;îxîšk®yˆ“²,p~ÄÌG$ÍFèt´ª¢¨2öñG~kužLCà:8‰ ½ýíoÿ®ÊÆSO=Õúùæ›o^'"×^{íÐåG-îûÈ4™'ß9 ¯C‡­ qeë•QË%pRtçÎß‘‘/p"k?üpGŠÖuòzúé§;8Ùohß2íúë¯ \;©KúÎIë¶ûÎíæK¤Sš«9ašn@à`lN*(øIxE^DŒ>ýéO·~ZÑñ%J"`"PŠŠÏm·Ýּ뮻ÖÉ–lKÇ®#ÓOœ8‘(pº¬|'}É{Ù/pö;ɶí÷ÐÏ*~‚ýý’ÖÕ¿‹Î—em“²ÝnÒï$ƒd„*iFCàº8)%BÑ'5$ÛtiN¦Ë|}I´NäEû¦ÙeE¶tž/>*híN¶o¿‹¼—ýXó—ñ¿‡¼·ßÙ~—NÖU”>w²_]W?í‹§ø(Ÿ¤‘\qœ€À!pÝ Üò¡C‡^L8Û”ê ßÔ(Ó´ QÞëÀ#=Ó&LY_"W*p²?™¦X“åýýË+© U¾wÒw–m‡føëÚ¿Ih]•YûÝýˆ¡}I9®Wnúý‡kE€nx厛>¹øÁ÷!TÀ0ÿqÜ*]®`dÌúwäÞ{ïm& œ¼¤)PÅ,$P¾ÀYñ©ñÅK›fm3¤ œíï¦ëØù*EÚ|ê œß<«ÛëDà’ÖU!‹8m6µëkÔ0ô:ö;Ÿjþý[çüž¥Z  ~þöÏœû¥;¡Bà†Æì솯¾ú5¯ºð3¯Ý —«®Úü¼øš/pûçççŸk'p¶)Õ—!?µ‡m6TqS‘³ËØþa¶é5© Õ6s é³'Óm¿µtÆ \Òºþvôo·®ŠoÂhÔ%ÊhBEàÆ9ïµÂ ¹ß‡nVаÛ¨q§M©vº6}ª°ùýåTl|ѳM²Ú‡­SÓõí÷ðNeQ¾WH:“.i]ÛLªÍ«º®ö—ÓßSçÇõ“Ú²Tg€Q œ$Ü•|nq4É®,#¹Ý¸‘"p€Àq®ºêªß<|øð? F(çšD”üév„¨²%­#Ó´ÉQæ…–±/‘!»mùÒÏÓï®Û–¦V­³ïuÛ*“íÖµÒ*?û=ìï×/O^<òÈ Ñß½ÊMF)pZ!”4Wxð¡ûZómÕÆDà©}úÄO4y%¿ü&R‘»¸>nq/IÙÉ[ƒ"0'ò&‚аIôM@à8nLNšRs¹ÜŸ>þøãƒ¦Å¿ì  Í?×ÍKj¢^sÍ5gu4 À¨N‘rWþ|‘;ÂY“›¸6¿ ZçT‘&Ùož­·ÖÕeä½]Fê¤Ê>íþýÚªv?²M‘L¿¹×.#?­ˆÊwÐõDDe¾LCฉ8÷P¸äꫯþÚ‡>ô¡u}âxýä%ͪ"pqéAâ^÷Ýwßó[·ný&òã&pZ›Ôo>ÕÚ£Và¤n©|9Ò:§"HVüd»2M–‘ee•85]Gæ  ÔmøûÑ‚÷v•K]F~Ê|}ÈÈwÖïÒ®~+€ÀeVà”ÙÙÙ#Û¶mûîñãDZµ^2¨áºë®[Ù´iÓoÓl ã(p*T6z¥rå œF²üˆ›+•¦Ð¾ty¾¿¨ùƒ.üýø9ÆÙm†¢‹7qçÛ7oÞüd¡PX¡o\o/i.ݵk׳ҿ0ú{îäã*púÞŠŽÈ‘ˆ/V>2½±ÒæÒ¸mHP÷£Bé?,ä³îG¿—Dáä½¢Q9‘Ñvß@à&Nà̃¢¸eË–ÓŽ;Ö\YYÁÌ^Òôüè£6_ûÚ×>»iÓ&éë¶ ² p¶Uû®…"cÚŸL›3-IçGÝTú«"/mÕíéÚIÛAฉ8óÀØ6;;û»333ßÛ¿ÿ‰.ñúÉëìÙ³­Ú¦’SïÊ+¯ü|ô÷ÚÁY8ÛŒ*‚¦BM›‚•¢Pj’À©¼I?7¸`÷#ßAÞ'Eàô½?ð!©Y@à¦Nà̃㒈]’¨œÈÜc=6u‘9‰´IA‘6IÈÉí·¥0= «gûY1²d›-»í›fNæûýÛüÑ®íúÀ©p†FÀʺ2Ú@ธ¨œÈÜæÍ›¿øâ‹(͆RjR£sò{=ðÀ­ ]tÑ‹ÒGÐIÕ`"Nû¡Ùi¾is§ ŽBíFà´ V"pÚÿMóÑéÂ…j›\m¿:YF[èv´/€À!p=TvHmO×÷«U*J:ñß{ï½MqîܹLˆÚùóç[#Gå{KXSù}ä÷ŠDµÌ€˜)²Q.‰j‰d‰<é4iδ¥´D†4凊š.£7wÛkåÐîËö£“é²oùjeIKyÉtù^~º›‚Ä/ æwCà:{Èl—Nü²Sqùå—GDHF¶Š‰ "K‚ˆÓ0^ÒÜ«û”ˆš|i –ï%‘µË.»ì97rôHÄ~ú³Á¤ Ü8£bèO‹+û5,8@ઉ¸„‡Oщ‘Ò‘%AÄIïÒK/ý‘È”E#y"‚èocãÆ?íKs¯îÓEÔä{pß‹ËûªDòDàb¾Ç¢îÓ߇À p€ÀÁT \ô*;I*8™ÊyÓênZÕ}^t¸º[·à„lÞ‰V%°²›WÖœ›>o¾[Õ}¶Óêî³,[Gà88˜F«›i¥81Ras2VM¸‚ 
™Y¯î¶[uÛ(鲡&T'sÁ«xÛÎ{ß—€Àœ‹ŠÙió®Y´â„«èËW‡·`¶QpŸÛ \ÕÛvCà8@à{IÚÊNÔêiÚŒ©òUéEàÌüy·í²i¶Íû§ßщޢ׬ª}åhBEà8@à`ª.oßûi<üiö³®ë§ñ¶Yˆ›ø<ã-_ðæûß­à¾iD88@à8ãd@à8@à88ˆ¸7½¹Ðº¨³Èmº·ù+üåL~w¹ #p€À½Ä‡›»÷Þ¸Ì;÷ïk¾û=·t´½#÷-µÝÇÉ e›•‹9«Üð?Z™}ÅëÊðïPä<îÍ/ŸªŠX4|è¾Äù¹îT·½úUãÈî¥Ú¹½÷Ô¶ñ·È¶À oxãŽØ¨™DÞº2† pÒì)Òuú©/­›'ÓýæS‰Ö‰ð‰øÉ¼ož­Ç œ|ö£w²?AÞ˺²Œü”mÉ6¥ÉV¿‹æ·¤ïÀ p-p*j*UvÀ/v"L*u"^"Ov_àä½Ló#~‚¼?ñøgZ˨¤éúºŒö¿“i6J(ÓCßcP‡Àc'p"D*UVÖ¬4IÄ+ÔÎ6Áö"p²MåÓýX¡Ô(¡ŠeÜ÷Eê8`"N¥H-Xù²ƒB’çK[¯'?u~Ü42Í¢Q¹A Ü+®yyÇ£q8@à`àç÷w³¯xRàd]™/ŸC RàzM‘‚À€À +_¡æH00jëvd,M¨ÀÄ œ D1úø#¿Õúi›S“F«Z± œß쨃z8iÖÕ&Tÿ{И*³£LC‘6=“>sÚïÌ(ðN·'Ò%Ëh?µ~N¿‡nWæ‰$Ê:"Ÿ0U'$r”$B*ašÚÃŽ •ҲˋàÉ2)“(ŸìÏFûâ¦%m·ÝwFà8€‰8@àCà@à*@à88˜f“d¼~¢^@à€18M´+h=ÑAU/IL#7›-Çå' Fà8€‰8­^ $Õ -KWy!-ñBà8@à¸"o¶ä•E¦É¼4d Cà€”NË^ÅÍ—òV¶Oœ–ÔÒæV¿ ½lKk¡ê2¶)Væk­îW~ÚíJô¯“}µ8Û,,Èï¢ó´—F…^úþ!p€ÀÀPNú¢u ÒˆœHš–/€¶¶©,cבùZ|^dL‹Ò[Ñ“éò½:ÝWœÀÉvlQ{ÝžJœLS‘”e}9Dà€±8•˜Pói\´ÎؠͬºPDO¢h¶//¡u:ÝWHàTL5’çGäìïn£r pü-Æ^à¤É°S‰Ñe5j×”ŠèÉçvg?÷²/+g*zU“éŠ|ÖíªÀ…öÀc+p*AI͇Ú™$ p p™8m¶TÑ4GÒr"EþV™çO“Ï~6X·N·û’Ÿ¾Ê4ù=ô÷±ëól½µ¼üDàCà8 “ p‡À pS#p[_~õ ò|é×7{CàCà8¢ÀmÜxù)y¶ôÁ%€ÀÁNþþ©<û9™8@à88@à8@à8@àCà8@à8˜B®ù»oüâýåÉ} ×ÜríÈ®»¿òãƒ÷ÿ/Üç8@à8@àÆ™—Nn®·³»—žüþÜ]ÿîE`Œ™[:õîèZû«ˆ¦0w×—8®#¹(E4õˆ.(`ðQ·]KµÓÑuö¬Ê[Kà>ôÅä!îó\¢X´Í¸Ï9'ry..nlî!Û7nÜð+®˜=3É\}õ¦÷N͵uwmo+êvè'â†À!pÝÜ#*Þ´ó~>¢ê(™é•ˆ²›^±Q7½h¢{ºþ¼›–wûÕuóf™*ëî!v¾å?üìç>ÑœT~õÞ;›"q“~,ç>rò’躪F¬øâ†À!pÝÞª.êVñäM£s9#m î}ÃIXA¦Y ”uŒ¼ULd¯î~ÜúóFÞTî¬("p€À!pð’ÀÝôOv_˜äëY$nN®§èºúVÄ…X»óK/ÜûÑrŸGà:º9بXÃ}ÖèYIeÌÈYÃ[¿nL¥­îÖ)yÛ*èò&bWwó&²éCà8@à¸5‘¸¥Ú‡cî®/½øëý÷y®mjΛVH—²#p'oUÓ|Z÷¶UrQ½5g¢}e'e8¸‰¸Ãµ×ÏM¡èWqb·`û¤©„Åœ6·Ö½íøýæŠ\Å4ÓæümO¯ÞùÏ/îBÿ¸tªùÕÿô§ p7>¸ëï×§^hý<ô•ÿïo_ôwšÑq‡ð²—½ìcc/pF 4…HÃëϦ͛U¯?\#°ºFèÌ´ªYW›V žÎ{û˜¸4&¯ßwäÔ§NžkŽêuç±§š_<ýu8#vªUö,Ÿú'pgö.ծ߽üäã’Ì—ó~®Û!Þ H‚À!p€À!pÜ0"p/F='¹àBó8ï8@à8@à8nü®­ƒG9¿8@à8@à8.hÝÓ¹¥“³œß p p—ëê¨Dà8·8@à8@à8.ÌÝSÛ.èç†Àõ{S˜ñGŽÂøÜ3Ï<Ó¼í¶Ûš7ß|s‹OúÓ‡À!p\¶¯©ãû8¯¸~o ë’êÂxœÈ›H[­V[#s÷ß?‡À!p—ÅèÛK¹ß¦*U 7“hœû)e´ Þ2¹Àz±Óu;fºÖ@Í{ËçCÓ¸fKÔTÞìëúë¯Gà8Cà²y=‰ãœFàÒ¸’)U1IxµÄ•&ñµ%²¦tVÅQ¶U\E‡º)ÍU6û«˜ýÎ#pkî©§žZ7]¢r‡À!p\殥}Ò|ÊùŒÀ JàÊf^ü¯k¤ÌŠ–¹¼WkÁ¾÷¢qu‘[S¹a’£p½œôwó›KEè8Cà¸lá’öž‘ œÏÜ ®”$pf¹ª)½UrµSë1ÛmP¬ºÏUnýKú¼I“©ˆœ¾úé§8Cà¸l]G$íEàF+pNÔ*œ_ ¾è 
܌ݧýéÞ/Nò`Š~ÒˆÈà‰¼…šS8Cà¸1¾-œ•¤½¡’Y€À ]àÜûyÀ™eJNÞªf»e³Î¢™®ýåtzk¶"liS¤ÉTß÷}CàCฑ^C$íEàrSXÍçÄ«h敼ÙŒy¯Í¦«}ât[Žb O\É1ãm·4é¹èºÀI´MšM~øá5)EˆÀ!p‡ÀeW2ëI{¸q¾¹è „]#Ap§£P/\¸°úYd®×fT8ÉõóXÄ~ÎanÜo0ó&õH‰ƒ×ŸÀIs©DÝDä"p‡À!pÙ¤½ÀaJ.­  p7|ö,Õj{׸?!p0'©C®½öÚÖ†^ë "p€À!pÜЯ’ö7­'}Þ4™¯DàDæ$"‡À!p‡ÀýuCÉ,J‘u‰vݼªÍÕÃ8­Â WZ Cà:¼Ö+ýœŽ:Oá{äõþ2¬ßCà†,o$í…¡ \=f^ÀI!{Ä Ñ71 p}^ë=åY4Wê÷~àî7š7²1¬ßCà†…+™uޤ½06gÊgUõ¿`Ííf–Ÿ77g»¼æ‰Ë{UM:Mê[5Ÿ«“최ŒD•T" b@àÒ8wÝêµµšæÒµ>oÈm>_|ï8w¨š$Þú>§ûTÌuÝp÷‚†Y¾œðÝÊv?7ÖÑ·åÚ{ Kà&ÉnIˆ¦úBÁþç«9Þ4÷›}@¸¾%#yš®(·U²ÿ»mjU†²ù~™o6鵜­È@%.%³×`=0mÁ]¯­¦ÎWýÜÍÏnÙöÆý1×ðÿ4÷‘²ùçmޔܳ×üLÂ÷¨{?gˆÀ!pã I{aìÎÜLäCฤkäîÚÞÝ˵'8Oa¨ã+pÚlªNœ8ÑúÜK2_8Ø5BÒ^@ฟ¼DÜ$ÿ›¦‘fT‘¸^òÁ!p€À!pÜ@®ýqއÀ-ØH‡À!p7zHÚ ×Vàdƒ6ö2˜CàR¿6(™·þ%Â&²&ͨڔ*Ÿ5‡À!p‡À’öÂØœú_Òò5nÚŒM2àý—ü}º÷%/\&Ó‰ô“ŽQ¨ÜØ]G%ǹ #8—²C“ô–\jMœYVݘ„ ¡*u›ƒn’N.H3ª"}ߤƒ¹Gà8CàFÃÜ=µí­‘§Dß`Ä·èçYÓĹ*SNò š¨×,Wð£búÞ«ÄY_§Ï$ \`Æ4œ\°¥´äs/õP8@à8.µk‚¤½0§e²b­aJeÙj U3½îÕF­ ¬˜:ˆu“]}A×sóÛ œ„«wøø'ó7¼ç»»—kG²Äß¿ý3ÿå_U»/Iä+ÕzÉû†À‡À!pˆ¾-ÕvF×ÄiÎI¹À‰«š’X%_¦ÜgmfñJe½Bö ¡h™Vtpïe?3þrkÊçK7\ýµ°ø¿>/òVØÿ`5‹wì ÿGWÒ%ò&Q7i:Õ ‡À!pÜH¯’öÂøœw“вY‹mŠÏ/˜H[ݸ‚'` ‹ œ·ß¤>p­¦ÚK®È½Ùf…^›Pm:ŠÙ#p‡ÀôZØ'ͧœ0§…æ½ivÔg=0=ï5§®îm»Ð­ÀMÂAìEàTڤߛDã$•H/ýß8@à8®?\ÒÞ³\ 0NW4£P .òÖpÓÛ \Á tX'pæ³Fó¤?\ÅM/»Ï:ó^ZJKG¡ŠÄÑ„ŠÀ!p7’뀤½0^çn 93˜ d"k¡¾nE÷~Þ]Îän[Ô¾mf=Í1·˜^ÖmùûÔo£ešËûܺ+_õ³_‘¼oÞvªNä8@à¸xy“ä½>½ô…CàCà¦\à*.iÁK0_uÓÜ@½E¸·üâï|gïáZÑÛNÉœæ7­ú%)¸)8‰ºi T7­‰ÚK?88nÊ®n2'TL¶ƒª9MsU•fÓ7þóõŒÉ¼3ÛÉù8[…Ó‘¨ÒŽ&TCฮÿv 6%•°z@àªFàμn÷ÝÿÄM/jß8#y p\¼¼Ù‘¨’Ø—Q¨‡À!p]ÿíæcò“ΘÚÝ«yNwÜ|ßg4i¯‰¶•œj?º¢¿MKý…À‡À‘®=®dÖ9’ö‡À—‹Î÷"–9烞N1 pÜp¨›œï…ãœz8M#"ýàzMà‹À‡À!p±g¹vln©vç pЗÀéKÓˆÈhTÈ€À!p‡À¥K$n;ü¤½ô%pZU¢p*s‡À!p\ªçùq-™€ÀAj'HN襤‡ÀÅžãûDà8σTN"n’¼÷Úk¯í»‡ÀÅžãg¤ •ó 8HEàNœ8ÑsŸ78kÏÜáÚ=KµG9ǃÔNšJµùÔCà8ëSÞHÚ  Bà´˜½ ͨúžA ‡À!p©œÛµd© œ}‰ÀéK2 p‡À!p½CÒ^@à`(g£n‡À!p\ßçõQ‰Àqn8Éÿ&#RyÀ!p‡Àõ†KÚ{šè p0p“—öc*‡À!p\_ç4I{ƒÁ œÖBõ¡ Cà8®çèÛN‰¾qN8; ÕCà8ëé|&i/ p0XKó…À‡ÀM»ÀQ2 8@à8¸ œKÚ{–s8@à8¸ŒI{CàËÀ¹¤½g(™ p\Fޤ½€À‡À!p€ÀeHà(™ô-p¿ùûg›ö+#á=} ›:#i/ pЯÜqÓ'ÿÙ‡ÿ¨ùý'#áCǾÚ<óçO!p€À!pS#p®dÖÎ@à Ÿ›ï‘÷ðö‰½ñ"pdSà¾|ª:±תwºTÛÉù p!p>t_sÛ«_ÕÂNçþ}Í7¼qGs÷Þ†.wi I{Cฉ8¥h{-Y³’&Ò&B÷î÷ÜÒ’8_î²"p.iïiJf×ϱß&Ç?«Üð?Z™}ÅëÊðï0‚Ÿ¦À‰¨‰ ùM©"u™“Ïß<[o 
Ü‘û–2'p$ím/Å,ßw&€mÙ¸#ozs¡)Ç?‹üÒ¿šÙïþ¶wÜ4ðâèY8³;-®™&¢æGÜ´)5Kç¢oç¦=i¯üåüÏêµ›eä~/÷}ƒ 8Žý䤥˜TYó£r¡iã~¼"y{ byÚŸ9òw”¿'÷á#÷{¸ÔN¤Ì8íûÖNôÆùx‘´Cà€‡8DZGà&Vàt‚ôsÓæSéÿ¦|é'DàN?õ¥Ì¯=KµGç×ðÌAà8à!ŽÀq츉8‰ ÉO‘êÏÓ Y8^$íEà8à!ŽÀq츉8¼É ¤é¨’ûös¼(™…À!pÀCãØ#pS!p“r¼HÚ‹À!pÀCãØ#S!pÚÏÍ6¥sÀBÊw†¤½<Ä8Ž=7Ñ'ò¦e´d@ƒˆ›ZfÚ4Ž— ZÁ 3fg7~~מ"25äï.ud8à!ŽÀq츱8m*‰³²6ÊþoÝ/—´÷ô$—ÌûèƒG¸ŽG€üÝ8à!ŽÀűëï„ÀM¯ÀeùxMCÒ^âÁþ/BhžŒNÓùI-àÿg>ìB×\wHdE§4‹éqôû> ë8Žâ|AàÂhþ·Nð«4Œúxí=\+îYªÕ&ý™À!p€À­AnÆ*e¡¦Mê)Z‹ÜðEìßc/’&ÇGNŠô8ÚcÀ!pŠý‡m\nZ’ö"p pëMnÞò`Bóãyê ÿc¯ò–eEàhB 5µËq’|pãØ„:M%³8¸5Mj}‘›¤m–$pÚéïc/ѵ¤‘„2=)g›^Uí=Tü\þ)°‰aýmhäoÓ{À8¦™¶¤½Üš‡¬/`~6öv8;Ïcïçn"cZbI¶¡Í®yýæÙzìqòYÖ—‡²¼÷›à8®›ã%)C$uÈ´<38¸5U‰¢ÅEÔl3©íÿ¦ËÉØv€GàÆóØ'Ix;“¨™¿®ˆ›=wÚ œFÛTø-Ñ”5‹^‹3æsQ¦yËÌÛsЭSŠX0ÓfÜ4%?J“cä÷sµ}]G•Á?îx¹¤½gú¾Éß=püäxäüiæ}η¢w.èôE‡À!p©5«iöu¬hS›mNSó;7kgø¸?7>ÇÞïãÖÀÅS+hí.$£Šìô#pîÁ]÷æò°nxË­.½ª"n÷P¯»éò¹ì~t¹Q \ÒhóQ^ÓqÇ+­¤½Ñ«¢ÇÄLkȱ1Ÿô;á«:Y+¸õËæ\Ðc]tçACà8n õ¸›v/Ñn<½fÙ›/‘1‘öP“hwePàÊ.ºVõ®¢òåÜó0/¶¡y+‚ÙqiB%¡ãµ÷žÚ¶HÞΦ‘´×IÖ‚­[©sÂV7ÂW lC£rïX8CàRC›¯Bédšm2Aà²ìu°J\F}ƒ/p~¿Èn.Ô4?ªó¥O«Z 37oæUÜç’ÿ@÷šÙ4ê¦Ë‰ÌZàäÞ çƒ—qHö:^i%íµâ¶Š‹¶åÜûª=R^pDZìS"p‡À /"cÚÜd{¸úÍÞš&¦±ÇQ@ØþkºŽþàKžö‘ÓóFG8Û}ë´¬œ¯†FcÌÝÊØŒûYõ‘2Òà \qÔ}àì±¶‰ž“ŠÜâx¹’YgRºëz,ݱ-Ë;[tÇ'Ià*+ÐCà¸ÔIÊ foäÜd{ù)?%*fû7&G=컎L“íÊycçÛeôœ’ùºL–"p*fqÍfNÌ*¶yÔ{ðÏÛ¨L/M¦ƒ8¿˜½æ~Óc5Ê¡J?Z’OGªË|Ù†VY³£Ù}Óm¤‘CàÚ Íb¨6` f`ÉäëÑœJ‹f;þ²%nzNj%J‡çqâ•;núä¯|ð—9^Càw'»—Ÿ|ÎI~×ÿw»>ðÌó{¿ŸæßK£$ò`ÕÅc-p˧NŒÛõÝ)»ï®ýR‡¿g+ ÷ú_øàDδÀÙÔRŠJšFÉü&T=‡ìöì4ݧDÖâ’Ûmª¬Ù¨œ8]?ˆæT®½ÐTM]À¢7ÜÞÖ ,8©[0 ¼ƒ7¼»0ȼ<ã.pãI&ÔÁ{¹ jXÏ»>òÇÝÁç×Þ:Ò&ÔC­*IQÂÕ~pi'ò Õ=v3êM÷œêæx—Y~îEÇ÷X°_ Ü F¡Zy·M©Vàô\ÐÉŠÍ Úž~–TötZÜ{ÿ¦‚j.Õi¡è7dó>[i+–/yõë¼ùilB•‹O/6 7ËçQçgCàÒ=ö¶¯HRGä´$1lüšG.t³õ3ÚÜÓ& ·K”‘8µË»6´¾Q™9”<5MÓè…ßoHÓ4 kêO½“'?”h¥D×'áÙý.·Ë š¤ ƒÄà_ƒ¶)Õ/©ç˽¢÷{6Â&ËhþGM ­9!“îÝ\¨j 7¼‡ZÝ4{ÚÚsSÂFÉ›¨]=¦ÀðÔ \¨Ÿ‚ˆ›^”Ó©™³ÿ ëqÖŽ½ƒ1˜VÅÛgf˜#ÖŽþþŸ¬Ë#æ)’¢'Ÿí&¡ïêÍv¹à™Η»aæûÿæ«í$îè$=û$Šýñä"Ã8}†è3ÃFàBÕ{l…ŽÐö4=_åCïWXÐÏ~ÅNÎö»Ä?²\wM¨U“ݼmÉÛä:Í—ôPň2.ýc¯CÿCÕAÕͺÀi$îmGNE"÷¤JÀç–O}jŒ£2/&%ñM[à’¢ôòàî´KZÿã¯5ßZ 'ò•fÇIzöÉhÔ]‡jÿ%)‰ï°Î6¥Ú{‰.kåIeÏ_ÆnË–Í󣽡hŸ±}ïÚ œ¶@…FÔ"pClBu™­ëI¨MXö>O­À…BÈÚQ4tq„šYå"Õ¶ôoò²M™®ÿ9% 5×}jGéAHå¤ 
\»:¶ò·µÇ¹ÝñÏÚ8”:Bæk­î7î¶ÛW;ë6…@/å^k¡Žéy”šÀÉß5t}k.¸A4Ku¯ì÷ÖGàî®íÄg`;†%pöº÷k"ëP¿ÿ›ýRÓˆèu«Ó칦µ“ýsP§kz[)¢³Ïµ´ÓˆlÙ²¹çŽiìW2ýàê^ê‚Jž™V¦ õK«7¹È’ú½Y!³p}(Ú T;¬ú£ŠìSqM¸~"P½ h}Å´ÿSšd‹û/5íoâ{s³Eåã†îëù¤7ÉÐ1ìd_I§ÿ(ø)ôAJ!ÐËùÀ}#ñ­c¿¥e˜Í©r¼~úÝþK42âY©;ò6“¿mܱ”{M»šÈ¡¨¬Ló×óïóIõ¸eš\Ûš^Äþ¨ëY1 M‹ûîý ÜÏþìÏô|î“nò„u ƒlÄb°P\¿“¹¬o£¡‘B6RÒÉPs?tNjç¨ÄtzcÒt¡ã¡ŠèùÒí?´Cǰ“}Å œŠi·)¸ôN®sïv 3gW$pû"Ž#p“Ý f¡ †"pþ_#6ª¢Óü!á*~Iáuû Õ¡áþƒ=i¨ù KóL²ÀisD§-±Ç ´ŒF[“Î?†ì+Nà´éE›fùœÔK7È8â/I2)£O8¸5Â÷߱ޅò=)íÒ<Èvt±³ÍqIC͸þûÀ%5jd–N£½¡óFÐè7X“¿³ÿÏ—2ª4D{ b&Jà’øÌœµn£nÚçÉÞØ;jŽÀõwìm”4î°õ,Cçà®Ý¾âNå,iP7XÓä~·Qô{K:^­âïKµ³ p0Q§Q6`€DåìÃS›¬ü>GDà¬$Ä"LjŽÀõwìõØ…dI¦Ù>r¡ÑX:(A›aÓ¸Nö•4ˆ!$€íR péæÇ4C¡ã Üé¹¥ÚN8˜Ó‡¤¼ Í¥¾XéÃ^‡lërþ(ÔÐ>´oR¨)¯ÝPs®ÿcoSih6sý’rIlSÂ$5}úgG&ÃvûJ8éÈV=?õ÷Aà/pãXz/t¼"y»cÒrÁ!p p«CŠ”„S–Ó4 òдM 1÷G±*ICÍåó “‚NK)-=*Eþû¸ãéé÷o’Ï~6í Õî&íË®:¿üö{…Ò pé¦U3i×÷‘“—ì^ª­$%½Eà’ïrmʵ˜…zÙ¶o·üÅwFà`h7ÍL[1{@àÒø»H· ›ïo”©C:9^{–jή@àº7›hÛ:„©$¦Nªö€ÀqŽcÀMu.nÃ8 bPö®#‰«!p¡9ýj"uÚE".‚Ÿ†x!p p¸,¯ÝKµsRkö“ Iš­ j¾lW²N£´ œì/´Í¸}u"p²MY?IBeÛ½J*<Ä8Ž=‡À¥'pË’ë,Š•4ÊØ'[„^#wv]nÉÙšØ~6»ŽÎÓæÕNö•$p~ôØþž¶ ~Ÿ^j(#pÀCãØ#pc)p:˜©9©›ã%¤2—ŒVpé´9S…J›Zý4T*Uv%+O~ÎŽ”×õ:ÝWœÀi ?G+lúY“„Ë>z­½ÀqŽcÀ!p)¯Ý˵'vß]Û‹À%7qv“‚'Ôôé×+EôüzÈ!ó×ét_!S1õGÑkš%û»û#â8à!ŽÀqì8šPG)pKµý!pñøÉÔ{•=»^€ûŸ{Ù—8Íú'#íÊðGà8öÜX œŸrT5P;=^.'Üù¹¥“³\rT-©œ&tÏ’Àiº¸ú½š3âÇ›hÓé¶šK\Å•q:^‘À8ˆÀ%7‘û¥îM1¢‚gËßùRÕO ¾Ð:Ýî+TCÙ—3iZµ}ó8à!ŽÀq츉8ºÙQ„òSüŽ*×Éñš[ªíˆn"Žé N›Q;ɧ}Ù¬ìùiH:8+ÿ¡uºÝ—?ˆA›Líú¶/<Ä8Ž=7ÑçKš}HjóÚ8/89.‘]MªÄ`£±¡:Ëœˆ”î+nn÷å œŽ:õ××ßâÇ›x³9û”(Jî 4¥"p‰œöKЬêr¡œ¡òjòÙϱf“ü&•dët_qµPµ¶rè÷I£Lkx厛>ùÏ>üGÍüë? :öÕæ™? Cà8®½ðsxÙÇ87¡ .'ܹI.pŸ–À}òú}GNýæïŸmþÙ_¬Œ„÷<ôµæOCà8®õw‘¦&Û‘]›¤’F/ŽÓñ’t"’V.pŸ:y®9ª×ÇžBà8[ó·ÑNçڼߨC¸»k{%±/ p‡ÀMÀMÂñ’œpÒœŠÀ‡À!p/p’s+.!ê(úö pH‘{¸²g©öht¡5§njî€ô7Ê*;þé}Í‹.žÉì÷—!·i2µ©%-U4Ìú§ýÜÜ=µíѽõ,i Üe—½¬ß{ÏI¸ÚÞõ±~ÐCà å8·÷žÚ6þ“ß„ªåŠüôã1Ôg 7ZKë?Cà8J8­‹š›;\; -< p‡À7µ'Í«£Ú“À½Tà~eÒrÂ!p‡À!p€À!p™ ×A'“H‡À!p€À!p8y¶È3Cà8@ฌœ;OÏʨT8CàCà²#pË’Cà8@ฌœ+pCà8@ฌ\ë\]®=!5R8@à8Cà8.+·TÛqCà8Cà2"p.'ܹI(pÀ!p‡À‡ÀM…À¹óõhÄÁ¬ï+®Øp8Ë5”³Îå—_öCà8¸! 
ÜÜRmGtΞᜱ¸n8Cà8ëøœ=#"Çy‡À!p .;wPšR9oCà8ãoÀeDàæ–NÎJN¸I+p—»páBó©§ž ‚À!p‡À%ž·EìçܺÀÝÿýͻõóæ›o^}ýõ×#p‡À!pIçíݵ½’Ø—s8n$§Ñ6û^dCà8k{„œp€À!p7=·\;"pþ‡À!p\FNÎ[99Cà†*pµZ­5Á=ýôÓ‡À!pÏ@à¸q8}}úÓŸnEà„gžy†4"‡À!p2w¸v zþ<Ê97TÓѧÒ|úð÷>Ÿ8qCà8ëDà^*p¿BN8@ม œ›DßBR‡À!p‡ÀuøüY®›[ªÝÁy7³8CรpKµÑy|šó8nh8Á¾¤‰|8Càº>ÏÎÝSÛι7p“×m·ÝÖê§/‘7Ji!p‡Àu}/G<À¹7p ¥éõ…À‡ÀM³ÀIE9—Ì7pÓº§6â&ýßB8n‡Àµ=—Gìã|¸ÀI8¿Ïƒ8Càz:—÷‰Äq>‡À \à$ú&ˆ´i“*‡À!p×=.'Ü9 ܇À Eàä%?%'ЄŠÀ!p×óù|4â ç pÜÀ.͇ÀµrÂíˆÎé3œS€À!p‡À—sçôiIîËy‡À!p p8)«%åµ8¯Cà"p2UùÖj5Cà8.5;9×çÉ 7°œÈ›Œ:•Á "tRJ Cà8ëû¼~,b?ç pÜ@ÎŽF•ѧ"s¤Aà8ëó¼¾»¶w÷rí Î-@ภ܉'V+3ØÚ¨‡À!pç6 pܘ œDÝDÚz-¡…À9‹9·—kGÎ/@à¸TN1<ýôÓ b@à8K9¯åüæüî'7š¢Ï ïÿÃ3ÛçÞw00o* ½œ”ÎÒRZ>‡À!pÁ@à’n³F¶–#Ž\y啟߼yó™M›6>7} …ŠÏ[æßûƒ7¼éçŸõ§oٲ傿þe—]öœl_ý9ö»ï°cZNF J¿7áÚk¯]}ï·Gà8Càz>¿÷ˈTÎ1ȬÀE¯Kœ Ý133ó1‘§‹.ºèÅK/½ôG*[‡zñÞ{ïm>úè£-¹8}úô@’Ξ?¾µ}Aö'ÌÏÏ?'ßᵯ}í³"y³³³ß޾ã“NîöFlŸ4³/8}1 Cà8.\ûó’Žó 2!p"<·lÙrúòË/ÿŽÈšÒ­·Þúƒ£G¶äéùçŸoŽëëìÙ³ÍãÇ·äîúë¯_¹æškž±ñŒäîw£÷ûDJ'EàlÔ Cà8K©Ê Õ8Ï`,ÎEØö‰Üˆ°‰ð¼ï}ïûÑO<ѯô^+++Íw¾óÏnݺõÓLC‚À!p pã-p.'Ü9iN圃\.—{ï[Þò–ïŠltó’Nñ:’Ñgle~ ³ûÛ”¾bi¿þàþà…Hâ¾™Ö CàosçûËœsºÀmذá–o¼ñ¯{yˆËèE[’i˜wâĉ־5—™tƿ뮻úÞn¯¥¥:yI¥‰«®ºª‘F$Cà8@àÆ_à$•ˆ¤ᜃTNFJnß¾ý¯z]ª—Æë™gž‰÷ôÓO¯›&#'mÔ-ÖMÚgHà’¾[·¯Gyä…H⪇Àñ·@à&_àÜ9Z’ûrÞAj'µ?¥|T¯¯NN„ÈOSaבmØ”·ÝvÛê<‰¨É²2]~ÚíøR%?»]]_öâÒhXùë7ÝFèåê¯î@à8¸É8)«%åµ8ï ‹^Û¶lÙr¡Ÿ‡xRªô‹‹hYÑ’y¶Ï™|Ùy’åt;òS>Ç%µM¨šïL×Õï©Ò§M¯úiTq´ßW¦ÙfY8iºí÷%£S%ŇÀM"Ñé1QŒãð#vþ?>ý×ûcæïàoˆÀMžÀœm¸''¤$p{¥hû(N%­“ýˆÔÅ œFÈìg¿D”Šb(jh§Ùï+ïõ÷KjŠíö%}á6mÚÔWŸc+öRÝCƒéü ¸, \ô*xŸó3{–jήÐie 9[ ç,XÛ'µLÓ¸¤W’Àµ[_"^Ú *‘0ùi Qó›]UÖüH™J6Ÿv"pòÒú=úx0­¾Îœ9ÝT®ø‡ÀMªÀ…®k½N埡„¼—Y“{GDÊXôªGT÷®#‰«¹i •²èUq”Üϲ‘·ª›^rÛáÞ„À¥Û„ÚÀÙ¨›˜¿¾,/ò¥Í ¾ø©@麡‘§"~þt¿ivÍÃôŸ³ßW¦Û¾vòJc„*M¨Ü4œ\or=É5¤] äºÖ~±2M–Ñå8nN„kA%ÌL«Ë{éÿ¹å5o:覜ðU¼mTÜt™_òä°Ä¹‹ÀµfÄZA,޲™Cà8€‰½FŽFäoø¼'H¹€ •mSdŸ—sѰ†Û_>°|ÅÍ«ÚæÚ«ûƒ LT¯n¨"p p©Fá–j;¢ë$3£gaBÎÐb ?\ÃJž©žÎlõû´U½}¼fÜN#p žæ½ï£ `@×É9þ0l[pѪbHàŒ°üŒ.hÒÌ{BWðšksfCN—±Û ô›ó¿_!´_7=?ÒƒˆÀ!pÀÄâr£À= ]àCàèëZyLÒŠð·Èʵrwm¯$öåo­ëåü8ä„»úêM²0V‹pQ p€À!p™¸^"÷£þ"¿zïÍÏ~î0dÞöŽ›šòÌŠÀ™Žþ¹!ÉL~Ú.jCà&¹Väš™à91|äY?si=l²ÛÊ€÷WðJ`•¦á¢Fà8`:سT«í=\é} ›psùß*Þ´F(¥È€ö_BàCà&‰¹Ãµ‘Ä=ŠÀ!pƒ8¿|V>0¿ê•½*xÓ‹*1‘¶ª)Ÿ¥Åíóîs#æ{Ôc¢wºßªIä[rI€¦x½ý~ 
æ{„—¼åËø[ˆhBºüÔOýí¢ŸÛyh pc#p/¸_eN8n:šPKF¤*FÆ´"‚&æ-!³ÒV4âª/ÔM…;ÝVn(©8Ù}yßµn’ú=«Æ,7ã~\B%‡’D:‘#ª€Àô„Dà$‡À!pÇEÄìÌǃoø¢æm#Ià m.g¦We½rþ~=+…¾‡Ý+yR;M»À¢pKµÑµsCà%m!Q*yÍKŒ WìFàLäk¾ADà8€1¿vÎÎÝSInzúÀMÎökñZôú´-º÷ófº]¾Ò¡À•½ïSëƒæíw1AÀªæw(˜å’Ö¯k-W¿?‡Àôpí,K^8´Ä5¬¼¡«rž$­™®2§eúÆÜ¼™ž쳑$On[ºý†‘±Å€ì)ùÀïc…´d¶ÛŠró@àúÁ¸?À!p¹š³ƒ*ê)í—&S`×Ïñˆ}7Éò¦iFŠ ËøiM )í@àqýì‰Cà8 #¸œpç†]àCàCàú»†ŽFDà8 #Ì-ÕvD×ÑKЈÏPÂÜÀzÕaÕK`®£3"r.¿aÃËn ë™+gø3¯]óMo. •W\“knÞre³ðÆëæý½?Ýš'?û݇0Èßã¾ß8œ‰D¾~úE¿À}`½:€À@ÇQ¸;ö,׎uºüììÆÏßñÞM‰¢Áp‘¿ûÛÞqÓØ ܺT^Ñz›7­â œÍéfäoÑÈaɲ/„Fzù×ò\èÀä ÜÉYÉ ×i{¸>x„&É ÷±¸€°­:¯‚¢W̾`«*¸é¶¶iÃTC¨šuL%[!µ¼n€ÀŒáµôXÄ~KKàµt•‹†ÙT~t®ÑÀÅL·¥´¦öjÉ–â`¢®¥»k{w/מ@ิ.gê€Z…À•´Ò px=ï$'‡Àu*qÚO­â‹ù\ô‹Ó®Þ¥ÀÕµò‚È*€ÀLìõ´\;" p\ZWtѰ¼7=XºÊ¦1óT;¸ª•9³ ÊY p‹\KrM!p!ö,Õj{׊€Àd„¹Ãµ‘Ä=ŠÀ!p@Vî¥÷ç%7‡À pAª2Hu@ಅ[ªíŒ®­Ó×.™oI“ø ·_Ò† pÓxm»§¶Càz¨œKRvWf9+¿.j‡ë4¸ð8€ ¸¶–#@à¸^#`å€TimTÍõVäˆËk2_3mÆ‘÷ÅÌÉ¢¿¼M\|?ùå¶8€I@*2Èõ*pŸ¶À~êKͺ¯yä¾¥æ‰Ç?ƒ¨M€Ài_IØ› E»\’Ý’Wœ¾d¦WLbÞ’‰è•MÅ…¢&êuË—ÀÙé#‡u?*脳AÂ_ÈÑk_D3«üÜ;ÿ§æÅ—]™Ùï/BÎCãŽGì”À‰¸íÞ{CsÛ«_µ†7¼qG¦DNå[/qU'FkäÈ}ž±¦"勞¸²™^qrXòÊtŒÀͶSõ¦WtŸ4¡Bîˆ\Ðüg;|>û¹O4¯¸bö ç!Œ±À퉄À}ól}UÖäZ°R'Ódž¼Ïµ,ßõÎC‹\ƒF#_ëdÉ«aZôJhY³hKhUœ®J›ßÎlÇŸ^ •è@àƒ,ãrÂó ܧ!pïÜ¿/VÒdšÌ“eüy™kûò©êºeBÓºÝn·'¿‡l3MÍJjÕy'KžÀ5´} g®ìš=ófÚ¼[?×…À•8@àƒ Â8˜¶ÀÅ š.ûYš(ý¦Ö?ò[«ó¥)öÝï¹e5z§Ñ=Ñ“n ò¾Ý:VäBr&ë :_¶«ëÛeìwMú]'¹\Ù Ú¢•³67秊=Óyófù'‹ozœÀÍ›«Âg÷ÍE p0Q¸{jÛý”"ý œÈ™HO§ÍŽ"j²¼íg¦<=•0]F$LåI§É5g?ûëèv­ä…Næ'-#ò¦òh¿‹Èâ4õËyƒr6gÞϘ¦Õ¼FÛÜûE7¿dR’øÛZð§ûyà¼ýÍÇlgžA €À“‚ŒòŽîG͈)t¿õUù/¼ÿ¾O4O}ãë=û"4"d6YŠ iÄ+.Ч2–$Z:Me+´Ž6ßj¿¼nNåÔöëÓþ¾z¸]»‹Ùê—ƒª„\ p@çH8'oÊæîúwÿ'›¿ó…Ó=û*IFàâ–µ"'kí.³ûëVàDÔ´©Ö§iM¸Ë.{YÏ2˜U[ôG¨ p}k†xëòÉæç¾òÖNæ%ITZÚn?§}õäs}›FÎðM´À‡À´GFŸJ1ûÝË­èÛC÷#ûâé¯÷|þk¶ÐhŠÛ<jBµÓ{8~¿M¨qÍÃÒ´Ú«te² pÀNŠÙ/מ‹¾ ¿°üåæ×ÿóŸö|þ«(u’NäH>[)Ò¦J‰Ú«ÀùÛU±´rfåQ$øg›bu¿VNu`0H‰ÛIÜ8é÷ççú»D†´oX»J *V*GqÒÔ‹Àévõ½/Šþ2þHU‘3ýü°qÛh³é9Ì´œIçÑ·×¾é b@à8€N%îžÚö=ë1¸ÜWR»Dl´XR3£4CJ3ÁOŽ+óüÜqqÓ42¦‚'Û’mЬÅE í|Yßßn(°ü.í~§I¸J ,Ö¢©Iª•l|‚ØÍØ´ €À!pɸ ßòî¦ÃÙ(s•D(B7îd)‘o=]Ë+YÉ3yÞB·hrÅUL~¹ŠKÊ[µÉ{ V óÞ~tÝŠùNófy¢}€ÀÙÄ-œ¤­±géÔߨÀ½íž/ p\âÃÅ Û¡³%¯¼å 1·¦ —–éRárïL%†ª©‹Zð*:TÌwj¸ùEtb·ÀÅ py‰“º¨‡jñ‚Ü-þýÌ_ƒÚ‹À æá"Q³rHˆ¼Ú¤91«t*p1Ó 
ž昕j¡ª4ÚŠ%÷]ê\ø€ÀÅ÷y‘>%¡BÏÒǤ“‚Õ~?`°·kéÉ/î>ôäßýkŸâ1.ñá¢EågÍ© +yN˜Ši œm ÕH›‰Þu"p%·;¢ËâçxÒ´Ãlþ‘L«à4“Ì[ûöÝ¿ñ0B…Àµ}À¬ö5‹¸œ¸¢ÀÙe¬ÀUL_ºÓ„º`G¿2ê¸õò&CíE’lÔMò8ùy•:¸´›?:Ù'Ð1{˜Ó~fùÀÉ+ «ö!pU#ju#þþêeozŽ‹¸Ÿ$팋piÞ$oeJÖí&’4«¶[>4ä®Àɵ-ÿØÉ?biäHF×MAÒI7©¸1|à-x©K5@à:è,l3«‡°ÐU¦tšbó4I³«Ÿ!Ý&Í •Àñ›p5a¨îO“qvZd®7q“k×ïJJà›Vô?h½½7 û>.Þ¼“¶R¨i[Þ¬ÛIž”DÐäæ¨Í­¾à©†êÚåýR;"LòÙÞÈCÒH`p§uN;)¡•iýS†ÀMÆC/çš[ó\Ì€Àµ§“¶´ÞýB×¶|ŽÝ¦FøBQ=]&T‹P²/Ù0xóÿë´«…ÜÚ:uH8Ù_h›qûêDà´ËG’„ʶ{•TÆ.§3À…–·ÒfßË _ÖÕi¶^¢nG>ÛÚ‰ b¾À%õ…U¹±Ÿåšõ›Yí22Mþó뚪 Ù®z}ë::O£òì+Iàüfaû{j?_Ù‡~Ÿ^Ò !p0Të¥\(b'pzãÔzŠ>º|R3 0Xayé´9S…J›Z%Rê:a—ñD…"p¶O­®×é¾âN#üz³Âf[´éX»vL¤À¹6]‡Í¯Öwó¥Ë7˜.£^‹\¤€À oªÜØãF¡vÒ„·¼Ü$µI%®Wn¼IQ? ³Ý$zí»¦÷к¯øÝ%Bç¯Óé¾B§bê­×ASöw·±&Yà ^n·†'qõ~ò¬ùÌtò·7´‰ÔoÆ”›žþG¬}Þtƒp ëëMÒ YHýiz3µ{Žëw‡À¤#pzMvK’=»Ðöü~Hàìç^öeïzoò»pºÝnåuâΛŸ÷§yó ¾ ™Á¹@I¬‚[Ïæ…Ëë:Þr…˜ï[ˆù v€©8?‡öY‘›šüÇl›Wõì÷g³ÿ½úµÐ¶ýˆ›6Ó†¶©ÿE“F`0×I8™'×_–N»ˆÄuáМqÜOæCç•´ª˜Ä¼y“þ£jKb¹ée“¼·d¦×Í6ên™²W¸^·[6ßY÷¯ÑÂy.|˜v³ò"76¹ñ…:òÊ O§k'¿ÿ\¨ITƒê ®¹V«8øÛÔ$ÀÈÂ!p0§ÿD…®1M1¢‚§ƒ BR¥ÓÓ¸^öe.NÎä~bûæM³À5PÕ€-Ö[ô>k ¬ª•(SkQ«8˜åJf•´E›óÍ%ô­˜æ]].o¤nÑFã¸ðnj’qƒi8mFí$œöe³²ç§!éTàl×Ð:ÝîËï3«÷#»¾í‹7õ8m¦ÔfИõŠ^©«†ª¸"ô%/‚Wr«áQ ̳ͯé«"p€À¥ƒüW«iCÒ¬…ŠÀ ^àTf´X»J ¶;D¨"K''"¥ûŠ[§Û}ù§£Nýõõ÷¡ µ³õviWN8©Ë9I›±Ñ7·Ýo_‹ú» pý‹ü7eyCà`šÎJöóGp†– à uy°]0ìrV¦âºItº¯¸Z¨Ú=$ôû¤Q&lnÞ­S67o¤mÑ5ÉÚ óŽFŒÀå¼m«¨éúE3½húÊéþª\ø€Àô#p7üßoÝ?zAäo$yà:\/oRäm¾7'_e7ÍNÏkäÍ-SÔ¨›·íœÙ¶íOW4Ó‹^ä­ä÷Ë@àz¸èï¿"÷Ž^‰îûû*p€À!p¬¸Ï§rÿçd@àúíϦe®âúÁ +¹.€À!p€À%`G|Ù¬å2bÌ.§‰=8Cà`„*k%HíR?=Ȱ*# p—î&ß&ï[\i«|hÙ¸}pQ7<’ ÇKÄM3±kn%ifõeÏVsð+*è²2ÏO •ú-*À‡P!pñ—ªW}Á¯„Ðð*3ŒÔբ׈@r´7äœDÚÚEÖlNmZµÛC5MUúdyM¦)ë„mÊO_þ8ëïÁ²`…ÍMóë—¼å«Àø œFÚTÎD¤$ÊÄà7¡¶+y£Ï5ЧËÉ:~m™æ÷¹Cà8®«Û¦SÛÔé×uÓrý\HMpÁì×~'丑léZöFLÄKÞÛ,ê¡BÔ¡æO-:­ge04MûÜÙZ‡‡À¥óp±5F+^’\[kÔ/_p•üÚ¥±§Âè bѽŸ1’·ZRKë®rA—î࿵ \R=A].´Œˆ_wQI£>! 
pᇌVThØ f¾–®ª÷SÁ[0Ó^ÿ»†-½å–Y#•\gH³e\-S€C§b(Ò¤5-4¡ p\z–urä•ȪÖiô(pu·Í†)¯¥ÂVðGººÈß|§õY¸o¬ë{f£lé 'púY–±ëH¨6­†Nšdeš4™úM¨²­Q$ FàƒIïW4Q8¿/ZÉô{[ì#g›G+FÒCëj_9mJ@àºCó½‰Ä©t "j2Ý šˆ™ >P!SÁÓþl:*U…0.J§£Ru=‘:;º@à¸ô%®aåÍÍËi³©¦12—Œ`͇ú«ùË:1+˜(`ÕáGq¹é¸Î$Î^Ð>j~j6•³ÓTÂtàƒ¦ 5ÚÔ"£”78˜Š>pcøÐ+0x¸ôÙ²#O»Y¯—¤£ê÷†ÀÜèx}+r!L£ÀÝñÞ­s†‹üÝ8@à8€®Ù°áe·È¹ £Aþþ‡À!p0÷þ p€Àù˜Tª˜Y‹~ ®¸eÓ+nÔjÞ¤©h‰/›+¦Yà4•ÇBŸÛ)éO}Ÿ°l#fú¢+«esÇ-Ú俜€ÀÜK9Þ ^Òb .ê¼™§’Îå’Óem-Õ†ÙÎB'gJ}•ß/ç—ó¾§¢"¸øNEWýaQ÷ám¿äO@à8€¡ œ™Š¹¢‘™º¥[Ð:¥n~Á«ÜМ±/¸mTŒÀUÌô’'“Vàì4-¿•}?ïw›1ë•ÍöÊfß%¯JDÃIÜ‚ù}rZÇUÿ.H pÀ(ÎJ[É«˜P7•FŠl5†²‘¯Àå-YjxߣÑNàüõ´¾ª¾Oø ¡ß%\ÓôëM¯˜ïµH’a@à8€‘œ)._54¼Ú§%/J—sËÕMß¹$ÓR\U/ –†À-ºm.øe½Ì2=+øûêRতX=n0h«øƒ ¼æÌœ–¼‘³rÌòk.ЯnMÎôGËÅ4—& œÙN5®IÓ¯ïêïÛ|Î'Ü¢™,úýí8`X·®X¼“åªz6ïĦä¢_Ë™A%•AO¼tú|§ÈzÌïW5ßuuƒŠ¤i­ø‘ÃÀ~ëÚ\L™/@à8€Q \¡ÝtÛçÍLËë@ýl×sëØèZÁÌ[ýiæåì¶ý÷Þ´œ÷ýJq9çÌ÷,Øïúüïíï×|ç‚ÿ÷@à8€¡ \ÆŠ3v) pÙx0’“ 8@à@à¦^àÞôæBS$†ËÛÞqô$p;Dâ`dìã<8€IãÿQSú‹i3þ‘IEND®B`‚manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/state.png0000664000175000017500000011321712301410454023441 0ustar chuckchuck00000000000000‰PNG  IHDRÛˆ `JmsRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÃÃÇo¨d•øIDATx^í½˜Eö÷Ÿ]`v‘ØDfâîBˆàƒ»[°Áƒ… IAƒ†¸NŒ$$Áƒ\qA–eq]Yvÿïó¼¿ß{þç[ÝÕ·ºnÛÌô¹sç„={çzßêoõ§Î©ªsþРAbóýۦѶ þÀÿ9ÿsnÝÿãWãåäÜÜÚŸ%÷¥¤¤¤¤êS ü¿ÿýŸÀŸëГ!K»6nLcÆÞB_|ù} û öµßÜçðš/¾üÒ±/üö9ß·/ø5)ØçüÕ°Ïø½Ùö9?–À>ã×TÙ>ã÷Û?øñDö~Û§YöúôSÇþþ÷Oé“ ûäïô·@û„ÿ„>þ›iãûŽ}ô±mÓ‡ÙGôÁ‡Aö!mý Û¶lý€²m+mÞn›¶l¡M›ClÓfÚĶ1Ð6цá¶~ÃFŠ· üš ´n}¼­]·ž’Úšµë¨6,éñŽnÝúõÜ&°èvAÛE¶ñF>lçiÓ&>¯°àó¤‰Í¬[K[¶neÍÁüÚÛúÁ Òó‡}”¥ÿ¸?|ô±¶LÑ}(sk÷3§ï9–Ý?ûíßtÊ)§Ðÿý¿ÿ—þçþ§F쨱ï›xÙyô?5øÛÒlÃÓ9€þýûï4zÌXÚeׯ´MÃF[µS«h´í¶tÛm·Óo½M÷?ø]3ê&º–íºQ£éºFÓÈÇ(Ãß׺Y=wÍõ7Ò5#o «¯EWÁ®Ågî¸özçq×®ÆkÙôûðÞkù3<Ã÷ÝbêX²Í÷~ã³ÔwĘ>óV+Žvå5ávÅÕ#)c×ñߎ]~ÕµvÙˆkvé•W{vÉWìâË+»l„g]z%]xé»ärºvñeÊ.(»”οȱó.¼DÙ¹”);û¼‹èìá*;ãœóéôsΣÓÎN§žy|úÙtâ©g*;þ”Ó験N£cN(º÷þ4d¯ýhðžûÒî{ìCƒ†íC» Ý[Ù€!{*ë?xê·û0ê7h(õÝͱ>‡Pg¿AÊzôÛº÷HÝú PÖµ÷êÒ«?uîÑWY§î}”uìÖ›­—gºö$Xû.=©]çžµéØ`%º*kÝ®³ÏZµíD­ÚtTÖ²MjQÒžŠŠÛSóÖíÕyÅyî­Î»ÖAWÖ¬[ïþÔuÒƒ5Ó“µÓ«ÿ ÖÓîÊúî6Di­?knkoÀà=ià½X“{±>÷V½B·Ð/t¼Ç¾²¶R­Ã ýývìîrßÐv0÷—C¸ßÀå>t÷¥Ò£Ž£Ã>^Ý¢¿yvÿÍvô 
'+;æÄS”{Ò©êúܾc'5p{â©§ÙžñìIþûɧŸõì)þ[Ù3Ùöô³Ë)Êžyö9‚½¿f-¸ê*5XfùsôìòçÃí9~.Æ–?ÿ…Ùšuëèá Π­«VÐê)+[3õaZ;ͱ l]ÛÊ·°Ù>ž6ží!ú„íïlŸ&° ×á1Øß”á3ÇÓGl°meÛäÚ¾]çÚÚ©ãé½)ãiËsOÓÉûïEソ†n¿ã.ZõÊkt+ó\ÕάŠƒÀ·Ý^N /qÁè@R€4 åɨT-`{• X´€*À0^wÃÍÊF²]Óu3l,Ý0z,Ý8ú–`»™°nâ÷Y6 Ÿ™À®¿q4™6òF>&Ã0˜ˆ²0GmšøH¹k í+î«Öl—¸VA9ÎW1¤çÊ.¾`¾R™ó%WÐ_îÙye—Ðð‹à‹éð™ç^@§Ÿ èžK'ŸÁÀ=íL:á”3踓àjвê"À=Œ/¡€=8°¸èàäö@ê© `¸â‚ÃÅMÁ•­¯ìW Ø^ýw÷ 뀕/ l€«lÏ~>Èâ‚‹ oæ" ¸òE™ €mÛ©»g²Åí»LCVÁÕ,àªÍ„¬¬†¬lã¢â@ÀšpÅßIL Ó°Û?ýµ1Áâ^d bÀæý`·àA[3ÇviÎ fÛµ9×MÃWà5áëد†/À«á« ð–tx»1x»+ð*øò@JƒZx»°F`]{õóÐÕàí3p°Þ=¾¼{zàÅ1 ¼\Bÿlî{Сt^ ߃x€ªÁ«¡«Á è¢ïyvì ºG’2€÷õ7ߢvØ>þäSlO+sÀ«¡ ððõÀ[Iø¶—_q%Ãöï ¾ÊºQ–ä¤Íyhøé´å¥èÝGî£Õ®½Ï·k'Àî¥ ï¥l›Ù¶Nrì#¶]û„o“Ù8~]¶}ÌiûˆÿÞʶ…móÄqü½ãh=ÛZ¶5î§÷ÙV»¶ñé'èø}ö¤Õッœ 8CsçÑíåwPCŽ{ž-\Þ7ø$–úVÃ3ã¡2ˆ]¯Ïç¹´–«ËÐ °¹`U@s Ý<ö6}k9¹­œÆÞ~ÝR~'ÝZ~ÝÊ£Ÿá± »·ì–Ûï¤$6ö¶;È´18ÃFßz;[¶Ý|ËítwÆnå¿]÷f nÀ ‚MÐ&ÚÐF0dzǀÄñððM6ÓÏ@û:†¶ãY_v`}5]Ì^3¼e€ðpïÙç±·{.C÷ìsé”3Îq€Ë°=–=Û£àÑr'/eØšž¬öb•'ëBV{²/6²Ž'뇬öbƒ kz±Ú“5AkBV{³ ´ìÉjSÀ €,€éɺ^¬òdK2ž,@«M{µa-€ ƒ7«Íôjñ·íÙ.‰gUÔQàµçùêãÔÐ5‡®aÊóu¡k·ªÐl5p‹;Àãí¬<^Xtµ·«¡«=]íí†Cw˜‚î@ޏÀÓÕÞ.€]7 º®†.`k×]î‹èº¯¿ñ¦[×® [üm^oe½Ý÷Ö¬¡‹/¹TÁž°ÜèV¸€ígD›^|ŽÞ¼ÿNeï²­f{ÿÁ;i ÛZ¶ ÝI¾ƒ6³muí£ñwÐÇÊÊ铆×ýM½îÏœ÷ßI±}øðüÙwÒ¶®mxè.Z˶†íýî¢ÕlﲽŶññ%tÜ^C<ظ°W^}§f›d`;†cÌ÷Üÿ `Ok‡M•@ û[‡Ž•Gk„ŠMÈÂk½\V+àyû÷Ðw£;ï¹îº÷~ºû¾Ô1Œ{ࡌÝÏ[vï}RÝÃÇÙÝã Ûîw?ùì^O°áxƒ¬ü®{)Èn¿ëRvçÝžÝÆƒ ˜@ÜÂØX(dŒÜNcø¦¡ýnëß»†¹‚8›†7Âý5Bü8¯o_zÅÕTÆð<ê~C÷ü‹”§{ ‡•Oâò±'qùø“¹sŸ¨<[À!c€ÈjÐ"d¦ÃgÑÓõ¼Y+T Èšž¬&ðdáÅšž¬.¶=Ù(ȶC¨˜-È“Õáâ(u@ÛÁ¬ Û0¯Öönël“†œÃ¼ÝXè&ðtn®Œ§kC×ôtáí†yº¯éé¸vˆY{¹ˆ² ¼ àšÐ5ÃË€®^Ö!fô x¹ÚÓÝŸ§Y‚¼Ü0è¢*/×îk ¶ÕE}áâÇhácK|¶è±¥[lÚ’e´˜í1mK§Ç,[²ìqrì Ÿ½ýλtÁ…ñÚ‹iéãOdÙ²'žd¢œŽ?éÂDvÃMåôø<8Ц ޽»ú=ºÿÔ£iÃò§éÕ»ÆÐëlo²½u÷z—m5Ûû÷Œ¡µ®m¼w Á¶°m½w4}ÈöÛÇ®»ì >¦ <ÃýîMã8èüJ~ÏeªÏ‚¥ÍlÙ6°­uí}¾]}ÏXz‡í-ØÝcé ¶µ-¤c† Ê‚ímÌ:Ìá*ÏöÛ4T?°ô<$º¾Ö­ÛábxaðÐི7ßr›ò^á¹*ÀÞ3Žw¿ë}>L÷?ü=8~=ôÈ$?q2=2qJ¨Ÿ0™l{xÂ$Jb=2‘¿#ÚpaöçýÙx~<ÛðÛ`zðp/lÓƒsàßþÜf0èhO˜Š¸Þÿ-| ƒçŽèÎÎæ»õ9ÆœñE,¸óË.£sp¹Ü¸˜·=’çlÑÁ5láÍpˆ37«çc½Y†¬.òd“†‹MÈêùX´nØXÏÉú¼Xöd=È"dÌÖ†½ØÌDz»s²²6l[sè¦çf1?ëÌÑf,ȳ5çiµGk†’õ¼­ Þ¸ùÚ°yÛ¤s¶IÂÈA¯©®§ øfæw3¡å$áe ÝÀùݘ¹]g^—çÝ+^ótý¡å t1õ¡½\3´¬×&àÓ)®†®örõœ®ZtƒBË º.pÛ¶í;Л“|Æß?%ÄplÚ&MA“¦NÏû$Ëð{´M˜<•”MšâÙ#ü7l<ÿvsà  
ö€@Ãÿ‡'p»MÈÿÁñ40ϺãáßÅxáh{xÉôÜÀ!|„¨]xº˜ó=ŸÅ§€Ë ¨°p !eÌ Î#jÌ×\z´Ze.~r>9ž¬š“ \ø”™—Å…)i¸Ø^ôä-|` Èš Õs³6dMв°ÖîÜldh1?èBÖ„­éÕâoÛ³pMÐÆÁ6Éâ¨4a›t7jqUeÃËt¹Ü¤sº•nfN·3‡˜ÍUx“„—íùܨ…TAó¹fhYC×ôrET™Ð²ééb.7h>!e½ ÐÕ°}iå*z˜e&òµΊßQÎËdßõ×¢‰|M²mÒ”©|-ƒe®i“§N£V¼D§žvºšOÅýÉ|ýÓ¦¯™€@›ä^;•¯¿Sgø ×ñ/¯¤;ßÞ_²ˆž»æzm…k+¯»„V]w1½ÆöÆueô6Û;lï,£÷¯/£ulë•]D]»óÖN¾ˆð:ç=xÿÅüYóg:¶ší‘Ó[lo°½6òz…mÕµ—ÒK®½È·ïTL§Ò¾=³`‹P2v«T ¶mпW^y…ÆŽuÜfx³ðÆ&†W/€&3¸¿³*x„†QFbu-´,{$†‘Ùœ¤fîð=¦Íæû¡æŽ1rÄ1j›Y1—möšÉ6Cÿ6ü>˜9ˆ° h‡),>{à‡<ƒ~Êô,kx;gÏŸ à~püD^U>žÛÁËíó€ÂùX†ðò<·«‹2V-c…2V>q̉lÝ0²Zeì®06ÃÅzu1F÷•ñd£BÅldõÜl`moVÃV×\iìz²²­ [Ó«5CÊæœ­½Ù %yºUY•µBÙöD“ÌÏVÆëMÓÓÅ C·²àÍZÉlyºA ©lo7ÉB*ÓÓÕÐżn§kÏçjOW‡–õêå /ÀÕÐÅÊe¸X¹lÎç*ؾþ†òlŸqO§=ÀýÛ±q˜‚C´§áüÆÑCD­èm"ºèÚCãù­Œ£lËyáÒI'Ÿ¢ òð# sAϰ7x…”5Œñœ<2‰†=ÿ‹tç!{Ò{‹çÓ³—Ÿ¯ì9¶ØV\y>½|åpe¯±½1b8½ÅöÎUÃi5Û¶µW;¶Þµu|{çðó²€ë-¯]Y{Õ¹ê=úýk®>V»öß¾uÕyôÛk#Σ•l/³½tÅô"ÛólËÙÞš1™íÕ•Ûé=5WkZj°u²\Àرd¶ün´§ÂCƒ' ÏЄ,B],X¸X…°"Ú³Eüw€Ùaˆ û ø³²Œ¿ߣm>‡BBÍ™Ì[°ˆÑ±¹|¬æ†gæÌÏÝTÌ]À€_à@}Žß RÆ×æ{ƒ›mº‚õle¨(€‚ôtƒÚôÀÊмð”ïcè"T OáfÌý"´ ‹©°’yø…«ÕÊðn;ù45w‹ER•òö7|¼—»§:žldõåÁZž¬³…Ç1„µ7 ïU Y¬6Ö+Žu¸Ø…«†¬ Û0Èšaä°ÅQa° ›¿­Œ— èÆmª H“¾6(tÔÓ5·©ßÊž.¬ºÐ [ÁœY¹ìé†Íéš ©¢<Ý •Ë&t5põ|®oXhÙžÏ5P™[…^eضi×A-TºýN¬}Ý̓imX s/Ýé3ž†â¾o¯E¹›§ª0¥§¦õøú`š9ÛSO?CÇaîVÝ»}klT0ûùçŸé—_~Qã1Óìçü]{€Úð›ÊL«CO]t–²gØ–³=ñYôâÅgÐ ¶Ul¯]r½ÁöÖ¥gÐ;lï±½»ì ZkØþûN^jŽƒ‚ò³Ïá÷œî½ï…½ÇöÎeg*{vé™ôÛ+—œI/±½ÈöBÙÙôÛ³lO³½1õ:¸{§ôa‹p³öl5h·Y°h1g‰.ü¯ Ú›…wŠy€G¨‹ó¤>Oà/]ö¤2{¢÷ã üD¦&ÿ sþðúƒiÃn¢©KÅQæ,2¸Îâ÷žu¯³à¶ÑoKèQžkðLžanš ws¾Ïcs Íž»aÊc7¼qí…ÒïÚð¬µ7­ÀìxÐÓÊhg´7<^\ßã| ¬<†çsuHádìóÅþ]¬R>•÷â»=ša{ø1Ç«…QûTÊsµ‡©½…¥û÷É&kÐêùX2Îòb­9Ù ÈjV-€²æfM/V‡Žd]ÐbKö€íhÛä¾íhÛL¶Ù}ÛÒÜþmiAÿ6´¸ -PBO ,¡gØžg{‘í%Øn~sèÁ¾cÔÇ9š_a½÷Ÿg[ÎöÌnmè ¶%ùû´¡G´¥yÚQE¿v4£_{šÆ6…mrßöôÌÈ+iðÎÛÑjžŸMͳÅÅYÃÖ e¯Ü-Àkz¶˜;„W…‹=.ú‹ÁV¸-¼R€«ãÙe?Å+Þ¸ mÆÍ{ÐöM3Ü×+Hß?\AvÈM³øsyžkÓnʧ{Vã9«ó2¯]¼ä~:‹ÐYã–Ñ"†s ¹v@lxäü7Ž1€¦ÑuCø˜®Ÿ–ñ¦Mï™%Ø3Âê¼h/e:ì­ÃÚ:¤m„¯á ÃëE{cŽó:#Á»õÂɼp ó·˜Ç*eìÏ=çü2µ:ûo±çsEH^±/‡ºö>à`­“Âc¯¬o…±µO66…áÅš«‹mÈš€ÕòfMÀzûgyÛˆ Zu߬ Z:6XàosÎÖLf[{ßmÜV š‚­½ 9)lÃæuƒ¶)Å­`Vóº!ž®½ *lë/›Ðm®B̼’‹Ûp~]è&™ÓÕÚ3W.csXr ]½rÙIŒ‘YD6Ÿ«CËI¡ûêk¯S›¶í•—¹×~*Û‰5`ûDûx°2{PÔâ%ò Ãjì>’A× 
¡gï¾|[L¥GÃvt–á»AðKÎ$ˆU¾A€7Ÿ?‚÷ïÃl¸ßÅ‹@¯i¾­¸ëVz¨[+z„mB÷V4™mÛŒî-©¢gKš×³-ìÙœ–ôjNK{7§§Ø–³=Ïö¢¶>ü7Û˜ƒTÇóùçÎñiÃñàXGóó/¸¯Åëñ÷r¶gØžb[ƶ¸w-ìUDó{µ¤9½ZÑÌ­hjÖ4™m"Û„î­é©«/¥Ai˜luv$¶€, Ð…´>ز àÕbÑôjNÅÜ,¹ÃË$Ú'°_Ì2½q;èvÖè=¨Á°±4“—Ž/Söû‡t΃ ìYch˜áÙžý ƒøAñði)/Y÷™÷%ʳ~ÎfØž}¿)¼ö¸[Êž¶2Ò¯Y{ÄÎí º~Hׄ®ZF_23QièêT¸µçr1Ÿû ö¤m;âÝ“#M{óÔll3‚ñ ÀüôÓOô¯ý+ÖØ: ´4dE¦+6„¨Û…€íQØ àrö+Ãð]H ûé§ŸªÛ Ãs0ÎYì!vMÁ¶h{zéîÛh|÷b†X1옦ô,¦él3z¶¦Š^­ôönAKû´ e}[ÐSlËÙžg{Ñ0€ÇöÉ'ÎwÃN8Ù¬¸/öã÷²½À¶œíY¶§Ø–±-æÏ\Ø»%ÍïÝŠæô.¦™½ŠiZ¯šÂ6©g c =} Ãö¯ª[Ö^y¬AkÞâ5¼z‘”Bvæjç0$î¤Ó‚gžuŽò@‡Ý8‹–1÷0C¾{Ž¥Y:YÅØÀçfÙ“¸F‚:6|Îð‡ž¦Ycß1 ߇i8Cwèè ZöÄÃt¿æœ±Ÿë!þ{8=€½]³ñ{Ò¹ÃùsôwïÁïå=`ËžxÈ-¯ÒõžcH/a(/yüA+D=œÆ©ýiÎãgŸãxÖÊιŸ3iÔ°?¸mˆÇÏ¡{°?î^ç÷{vÖ½î|ò½t¦jŸ³½çœBó&^OƒÝןv'‰M¢«ÿv¿f¢o>s¿)Oåù\,ªz˜·La%ó=ÜA±…°Å¶ ‘¼ž-ÂÈz²K!Œœ™³=Jyµ{ì{{´û:iÕÅ*ëS5]0ò}wЄþhÂÀ4™m ÛŒÝ:ÐìAh.Û|¶ElKÙg{rpzvp{znH{zqH;Ïp\~˜9&€vté¡ øúxq‹û/å÷±½0´=-g{fHzŠmÙîh Û"¶»w¤9l³ÙfêHSwëHSØ&²={Õ4hçí«[ Z¬à@“@Vô-Œâ‹?ÿ(Ø*HžM÷²×÷ØÒ•7:lô7Ïç\½ôüñœçs<‡çÆà9dI™c<Çpu=ÛYH[Æp~p¸ûZô°ç1T‘ÎlÝ< °Åßïtólþ›a;Ç8ÃÓ5Zݦ (ã‡ÓýðzÙ#nÐ`Ý8Ë qg6;ðЛg)Ø"ôÜ Á9t/BÏ÷Ã¥QÓùoÛ³z# áßuæ½1cÑÖ½Ü&3OuÿÆçœEw²G;aä`þPL×N|”Æ_Ë÷w¿ŽÆÏŸH×ìÞ€v»f‚·¢Ú[1Íí¬çm1_ØbE8­!y²]!CU¶—ÒYœGùdÎ(užó¥Ÿp:ð!êÇù£ÅÃ}Øq'!ãœcÇŸÌùÕOƤLòˆÈ¿OÆšäf϶û8'ÿÕmv¦—¾‡&ïÙ›¦ìÕ‡¦±Í`›ÍV±WošÏ¶pß^´h¿´”í ¶gèAϲ=ÇöÛ‹®>êßqá>ž}töã/Èïc{m¹kOíϟ϶”m1¾wþÞ}hÛì½ûÒL¶élSÙ–aGh׫[:¶CéyZg®Ö1Ó£Õ^-Gy°e ž– Û3îyŒ3pù¾sÔôryr^χÏãÇÎã…î-¯Š{R›~Œ_ïx²Ú ~:Û‡ø}x\ÁÖ¬÷·ãÙe€Þ4 ž¬c÷ŸË¿ç܇<Ø"Ç—²7«<Ýœ,*÷ Ïx¤øýj~xÙÎ<ï}Jæðòâ¥÷Ó™|ÿLÌû2lG!Œ|à µÈjò¨!Ô`È 4ÉÝà àNÉ ExÐî#';Í٣ݽÁ`ºfÂ'„|çéÜ6§ÓmJVÛ’Ô*gwkæpgó½NÔ^ó€KéNÈqåQ|ÌGŒPûŸ±@ ¦ô~Ûá\Ä@­FfØb52’¢rÄÑœÈâölU ÝS-¢P@Xîâ´!kÏÇjÀêäj!”Îiì.~±A‹û€­ Z Xó_ ÚªÂÖm]‡mu@[™B¡ƒ†o×^Á5¯¼W×-AU ‹ÜËAsºæ|®™þўϵ3QÍçš ¨Ì\Ë€-æl'MžÁ •ÎàLoئwª²#E¿åçü¸iÇœˆí|Èw–²ãNÂŽ†ñ)X#]ë¹ÊN<:vÒiçñš›‰4€³g=þijœiî|Ÿzæœ çε~aˆ]Ä;vúÙ;ãœ2Þf˜m=2•FvhF«&?HÓJ3Øf6Œ*ØæÎvÄ0ZÄöØ‘Ci)ÛlO3”ž=f½pÌîô±»Ó ¶—ÙVº·øÛ´•Çñ}¶—Ü[ü Ó¿ÈÖ;DÙSüùO5”–±-f[tä0ZÀ6ÿˆ=¨¢tš ;jzþž[iX“]*[s~Öôlí•Ǧk‡™õÞÚø"ä ˜³luù4þ°Óïv¶ú<¦<@„v*€%`«oêOxé°±†ªë騩.ž5z˜¨02«»sJ:€ÕQìI k/|2·ïØs²aõ­2NYg^Î4@׆¬¾àúnz·AÛ€‚’]íà ›Ë òr£ `7É*åÊìÅÕ°¬l"Œ$û{õqÄ.¦ra‹ðruC̺òPVV*-«ÁU äé†:À|®žÓ5QÅUÒÅ tƒ•¯¼Jºtåõ<^Ðx™ Êá| @rbšÓÎ÷ìÔ3xwi 
½S¿³±6õs.æ<éh_ªì,mÃ/ãH×e¼‹d: ¶/Wz¿¯– ¿Û¯äŠc#‚í"~ܵóË0]åØcQ¦ß&MM#»•Ðë3'ÓÜ“£ylóO•Ò¢S£ÇØ–²=~ú¡ôô™‡Ò³lÏŸy0½pöÁôÒ9Ó˰s¦W\{•om8¿†íUËðØJ¿ïEþ,ØógJËÙža{âŒCiÙi‡Ñbלz8Á ?™–k×JÁ9èí³µAë‡møÊcäåÅ< Î…=]XŒ‡X 3q2oû™ænû©¸CÁäô»°í‡W#c› æ]yNÔYå„}šÊåçÎÃßOó¨ ºI=ÇÞ'ƒt&@ªá ¡àa¼:™ÃÂ÷±—ê@f8 Uyz žé†‚gަ!ðÌ®já“{ßY億ϺO/‚rW'³×zïÙüž³1ËÛ•¦ßHƒùuðXñ;”'«à Ggàw²¿`¯FÌÞ*‡‰ç3`çOÅÞ*¿ï:ž‹U{xïVm2è:Úü»èTþûÔ;œ=»\G»5ØFŒw@ÝvÃö4ºeöxºrÐhЈ‡½-?zîäiÓU–*$·ÐÛ~B¾…³I¡BÈXU®ækyqÔYÃ1BÎ!¡Ó9”Ä!ä#ŽUé÷äEC8 #¼Z\$PîÎ\üXæ.áÙš‚¬>6ÃÆ  É…BZ´BYï¿­lH¹6a«çq«;Ÿ[éÌTls]œ'5ïÝ 4f…!{»Pü|nøV!³œŸéå"‚µÏþ¼ðç„y-Ætî¿×ÓYç]ÎÀ¼Ô3@Áιé+3Æ`<—á7ܵó|Ê|ç+øa±$Û%×*›:cí¹ÏÁôÜ +y?þuTvùõYv1ƒ¶K®E¶]:âÒvÿíÙU7rÁ”ŒÍ¬X@7öåT”ófÒ¢óN¦EçŸL"-½àDzœíÉ‹Nâ}·'ñÞÖ“è…‹Oâ=·'ÒË—žH«.;‘^½üeo\~½iÙ[|?Ë®àÇì5~ìµ+N Wø³V^Οɉ¼¯v-çïÄw?uáI´ vñiôäˆ èe.”ðÐͼ³ãýUò]öT70©Eh5lõ*csÕ±¹òž.âȃŒ\½wÜ5Ž7I»[ø‚„*k” %ß®`Ï Ûb¦EÓnbð¡Ù¡£i:öÜ«dõ=w3Mã9gÞôåqªçÜç$Øžz#oñ9÷•x;³Ÿûxµ¹O¿‰A9”ó–g‘’æU^©'oëQɾÇe@ÊàÉ “ô{à5ïÎ[zœyWxì Sµ¢ØõPïrVÏ»óLwîõLº½ªñ›O½ƒf*oµœNáÏ9ù6cKÏC×ÐÀƒèŠœ=´ÓÆžÂï;…FO€.Ø€\ñ€j[JÒÙ_˫٫Ŗd‘R«9£Z\Ïu‰Q «‘» -”W{ª³ ™£àÅ_aÅ$VQb{@«kÊš{eò=%¢¨Ê¢§¨ùØÌ¼l°[%к^N˜§k¯B¶H%õlmàjO×®kg› ªåÝV&åc\è8-à†>ˆôvC<ݨUÌæ¼ne¶ UÖÓ +ë´ˆJ×ÐM²?×^µŒ…ŒÈ]Œb¥G©¦•–<þ4à—ò€~);/Ë[ {œ¯­Ž-Zò„ß–>ÉÓ\ڞ⿟âi0Ç–,{Ú1þ\ØA¼ðjëóµøÇžx†§ÖžõÛ“|ߵǟ\ÎŽQ€=ŹöÄSÏñúמæ[×nß½mzå%zsÆz›íл3{íý™hÍlÇ6̯lÛf×¶ðmmådzl.?`[ø±Íl]Ãw¬­˜@kgñ÷²­Ö6w-Ÿ6Ƽ†ëÖQÕ ÆöJ [íáfÁ^Nl‘a(hN6hŽ5i¢¼å6'²JjÁ‹rJ†w;€LGH¼ ænuö(^V®>M%}ûSo¾5 …Yß~¨RÖ 'ÏHg´oN‡·-¢ÃÛð-ÛmÙÚÑQíZÐQí[ÒÑZÒ1ZÑ1]ŠéØ.%t\W¶nméøîméÄîíèÄí]ë@'õèÀÖ3ó÷ñüø xÎz ÇŸq\÷ötl·vtL×¶t4ÏQlGv*¦#:¶¦Ò­è°ö-”Õ³=bZþü \4þnh5tÁVƒ6(rlá1¡ä.ê'›y‘½¹[7œ¬’[0`œ=·\q‚a¥S6šéãR.: H»È£¿y0·’…Þ§êK»È 0zÍD7¡„——Ûj@šæesòK8©ͼɘCÅ^W/?2IC£v\HâÂÅQÛwªëÉV²qÞ®^¡,°m¢Ò=Á6ÍÐrä¼® [7 ºÕñt3íàj òtÃc˜†ô"ª°âõðtÑ·Z¶ó-‡%Ä0kçÚ¨€F—ð³ ØåûT­\£^îQœz†ª_0$9ö¤Syõòiʰž†Ê`0ìLJ! 
®#¦rÆÙªl' {÷mCòm˜Â2íÌsÏçh[°am‰iH1k*˜%1$ð± ÓiAˆVÖLè†z¶&hMàF•ÓC¢«¹€ü5#¹–-Wy¸7ߢL¡®êmåwó®®þÃó¸ xdðÎÀ`¾PdÙeóbËä™eóŒryÙ%ò0·i™b¦4žY&ÏG³<ƒÒñ0Ù(¹Ò§ETæÖÅõÕÀu=N]O“çRaH®êØ24501ÇZÎ…çafÝZU¯VšGqymå*|ïÔ¯½EE°âZ^u‚PÝÅhXoñ9å ^îªSNi‘ø…á÷Ø™¢xAƒV­×^µTà@/ 2KøÙusÍ Tfa»’ ݪе‹ûºAÀÅcaЭpߪN¸° mØcj~÷*Ç®¼z¤Jû§{íõ7©‹ý 7UÉVËó¸¨8s;Ã@¹[•‡ârP¨ú  È£.cp!øì¯Z˜e Àgc̹b`ƒh¶ï¨úÂnýáËù|àœ]Ê‹ŸP>  .Æ<³’ÛQ^.<\ÓËÕn—[ÈÀ„«âáfÁð¬ l/q­[¸€îU®—{ ]„–‘DåG3\à•</¶£¾N(垜Ш‚™öø<ϯ 7 ;ÜÂ3ÔVÎ+£m»=m|¿2ÎÿiÚm|lÀn¹Ý1GYtA ϰTÀäß ÅÀR“s–0 H`&V;Àt¡éž ¬†]zåÕj‹¬ì²lWò2ü+”§ŠÕ‰( Âk=—‚µ®§òÂ'U^Õ©åïÇä…gOÖ-§ ë&­PÛ{´½8¬Õ«!‹ CÈjoÖž“õÂÅ)A¶²žkÜëí9Û$àÍÅ®#h…²éÝÆy¹q ’T 2Aä‘F…–ÓžÓÍš×Ý¥)íÀ¦CËq!æªC·˜uHÝ ð²™ÊLŠ6Ÿ›$´UÜ@ïËÕÙ§]„•aa^n®ÃÊQ^nU<\x¿u!¬lÁö µh&1l´€­ Ü,O—½.x^ðÄ­häΜ.¼6„;'x½Ø.ˆ•áoÃð|˜á3´aŽR ¥W'Ä0C˺As¹I¼\3¬l{¹Gò¢I3´¬½Ü¸yÜ °2`[aå$!e¼&Wó¸ÿøìsÚ¦a#jðÇmÒç_$„­Y Û [ ] „8¯ºŽ=^/¼8 _ /x|×ks=@<æ™BÕ¡Tó ´ioÑñu¨Õ‹ï¿z$8žŒañ¶?9[ 2æ„e¯S°¼ °d/Óó4˜XÕ{±ámÂãD’E çl"ð8•]À^'àÉ+€ag²÷ ;œÆE-YPÀ“@°€çq O¬@!vä-D‘Rñp^à˜¢“ Ó ,ŒÅvb¸îÃpÅêbTîÑ€EŽc¬4Öó²ðd{ôãm=.dmІAÖ^üäÛÊÃéK\«N¨8Î+­Êóq«“x¶Y¯I0‡W)ÈN„V„>l×Þ“[EékÚÓõy»|ÓòvƒÃËÙž®^D¥“cØeýª2ŸknJ²€*,†®&×,l`{¹¸Ž$®^¬VNÛg–?¯öhyûµô¾-÷y:“š³ïË4ÿçêÏQßñ8–¨ï|™÷˜e[fÚË+ùoðOí¥—[áÙJþ{%½øìezAÛ þ{ÅK޽ø={a…²ç^xQÙrop~ö¹Œ¡aO/Žž~Ö±§žY®ìɧŸUžpíq®l¤ÒRº…T-.| « !C–*r  Ús‚•Ð#S¸^«Wæd±r’møëáêíU*ϲQŒ^m‘2s-»E ô¶(sï0êå¯&ŸajµyÀÖ)ßjsÎ2†LciV¹×ˆ«à‘¼¥ª†½Öq¦·”9·Ó²WÓs’Ô2¶m¯¨¯Œa{šß&ó}¿©ýßqÆ‹ÇGYÜû+ù¼}Œê¾»›@߆¶Ãdn#6_ÛMá¶ô™ÓææyÐçÌwÞ íAÏzŸ=ú€ê'³`¼/6Gm=DC?DT6—·Er?…!}«®i­ûµ“­.“c]å)à¤ó_—ó}e|½|îy¾~Úæ^Sk+®±{þE¾î&4ïš­¯Ý o_äë}–1 ÀÛV0+çkvЪXØ¢®y=5nZ$&m  ˆD¢ Œ¸êš@àÆÂ$@µ¿VžË””¶¶ ˆDuSÿçÿüúòË/éÛo¿¥Ÿ~ú‰~þùgŸýòË/ê>nMûõ×_é·ß~£~øvmÒ\y¶w [žëØÖMÑHg—ó& ˆ*§4ak/œ ‡­»J`[¹“%â–ö ˆDuSiÃÖn6l±ÅÅXq,°­›¢‘Î.çM4  TN¹€­Nû˜[lÏÁV ܼ„mi9­X±Â±Š2™OFáûÊX÷2ª(/¥ÜŽeÝ+÷ÞÊ|¼VÚV4 ¨C¨UØb{M^-(VTPYw0¥å+¨¼4lº—U(@'}}¥V‡ÕÀ…-Ú£&Ûçj…qîìö{Þ|=^[ ZƒÐaUŽ?éûëjûDõ9é¿É®s{ݪklZ°Å6#»A¨g«TÔlË*Ê©4¤aÑá|©Jzg5˜¨ãÏ‹Ž€È{¶5Ñöï-«È ”‚Ú"îù$íWÛíŸäû³t\É IußÕŽIŽ?ÉyÈÅkjC³¹øò™ù9pˆƒí¿ÿýoúÏþC¸Z Øê´‘addX ƒmY…¾ð2£l?õkœPo68CŸW^«6nMï«´\_¬K©Ü}†¯ó¹ü|™f¶<)ÕYËÌï±ŽÏ Q[Þ‡7².7?ßx‚ãG' üýú½ 
#çwñçzÇb·¡þíგ°Îìx–U"{þ#ÚÏùí|~JÍö÷Ã7îùHÏ.ªý+Ѿæù)/uÛÚh#_òãÞ!áùG;T–aïOâùúôÇç£Bÿ¶J(,ìÏ0µe<§ú´§¿BÛ×m·œõ_5àÉ\WÌk‰7Àú}î€)´}cT±×¼?¬%Õw‚ã¯Ï(Øbk@Û«W¯,àÚ[LØêr}~Ïöó/¼|¿:¥a g«O¸îH|_B³=Os^5îyç‚ [/|ly¶ŽÐmfî;‚ÎtîlO¹ÔçUãóLØ;ï÷žŒ:þÈßïv„Ì…ÈùžìÑ|lís£>3{@cÎ \*9ÊŒ8ÿ NfTÂn?o@äN¨¶q|qÏë A¬BÛ?AûúÏOh1ßí^8m=Åé7Éñ'¹ØÅÁ: Ʀ^جÁVu<[;2¡Ú' ¸nÿ3χ۞êx"Ú7×ý×§W÷øÌöŠû}v»µo|¨<âúÕ¿éÛ?°É:?•Œ°$Ñj]zMl5hKJJh·Ýv£ÓN;ÍÜ8ظ>Ø~[N{˜5gºÕ o¬æ]Ôãž÷F†ñ°õD[{Ò„U¸ìß0º·aëûü€¶ˆ¼Ø-B_û}ÑI:œÎ”ñp²Ã¶ú‚P؆Î7Æ´Ÿ _í噃µ¨ó—V‘íÕ¾8vÛËÇÀÂ{ŒÛ7A ¬â`wŠ{øó¦çyª|ÄDoˆgœÝò^gk3¾}sÚο¯ý¾øöƒmäõ%ª™ÇïNùúW¢ã¯äÀ»Àà[ ÚvíÚÑ¡‡J£F¢qã¸tëwxÀõÁ¶is•Ê×®ôÏ>ËTý±a‹äü+󶾎m\ƒæü’Ã6+ó¢^w`×Qì AfJ îâî{>tž<¾ýâÎOÜó5[ß8ùîÙfgß`ÂÑO’ÁB°^’µ õ¬âß ÛxýE¾?d°•XÆ_’ö­:lc~_l"O0X¬Ôµ À¡ãN8FŽI3fÌ eË–ÑsÏ=G«V­¢åË—+àÁõÄMàÂöâË商­ž“Ó!B= Ãlø.¸*d¶5Å¡Âa±Ju¶€0WìÈÚnëø³abüþØÎ¢aZõ9[s¤[å‹jTd#`ŽÎ¼ fŸ 0rÖgd{ZÑadÃc2Û?AûÚçÇf¯òuÎ…ÿ7†|¿qaŠóLã.xqï{>K†¤¿3ªÿÅWÜ*hÿ4ŒÓvvXÛ5ñ·o®û¯Ý>ö´OÜïKÒ¾U†­=°¯O ôwüqç·ÐŸòlÕ Ãb©0ØêJpNÙòlu©9†Ê9™0r°g䯄ƒBMYÛO|‹¬ mÀÅپЕó"*sЪ{q4¾'è5˜ÛÕŸ…ïHò~%ÆØã÷/PRÇo¶›»Ö^ˆ–ùÕƒ­W¶ñçßl#»ýôÖÿùñoã2ßãœC3ÔüýYaÊ öOÚ¾¶~y1œí ø¸ùA}þ¨·÷þ¸çíÅyÁaäHý&ðdìöÑç'iÿ kß$ïÒ_’÷Û×§ , ³ö¢‡ý>'*`/@L’Or|¡¿Ï\ôsý¨Î1ÖGØšù‘åFvÃÈðlÍ‚õl?gÀrtÃü° SÖÌó>ÁYs il)tA9°•R²ó,픬j¦ï×ô±ÄEjúxäûr«³¸­?I`Û¸2°-»ìJÒöòªWò+©EÄÈÚ¿ä^2KIǬ^Ç´· H{V¯=ëJûÉu¤~œç = l„®êJG–㬿Yνœ{Ñ@~k fa{ÙÏ«…wûòʺãÙŠó[Èr~êðù Ý6¢ç(“ÏMŠê° Üñ©Qتðñ¥Nù¢K¯ —V®ª3adéÄÒ‰E¢Ñ€h ªH ¶Ï¿ø}ÞE ¤>û\VÙ%Ž lE¸U®¼O´# Ô% ¤ ۳ϻÐnÖjdíÑ*ï–MÂÈÒYêRg‘c½ŠDUÕ@Z°E)sëOÀ>ÛϽð±·Yæl¥eÏÓTµcÊûä¢.(, ¤ Ûs/¸8"ƒ”F¾CÈ0_Ùª8TÑG‹/×™JrýùµÑ‰ôþᚬ1[¿S¾³°.Pr>å|ŠR…­›ÔB§lô…‘Q•àÂK.§ .¾LÝ‚æl«œÀ>¡‡TùÌF…#öäE ç7JG•ß!š Ôm ¤ ÛsÏ¿ˆ”e¥kô`뀶*°5³;…U†©N=ͨÏ÷<ðz³IêUÆ ì-R¯3»RN\Êó25! 
ä¡Ò„íð¬0òçFÕ7Œ È^tiÕ`«GvaiÎìÇ«ZO3êó£ ÄÕ«Œ™ÆÕ³tr«J½Î¸v”çë¶ çOÎ_!j -ؾ¸âel‡_˜1_=[3Œ|ÁÅ'W=ŒžS4¾Þc’0rlCëAÆÕ«Œi%­)õ:eÔ§%y^4"È; ¤ [=_ èfÁö‚²Ëèü²KÕ¼-lÅËÙI-ì9Û yÆÄ ¼«XO3-Ø¢ÂIh!ô¬Î0)½±LêuŠPˆ€ü&Ñu!j W°uæl0²òl±8Š«nÙ’,ª l“Ô{LRO³J° ¨·k׫ŒPÜ*h©×™ä"W"°ºÏ'9yMœÖåyÑH}Ó@Z°]ñÒËtþE—Ðy]ìY`ùBk‹Þú“©Ýèxp¹®§ýùIêA&©W'0©×iÖì¬JNÜêÂ4îýr‘ŒÓ°‚=ZÔ²u¬ÐçlE•¤´™´™h@4PˆH ¶/qÙ)UËb•$êŠ`Øf<[Àö #K9ï6ŸbG—ß$ Ô®RƒíÊU™Úðnø@ÏV÷R϶vO¾t>iÑ€h@4P3H¶Š£ ZÍS ¶_Ð¥W\Eó‹.áp2lå*ñlEè5#tigigÑ€h 65lᤂŸ`©æéçþ9[†í•WÑ%WŒP·°•¯¼šÇad7¹YyÇ ùÆezªÍ*ß-Ñ€h@4H ¶pR5KÁSØç_ ¤pçò«®¡ËF\M—¸FY~ÃÖÍZÛÚr’B µy|òÝù×ÉåœÈ9 Ô¾Rƒ-;©à'Xª- ¶W\u­ó¢+ñ¢ØIöW¬X‘Iâ¯+ÝTTP9?®ÊÜy¯õ§ô3“*”—†{§â³*ï¨÷°«§«¿Å2¯5Ž/ª^­á5ã·+ãïör9‡Tò#¨}AI§–s  ˆ²5:l™£Êqe'öó/¾ÌlýÁ׌$WÛ*;ŒÌp+5VçÕ‡UpQ u f*ð'þ¯$hUîe·V¬Y>À³LC¨ú}|_1®^­ª¿[^ê­Îu€mS†oUòKç—Î/ ˆjSiÁܼòêë<Žâï/²a{=]]ÍÀe[õêkþ9ÛïÍóÜÌz±š %¶õd”“†ƒÊá}¦ øÐÒyaeõbëÕ&+±'°• Fm^0ä»E¢ªi 5Ø27•ãÊ hl¿4<[÷ªëFÑ•ü¢׎T·¯¼úº[Û³ô{­ª¢Žá-&‚m,Å’kØÂsŽÜóÎ ØVMèrv ˆjSiÁöÀ–ªíªk¯·`Ëä½zä ¸Ú^}í lm0ºž`bÏ6 ž¬ Ëf.¬„÷‡hƒßï *íÙòñÅ­bÎ~Þ9Vs^6I=ÞÚ”|·\ÐD¢Ñ@îæl_}íuºÚå(na_~ùUfÎö ¾síõ7)à^3òFe¯¾þ¦/Œl.@Â\ey9æ,6eìÕêEC»ó¢æB$$;T[Îó¥‰a«ç‚zªx¿»P)¾žnp=\{SX½Z-Nûù¬P¾Ed2+Z.ì¢Ñ@]Ð@Zží«¯¿Á,u Ãß>ØâÎÈFÓu£nöìµ7ü°M¿Á’…fÓÿ^¿´©h@4  d4l_cØŽ¼!ÃQ0õ˯ ÏwFÝ4–FÞ8Ú³×s[6¶ÐÈI—Ž/ ˆDµ¥´` n^opôúÇ0l¿Î„‘qçÆÑciÔÍl7Qöú›oåq)em‰R¾W´' šÒ„í š£¸eûÊ„-îÜ4æ®cxño¾-°•ª?RõG4  ¼Ò‚íì¤ÂqÕ,ÅíW_“ñl¿úúk=ö6ºIÛ˜[éÍ·ÞØJ'+øNVh#tù=âuŠ*¯Ô`ûÖÛÌÑ[=–ÞÌLµ`û ¹¥œF+»]Ù›o lE´•­´™´™h@4P×4lÁMÍP‡§åôõ7†gû5»¹·Ü~'½íÏÞ~ç]ñlųÏV4  ¼Ò‚í[ÌM“£àê×ß|› #ƒ¼·–ßåÚ|{'½ýÎj­t²‚ïdum.Ç+^£h } ¤[8©KÁQ‡©ßøaû-Ý~ÇÝÊnsíwë'lã2I‰ÐÓº´©´©h@4P›H ¶à¦æ¨fê7ßž-È{ÇÝã莻îUVÎöîê÷óγ•ÜÃÒ!k³CÊw‹þD…©´`ûîê÷<Ž*ž2W¿ùö»Lä½ëžû”ÝyC—íÝ÷,ØÖf=Û„õbýé3éǹ^Êÿé„þòxqõpÑÉìdvY>鈅Ùå¼Êy ¶Rƒí{ï)†Â4S¿5a‹;÷Ü÷€²»Çݯlõûküžm­Ö³uNt”g믗ëæR6r/;Àµ‹Ågç/+d¤ž­tÈÂîr~åüŠ SiÁv5;©÷Œs8ª™úíw†g‹;÷=ø°²q®åXÄÛ ÔŽÒ‚íæ-[<†VÌÏLOÿüçOØâÎüGѼ iî‚GiîüGië l¶[Ñ€h@4PðH ¶[¶~ ø© LýçO&lùΣ‹£ù 9ÆàÝú¡ÀVF™µ3Ê”v—v ˆjRiÁvë(~j[°p1ýôÓ¿2ž-î,Z²Ô±Ç–ÒÂÅKèÃ>ÏVF´?¢­É-ß% ä§Ò‚í~D2?ÁP°ö¯°Å¥Ëž %l-}œ[ö8}ôñß ¶ÎV£ìtZøqÏ›$_ëÝ&©Z$=?;ºœ9/¢ÚÕ@Z°…“ºxé2æ(ìqÅÔýëçŒgû¯Ÿ¦ÇŸ|ŠíiZÆ·KŸx’>þÛ'[9.©EÜóI:C>ÔÛ K7™äøå5µÛá¥ý¥ýEµ£´`û1;©K‚–1C—=¦>E?3_·i؈üq›†êÎSÏùäïV‰=³ì 
òRê4‰T®jÅr¾b/­¢¿„™\¢œKö©×'L2¡ß‹ïÍxqÆç‡ÕÛõаg[ÊÕBêÙ*ØF<é9FÕÛ­¡öÑ4 ¶aõ~÷š)+3µýù k§#ÈHÚ]4 È¥Ò‚íߨIuú4=É·O2Sþå¶|çÙçž§g–ÞSö÷O?Í»z¶^nd hly©+¨z»^^c#”¬ P`Þ-É×Àz>̲ëãª÷¹ÀUÇjP0KüùëñVn bŠ0 ¶Iêýzm鯀V.p¹¼ÀÉg‹¾òEiÁö“¿JÏ<û=ýìru ¦þò˯Øþ°}î…iùó/({ö¹èÓ|æ‡mÔ³¨B[ˆ ºž-`äÁÆõ†“×»u:Md½]c€°Â­½ë}>ŽÝöðå„^,lãêý|¿„£åB˜/B9Ñb®5lá¤ÂqCaàé/¿š°å;/¾ô²²V¼¤ìŸ}žwõlÃa OпÊeМ¬ß³¬N½ÛÀ6À VGB² °Ø$8Ž\wù|¹‹Dii -ØÂI}þÅÊ4Kýõ·Œgû+Ãö啯ÐK/¯rlå*úìó/ò®žm(l0™a[oÎÖš #›žd•ÂÈ!õvÍãƒÇj{¶ ¸Ô é,ÏVÏ«†„«ùsÂÃÈqõ~ýÏg…°² J4 (P ¤[8©+^ZI+^v \ýõ7¶|gå+¯*{yÕ+Ê>ÿâË<ªg\oÖ ûš ˜°ÍG×®ÅkôÖ=Û \âž÷/2kÚZÐ ª·k†·Y÷5æB/õ;ì0x9ÏG'†m‚z¼1avûû+xÁ˜ÌÙŠç–ç Ÿ#ZÊg ¤ÛÏ>ÿ\9«/1dÁÑ•l¿™°ÅW_{ƒ^yíuzåUØkôÅ—~ئßP5[Ï6ýãÏuç©Ýö‘9Û\Ÿ_ùüº×'åœê9K ¶Ÿñ­b~ÂK™©¿ýûß™0òo¿ý›^ã-zí7éµ×ߤW_ƒ¾üê«œí³­éz¶uM µÕ>R÷W.¦u­¯ÈñŠfÓÐ@Z°ýâ˯”ã †ÂÀÔû`ËwÞxëmÇÞ|‹^gûꫯsÛ4G>C:™h@4  ¤¡´` 'Ž+ –Âþýïß3ž-Èûö;ïÒ[oÃÞ¡7Ù¾þúm.HCœòr‘ ˆ EiÁNê›ì´*cŽ‚©ÿþÝÛßéÝÕïÑ;°wWÓÛlß|#°-!Éï‹¢h@4 ×@j°e'Ž«2æ(xú»¶|°}÷½÷=è~óí·âÙŠg+[D¢Ñ@Ák -Ø~ÍNêÛï0d]ç\ýý÷ÿdÂÈ ï{ï¯ñl5C÷›o¿ØJ'+øN&£}ñøD¢´`ûÍ7ß:N«k`éïÿ±`ûþ𵤠àýö;­tB鄢рh ð5l9"¼Úp\ßc®þÇ[¾³výZ»n½²5lß}ÿ}^z¶¾m1\ÁÇÎg,£ð;†œc9Ç¢Ñ@šH ¶ßrDxÍÚuc–ú`«’Z¸{‚^ã[lÄý2çI-ª –*æ Nó¤D}V>Ô³­©ß*ßSýÊ´„LKˆòRiÁösNs¬Z0Cu¢(_R d‹ºúºhĵ#銫®¥ËÙð†ÆM‹2 V/¶†êµÚí »nD=[ʱœS ª÷麻–ðÃê½zïwËùécɪéë}¶óâu dP" ä¿Ò‚íÊU¯Ò¥W^M—ÁF\«x ¾zÅã?øð#ÚcßiÈ^ûÑî{ì£lÁÂÅùWÏ6ʳ©gë3“ËØNGWï5«Ü^@¹?ñló¿SÉ…OΑh@4`k -ØÎ[° Ù“²í6t/fé¾´õƒ3°ÅÁ{ì¿û0ê7h(Íå7ù<Û|¨gÛ˜Dû‘°Œ+AçVÓñyªÛ¼ É…T.¤¢Ñ@e5lçÌ[@½쮬ïnCO·lýÀÛAÃö¦ƒ÷P í3pÍÿhþÕ³ …m|=[­tÀÊv@y½hF4P?4lçSÏ~»Q¯þƒ<àfÁV{µ qïƒiÎ<¶qõbkª^klãŽ/gš]\Þ_uÇvv½h»ï3ÔrxÝYéÄõ£Ëy–ó,È ¤Ûйó©Gßpû ì÷lA^íÕ´½úïΰ]Gõlñ’»ÈÉ-Â1GÕ³5ŸS¡`c1UÔ"'_ØØ¦æZ¸eÎb+pƒêÙÊêC 7‹D¢¼Ö@Z°=guï3@™öp·lÝš #¶ˆ-#| Øöì7ˆ*,ئ?:«Ýz­éÿžü½Éo–s$ ˆ²5lg1l»öêGÝz÷÷€»y‹[ÌÕjÐöà˜3Üaß©GfµU¯UD&Ñ€h@4 ÈÕjäYs©KÏ~Ê\„”a‹ð1@Û]àÙsçå ¶"v»h@4  ä‹ÒòlgΞCº÷¡Î=ú*à"œìƒ-îh¯ íÊDFì9Wžm¾4°‡tvÑ€h@4 H¶»õ&˜î¦Í[2s¶¶=ú:^m—ž}¶)†Í¥3Kg ˆDù«´`;cÖjߥ§‚-<\x·Y°Å¢¨î_î“» 2bÏâÙæ¯8¤ãʹ ˆDéh -ØNŸUAí:÷ð7¶Ê«Øæõué\ét.iGiGÑ€h@k MضíÜÚuÉ7 ¶ðj1WÛ™CÈpgÎÏV:£tFÑ€h@4PøH ¶3gS›NÝÈîÆM›3s¶›¶lqæjÙ«h;tíŰ“7ad»êNîÅôœ8£¢L¼\™» ˆD®t`[DÓf΢â]¨¤cW¸~Øòj)åÕò\-&v1Á;#`«3DÕlÉ:Iº‘ûAMᘥ 
å‹ò_©ÁvÆ,jݾ3•tèJm:ÂÃíAY°íÒË-¼ZXlk¹ž­*$PVFaõhíª?¶WQïVuëýå\²¯ÜüŒ˜Ï×é$‘¾1“ÒŸÙ—r²¢‚*Äs¡À=MþƒFÎQJ¶ÍZд³Ù³íên¶g˰…W Ðb5ÕŒÙyUÏÖç$îîˆ7»mæ9ý¼/ YïÖ®”F¶ ¨ã º~œ¯½qûxU-­ÀV`+ ÔºÒ€m“æ­l[·ïê·‹š¿5`ÛHíêÒƒçk»õQ Åä.–0çS=ÛØyžÇë)P÷ Ï2ªÞm@mZåéj¼E²ªú}Ž×‘\€‡yæÒáj½ÃÉ_¼0Ñ@ýÔ@:°-æ9Û9 Ûî.p»©pò†M›ô)¶j¾¶ko[Ðxú¬ÙyUÏ6¶ âæWcêÝÆÁ6öó]FÂÖ1ÂÚâÙ `e% ÔºÒ€m³miúÌyìÕöfØöàÛîlÝhÃF Û†l± ¹}çž.l-Ï6®^l Ô³†mBXÖ®/ëRãŽ0µjÏÚaÞÈÏ×%¶Yõr[£D Œ¨ëçˆZλœwÑ@ík Ø6oÙÔ Ø~Tܾ—\¶Û4Ü–6mÚ¢¼Z¬BnÛ©»ZE5÷ ™aäÐz±æ¢%ÀÃ]ˆd.Ró–v(¶œç3³<;jõ/,JT–—]ó6ó9Qõn•Ø­T+p|ÖöŸðÏ·CÄN(Û\=ulÒÙj¿³É9s ¨¿H¶E­:1lò¶Ÿ Û> ]·»ëÙnK láÙò|-` Ж°ë;Í‚múBŒ ýÖߟ~[K[J›ŠD¢0 ¤ÛÖ]¶‹¨¸Óî [önÝp²ãÙº°Ýèz¶XÐb5VUå*7²Ô³ÑË…O4  ä‹Ò€m‹Ö]ØvÜÚZµsBÉY°U!d Ûv]r Û|i`9éì¢Ñ€h@4&l[wäÀ¶mOjÙ¦­÷{¶›2Ï×¶fЦr&Œ\y¶"n·h@4  ä‹Ò€mQëÎ4mÖ£Ôªý†lo¶îÔ¢„a»Á #7R›nQ¥ MG^ªÌ!äVí: le9~­/ÇÏ—Ž(Ç!P ¶Ò€mó–™›ó²}²=¨¨¸+[— lÿ¸[äpÄ|m+öj[¶íDS§‹g+¬°;˜œ_9¿¢Ñ4l›µlÇÜœKÍ‹{PóV]¨¯NnÖº#Ãvc&©Å†›•WëÀ¶3µhÄØŠg'Þ½h@4 ¨H¶M‹ÚДi³©YK†,{¹ÍZu ¦ àuë}°Ý¤`Ûº}Z¶3eζˆLFö2² ˆê»Ò€m“æ­iòÔ™Ô¤E{jÜ¢ ß¶¥¦|»ný†L=[,MÖ°EYÂÈÒùê{ç“ß/}@4P4 l›µ¤IS¦+ÐîÚ¼XYã¢âlØ¢,BÈ ¶âÙJèH¼zÑ€h@4PO4ls‰½‰“§Ñ.\ýGY³V ÜVÙ°EYÁaä’4Å#ëT‡åœÂЩvPñ&+]b&U¢™ÄBå/6S#åè2Ÿ‘縞œ|UןQµœk9×¢ÚÕ@*°mZD&M¥š´¤›¶dØò-ÛÚõë3adlºU°e¯ -²` !8Àõç6ÿ#÷¯¯€UÕÆ®çª€k&â¬7[»'B:‚´¿h@4 (\ ¤ Û¿6nAmÒ‚vjÚ‚aÛ‚Ö® ­òlÛtÈZ [O6 ‚Mi¹YÐÝ_Y'« NT½Yñf%œ% ˆD9Ò@:°mΞíúkã"†mQlu™A›Ø:Þ±ª„“U^.¦ÞlŽXFª…;R•s+çV4 Hª´aû—ÆÍ=àf{¶jq”ãÕÂÌ9[F6KƘþ0²éÅâ$Uõq ôjÍr{®—ëû>®ŒjE¢Ñ€h Èl5pý°å ­ÚaË[{Î6Q=Ù„a`õY!çØz³9hणyŒE¢Ñ@áj MØþe׿Ðj«lEd…+29·rnE¢ú®´`ûÈÄÉ”¶ðjƒV#×÷“!¿_.H¢Ñ€h 056lwܵY´g«S5 l SPr¡ó* ˆ²5&lwÜ¥)s›FFæ(ÀV<[éŒÒE¢Ñ@}Ò@Ú°Ýaç¦ ¶°@ØjÐ6/nŸµ¹>5¼üV¹ÐˆD¢ú£4a Ð*ÛÅîšuëŒ R¼Ùôj¶õGdrA‘s- Ôw ä ¶n(lZ­t¾úÞùä÷K Ô ¤ Û?ïÔÄñnÙÖ¬ ðl5h¶õGdrA‘s- Ôw 䶸°Åœ­x¶Òñê{Ç“ß/}@4P¿4+ظ[ÉH%ißD¢Ñ€h€5 °•Ž A4  ˆr¬mŽXBEõ+T$ç[ηh@4¤Z­$µ1ÊI4  Ô' lų•ð‘h@4  äX5 [I×(#Ùú4’•ß*z ˆ´Ò‚íø “ÕÞZ¬BÖ–µÙ„­ì³Ê…H4  Ô ¤ [´[¶Ò±êKÇ’ß)Z ˆL Ô8l%©…P.B¢Ñ€h ¾i mØþé¯ êÙê•ÈRÏV:[}ëlò{Eó¢ú«…­YõG`[E'9÷¢Ñ@}Ó@®` ï64]£ì³•ŽVß:šü^Ѽh ~k@`›ã½UÒÁêw“ó/ç_4 €j¶²ÏVD'Ñ€h@4P5 °ÏV2LjD¢Ñ@Ž5 °Íq'Áu/« +VPy©Œz“¶™¼N´" Ô lS†mYE9•Vñ3\­\<êÊÅCŽS´*H®Z-¶Á¦NŸE›eÂ¥åÊ»ÓVQÖÝy®{Uàñ †‘zžæ½Ö·²ŠÌûËKK×W”E‡H’~~Øñ™Çh§ZZž9>Wÿ¦²îÎ S°-s«þU·t‚ä@ÚJÚJ4 ȵò ¶¥>¯àô<=ˆê¾‚žYÓÄë=@7HZ 
³Ÿß€ámz­¾ãs?'ʳU -/õÀý°µïg~t†\wù|јh@4+ älµ‡ix‡>Øj°u¡åÁïµ=X¼.Ϋ5aõù!Þ«ö ‡-Ã?æX²ÂÈ|ü[éü¹êüò¹¢-Ñ@Íi -Ø>21SõG§l ­úF†'šñò¼°ª^0dÂ4)lƒ–û|å)G_¬g+°•Ž]s[ÚZÚZ4_ÈØÚ`4úګŒñ<ÍùOM…m³¼I7¼ì†¡=AÆÁ6îø<Ø@6ÂÝúxüžªs,Ú;Ï6¿:‡\¬ä|ˆDii `ëοfHñb¡rw;Œ¹hácw¡ÀåÛ2c‡¡ËËB·°5ßñùú»œc4ŽÏÜ®ã[D•½2Ù\Àe. 2?;3/í,¦’P²tø´:¼|ŽhI4P;È+ئ/‚øÐmúßY;'R~‡´»h@4 È_ 䶺ˆ|%çlÓm${‹ˆ0Ýö•ö”ö ˆDÉ5P°°$´•´•h@4 È­҄펻4£vnª LJ×+‘“ZHÉO* ˆD¨mžT¡æv„*í+í+ TV[­Œ¢E¢Ñ€h ÇØæ¸+;ú‘×ˈY4  žÒ‚í„ISè/»6'=o‹¹ÛZ],b-<±Ê9•s* ÔU Ô(l±(ª¨¤ƒªøSû ¤*Y¨@<` 3‰D¢Ñ@5Pa‹¢$½¨«£D9nñpD¢º¤ú[+£ªwkåNö§S̤[ôÒBr ÈLJI+£.2iÅ¡*Ž”ê’ÐäXåÂ( Ôg ¤ Û¿6.òæm1w›5g[»ad»jOvÙ_—;†U¢Ï®ÀfÞb»‚z½WÂN2˜ ˆê½j¶˜¯m^Ü^ÍÛ¦LŸI›ÕÌIª kVòÁßFaw=+åbeÝidUž€Z¼Žœ]Œ >îä·‹w# ÔG ä¶zUr–g[а•ùßš4‰‡ í, ÔA Ôl×mب¼ZÓ‚=Ûz³^ãVõy¼ÏïeÚõní0°½€*®Þ,>/¾^Üñ˨·>Žzå7‹îE…­Ô`;y*ýµI 5g«míºõ´MÃFÔàÛ4¤Ú‡­;«B»®¡Þ-þÖóª¡`³°»~_T½Y»^mvY`+•¾¨Èù•ó+ÈÖ@öYëv¤­Fçlë`ØA+-Ñ€h@4PH¶zE2¼[¿g»~ƒYmaˆG.rE¢Ñ@2 Ô(l›¶jK0¶ÓfÔÜjdñleQ…h@4  Ô’Ò„íNM[ÒNÏwð˜†î¤|„-ò!ë}¸’׸òá-ö2(=¦ ,*ßžÒfÒf¢‚Ð@Âqf˜†î¤)yæÙª¤™\ÈÈ¥“ZÄü¼ª@¥¹!ÅK­>ïÂ6+ÛVŽ/*˜qîì6ˆ{Þ|}²L`ùwžµ1XŒÏd–}üIß_WÛ§VûEŽõ/¿-ÿú£}NÒ‚-¸ ‡u°Leï6˳ÍØ–U„ÀÅÆw‘ª¤wV€‰:þ¼èpˆ pA‡šh û÷f§ÛôwÀ¸ç“´_m·’ïÏÒq%/ôÕ}T;&9þ$çA^“ÿp‘sä?G5 [µ7qf×Ãȱg³êNuàeFÙ~8†Õ›Õ'6ôùª<¦çš©ð£S*f<çsÙë-3ëÙf¼`|¿L¼cÒ»¢¶¼Øz¹ ŽÇøûõ{+øøt%"ïXìÁGÕÓI:Þ£e•ÅÇžÿˆös~;ŸR³ýýç'îùHÏ.ªý+ѾæùQõ”Ít¡ü|mÈmç ž­Ãªx¶º…Á6‰çëÓŸŠˆT¨ÐJÒÈQPÿ6ÛÏŽ,évôµCD½éê¾?ðúcþþ˜OlÿÇûÃôŸTRo»VÃÑiÃvWL˺ÞíÚu2¹‘áæ"y2LC׆­¬”î¤|_wȸz³qÏ;äpÏVÃÖ [ž­s!1Þ¯Äk˜÷—äó{Ê¥Tjt:|žy±‰«—wü‘¿ßíh™¼ÎÎqg{ °µÏõûõùÃo6KVj„qþ0œ¢ÚϹ%XÇ÷|l"Û?AûúÏOh2ŽìC|ÉÆêz¦q©w»ÐGœ~ãt×~q…BâêM§ñþ¸ßõcû”þé/Û9zÛ5!¨1Ø®aØþÕå‘ñÂ…îD®^UÏ6,tWo6îyrI`ëuÊØÚ#q³ƒÆuVçâï÷ülØú>? 
-B?î÷›µ{ÝP¯ç'šgœÎšñpü×ôªªÛP¯,¦ýìÁ‹ýû➯6l"Bé¾ï7Û_¸p<ÏŸÛ7A I6–qP‹{øó™ˆPX-ç$Çx|±íWýzÓ‘ý7Q½êøßÛÈþ¥ÿ¸þèøkÉû“@˜Úl7„7ÝyQs!ZæwV¶:|\yØÆŸÿ¨öÓ[{üçÇ¿ËlsçšaðàïϪGì[¤â‚:iûÚúE=å¬Á[ø4CôùOxü¡ uâÞ÷¼½8ÏZß`†ÎÃÆ­šŽk?ßó™ÅŒ&`#ëMWóýñµ¬Ã/æIú¨þÍE™1ý»:Ç7’ç£a]s°]»Îóh5p òlã:\ŽŸ÷ ÚX¬’åçø8êªpØÆ‡ìêêïK÷¸¥ª×žÒ~Õk¿Üzrrl¹Ùúƒ4Ç(æÓ©Ý¹[¿g[G`&ÿ–†²Zÿ‹ëö…ÂÞâ#ç³rçSÚ¯rí%úªýöJͳMÛviF¦å£g+¢¬}QÊ9s  šÒ†mSTÑs½Û,Ï ýóÎM=à>21`”„gÅk Ô/ „nK I#ú¨_ú(ó]£°hÿ´S\Øø‰“¥êO©ÐF¡ò{ij ˆÒÔ@š°mÖº5ãy[x·°,ÏV`+âMS¼òY¢'Ñ€h ®h Fa»ÃÎFÞ‰ÃÈ| “0²t”ºÒQä8E«¢Ñ@u4P³° š³mV$óJ ˆD¢‚Ö@ Á¶­ÁÖŸ]ŠØ£mî<Û&ÍZ8 lU´ÈJ&`1×™PrýùÕÉ{et- ˆêžÒ‚í”é3©yq{jÞº=ÏÛbî¶­Û°QçFlQˆ '¶héÙ#“¦R“æ­|£™*'°O8*¬|f£ºwR¥#Ê9 ˆDù¥taÛÁ…-—¡ëÁv›†ÛÒ®·÷—Æ%´cãbuû—&%4aÒtjZÔ:1lÍìNa9N«SO3êóõsåœb/“®1¬¬¿öme’ñ‡Õ;õ¾ß­ÐS×êuJÇϯŽ/çC·h f5l‹hÊôYTTÜ‘½ÛŽ Z@·#­Wží¶Ô`›†ÛÑÚu¹Žm{.¯×NÝÂ&NžÉ°-I [-ްDèöãU­§õùQ… |%ܡlj»ÐëuÆý~y¾f;¿´·´·h æ4*lK:S[óbÜvaØn¢mmǰm´½‚í.Í;ÓÎÍ`”Mœ2‹šµh›lU _½ØìŠ.IÂÈQ° ­7Po3®zŠOèõ ^§tìšëØÒÖÒÖ¢üÒ@:°mÁžíljѦ+[7jÙ¶;µlד6lÜL ™³ 6úoºÝÄo{Ò®E=غÓ.EݶԼe»HØf•ã¹ÙÄ«b=Í´`‹E_‰CÈA°-°zÒùó«óËùó!¨9 ¤[^P<•aÛ²m†l/jÕ¾µîПa»•nû'†í¶fØnæJ}¨qËÞ Ý^ ¼“¦ÌaضO ¶IêIV§h\½Yûûýaáø“Zèõ:ŽW¯ºÏÇ·³\`¤D¢šÖ@:°mISgT0d{+ÈwÜJ:¦›> FÛíØîÀ°ÝBM[÷§&­úrá[¸“¦¶B¶þdêz:¡Û\×ÓŒþü$õ&ízºÉ=[€¨€ëuf„]]˜Æ½_."5}‘ï͉â5l›4lç0hûQq§AÔ¦ójÛuO†í‡´ív¡Û [©YÉnp[²‡ëÁÖïÙÒIKîŽ*èm/¤ö‘ßßA¥¤D…¡´`;mæ\öh* mßm?Ú¸ù#Úvû¿¶;*Ø6/DM‹[x¶#gÏÙÖua¥U÷VêuF«ëz–ãŠÒÑ@:°mEÓfÎs½Ú¡Ô®ë>Ô¡Ç´ióÇ ÛLØÂ³í§æm±Hjâ”ÙY«‘åĦsb¥¥E¢Ñ@þh -ØNgضé¼;µí²GlFÞBÍŠ¨ER»²W»Kó®¼Ï[Út>L{þˆ]Î…œ Ñ€h ¶4láÙ–tİÊa佨C÷ý9Œ Ï–ÃÈÎjd,êË^m/öj»©=·AI-j«!ä{¥ŠD¢Ñ@®4l[ry.•t ¼Û6JÞ‹aë.röÙnæyÚÊ£UI-8ƒÔ„É3²Ò5æê‡ÊçJ' ˆD¢ÚÒ@°mÌ«‘§ÍÄjä¾j‘”³"ÙØú“É å@v§¦í8mcÎ<-«Am5„|¯tBÑ€h@4 È•Ò-'µPûl{QkNhQÌ[€J: ¤ ›¶R#Ž ¹‘Èþ…‹ì¸k+RU˜Ô¹úqò¹ÒqD¢Ñ€h 4 l›¶È å¤jlÅ©Þ ·8é3UZ;åõviA;pm[_=Û„%òò¡Ñjãìª?µq òrÑ ˆDUÓ@:°-ât³8/r׺*ð®ß¸‰aË…þ¸SÏvÇ]²nñø?ïÜŒÆOœL›Õ)Ï6I!ƒ\‰1(OtÚßU›¿/íß"ŸWµ‹‚´›´›h } ¤[”ØkQÒ‰«ýÀ:óߪ?(±çÀv{³ÍèÏ;7ul§¦°µ“9åΪ'«ÓrzD§êWûAý7{ÍN²‰ *+3ëÑòýîN£ÆÖ‹µS)ºÕ…|U€ì×T"ûSÜñéc,/+£ ¯²‘UÕÈûÍNªK³Bl=Þ$¿ÏkGJÓýþí/7ýÎ+m*m*¨;H¶Í]϶“\®i[T¢ëÙ6l:°eoÕ¦<Û¦ÏV¶¼Ôót@d€˜•Øß¬êã^ðüt˜Þ 4Pê=™ûq… ì(ÏϨã¯4p£Ïl¬t¥¥ê7ë諯ë (ü¿×®JýûüWí¬_‚ö— Cݹ0ȹ’s%HWéÂÖlóâ Ünñx¶®gû§šÌ[N0&\Ì k!”–»06ËÑîkmØúþ 
“^"N…¹-`ÛÇçóÌMÏÖ.>o†u]:lÃ_uÛ_:oºWÚSÚS4Pw4 Ø•p™­’ž­Óh¾ª9h•¨Â¤æãuº …Jsá`QÎaçÌ+ î¾ÏÏ,¦ò×·ÉZ püÙaä(؆_’zºæk0·«+äý:4µM‡¿«Ùþra¨;9Wr®Déj V`«W$Û ¤r}rƒÂ´¹þÎÊ|~¾_e~‹¼6ÝŽ*í)í)¨Û¨7°M«Îl®ŸïÇ—«ß-Ÿ[·/ rþäü‰’i ÞÀV‘LÒNÒN¢Ñ€h } l%MdÊà%ô/Ò¦Ò¦¢Ük Fa»ã®Í}Y¤jzÎV•{AIK‹D¢l lųÏV4  ˆr¬mŽXFx2Ê ˆD¢­ÀVF´¢Ñ€h@4c Ô lQý†z¶f!‚º>úó*÷¸•„Òü=RÏVFÆiêI>Kô$¨Y lk»k.SH=Ûšír1’ö ˆÒÒ@ÞÀ6¶ÞjPºF3pL=V/i„Q¹Æ«k|Ž?]¤?£~ÎLi§cÔ°õ§NÌNëhŸ@©g+:­N-Ÿ#Z äŸò¶‡¨ðz«Iêņz¶qUkܼÆYyíJ>:7²~œï›Åü°‡¬Ù)’ÔÛ•z¶ù׉äÂ&çD4 ˆÓ@ÍÁvÝzúKã"Ò{mƒæl#«Ò$¬]üÜ_¦ºŽU5(ª^«ž<·kÈZ“êžg\…r{RÏV:l\‡•çE#¢º©ºÛ„õb£`ëšw«þ.5 Ò'ð|•ÈÀ¶¼œ‹×zÒÎ!õlëf'Jz~åur~EõWu¶ÉêÅú€eÕ›(B*4QrËÏéY¯5±gëÖƒµ¿Û󀥞­\pêïGνœûúª¼mÒz«v½Û,2ªÞ¬ Eëâ耞Q(=ª^®¥†$xç:»Úü°ÆãÎgz‹¯øµþôR϶¾v6ùÝÑ@ýÕ@­Àó¶°G&Ö>Ûêv¤\nªî±ÉûëïEBνœ{Ñ@õ5 °ÍqÖ¤"•z¶ÕsÒ¶–×I[‹D5­mžÀ¶¦O¼|Ÿ\lD¢Ñ@Íi@`+°•œ¨¢Ñ€h@4c lsÜÀ2r¬¹‘£´µ´µh@4¯Ø leD+ ˆD9Ö€À6Ç œ¯£,9.ñD¢Ñ@Íi@`+°•­h@4  äX5Ûµœù¯MZxù‘eŸmͨdô*m- ˆjWÛJŽfj»^®t˜Úí0ÒþÒþ¢Ñ@U4w°5Ó®à‚vV¥Àz³º"r«TŠ\ÝÇKÛhTúA*Å2 Ðé3é½zºn®d}^ªÅ˜z¹ªñí×T¡òOUN¢¼G:¿h@4 Èo älàŒ2w3@hCëͺ S¹Š"fÙ¾$õbÍ\ÇA~¢Køùs-«ãàÊ\P%£'rÑÌœ9?UÑ@­Àuma|¹‘Ú²A§¸xxÞ(计mÃÖS·`½~,²ž®{L‘Åéï¹rä«rå=ÒùE¢Ñ@~k žÁÖªò“&lÖÛ•‘ßBΜÑ€h ¨QØîÔ´¥·"9Û³ ªW딣Ӟgd½Ù„ž­Ö5=Ux¶ö­¿<^ÿ²U³apûõ¹8iò™r1 ˆDuKy[ˆÇ®Wë û,RRÏ›cÎ×]ð™ ŸëòrÌgêÑúàèûüÌbª¬ybïýÙ!âØz»2's¸¢Ñ€h Þi ï`›ËÑšÔ‹­[#Á\jA>[´  Ô¤RƒíŒYÔ²m'jѦ#•tP¶~ÃFÚ¦a#jðÇm’ZÄ…‘sùÃ¥^¬t¬\êK>[ô% Di V`‹LR° “§Rã¦Eõ.œ R:¥h@4 ¨_ØÊ܉ vD¢Ñ€h ǨQØîܬ•J϶~êd/ç[4 ¨ÏÈl1o Ëš³ØJg«ÏM~»è_4P5P+°ÅB)ØD™³•ÐMŽC7rq«¿79÷rîóI[¹Ø ðE¢Ñ€h ǨQØîÒ¼5éPrÚž­]µ§ú#'{Ue èc@ÒŒ|Ë$•~ûȨ¹ú“6”6 Ô  lq² T{¤S!äó}i«} éuÆôÛ'ûؤÞoz竾\„äwŠfêƒò¶þ䜫Ø*QUïVÁ¤Œ«ÿ„¥SŒ«7k=_^šml½[ª¡°ù~ýÛÍ4“ª6o`;ï ¯×ë F¢ÚÇ«줳4½sï÷—›õ€cKRï7+§ûþõˆëCg”ß(Ð ®ò ¶6¤X ØÆÕ»µëßÚŸg§kô×›EØØ¬ ”FNR‚OC-(Œýý®È4ðŒrvYÀ°™¤^¯Y8kPÀƒ ìø<ó»öÍ6hP]ï·zõˆåBT¸"9·rn ]µ[ÌÛÂ&N™ferè™Æ‡t#aâyyð`ÈeÒ¬$¦zûºÀÏJRï6ä3“ц£Z,„zp=ëØ÷+Ï5ÄO¡q’6×ÈE[4 ÈG äl-‘ÀËó<ÛjÂ6®Þl®a÷ý:T\-ØV§^¯íÙgÏ lå"–19&Ñe]Ð@ÍÁvýÚµ¨˜ôŠä Ï6«*`‹’y.ˆ²ëÅúëÝÆÁ ºÞ,>Ëï•Ùaì$õn£ÂȉêÝÆÂV{ÿ!åý¬9n»^¯/$m~—åÅë²…v9ôýî9òC«Þouë×…%Ç(~Ñ€h HyÛLY§æl|½X}ñ7·Ý¨ÇŒÅ>fH7²Þ¬µ@h/òmÿ‰¬wk‡Àõoñÿ†ðï~ö|mlÃëõ&ió5˜ÛÕµq IÞ¯ækCëüU³±\Ää"& ÔU älëj#æËqK½^¹å‹å8D‹¢¿¶ ¶ÕÔÑH½^¹¸ÕÊ1ŠNë«j¶˜·…e¯F!ÖW!Êïí‹D…¬mx¶…,Rùmr ˆêºj ¶ëx5rã%ÞŠdñl¥óÔõÎ#Ç/ 
ˆ’j@`+ž­Tû ˆD¢k V`‹ý¶°IS¦[¤d””t”$¯­ˆD¢º£mŽG3ÒêNgs%çJ4 È•j¶MZ¶ñæmÓölÓ¯×ZXõls% ù\¹8‰D¢x  lq²Ó¯×Ÿ9HdùZÏV:D|‡6’6 ˆr¡¼ƒ­Ô³uÊÐeÒ#&«g› qÈgÊEG4  ¤£¼‚­Ô³5r W¡ž­tŠt:…´£´£h@4¶j¶Øo ›4Õ^,õl‘È?¨ð|Ú'^>O.&¢Ñ€h æ4g°µ~¸Ô³•½o²Z\4  €j¶M[µ%½"9ȳ•z¶NY<ÛšmÊÈ^ÚZ4 ¨ äl¥ž­SÇ7cÙõl¥cÔDÇï‰Dij ¯`›æ“Ï’Ž" ˆDù¢mÌ䋘ä8äÂ& ˆ‚5P+°Å¼-lrÖjdªU4  ˆ O[ñle¥£h@4  äX5 Ûf­Û‘^‘,žmáÜd4.çT4  äAY`+Q:¢h@4 ¨Ï6Ç¡ƒú(*ùÍr1 ˆD~ l¶2W# ˆD9Ö@­Àó¶°ÉÓfPã¦Es’ŠETÖ]Fµ2ª ˆD¢Œ ¶eµ[’.+ådŽGK"f¹ ‰D¢ü×@ÍÁvÃFj^Üžô")Û³Õõ[ËËËt…8»—Q…™ÎP—¡ÐìçÜ×ét‡^ÜòRÏ“.-wÓ"Ÿã«§»Âÿýú¹¨z³¶™z´øŽÚHGÌÿŽ(çHΑh °57°…Ð@eÀd×·µ½Fõz¸ü¡ž-`l€V »´<öL}E|U‡\!à1€<¤Þ¬¶Y¹€öDίœ_Ñ@2 äl}‰÷Í 8!ž«í5F…‘õsÊCU°äú¹š `¬DS•ÇóŒ­A€2™ ¥¤D¢BÔ@Z°6cµj×™Z¶íD-ÚtT¶ž#ÇÛ4lD þ¸MCZFÖžm(lM0F̃FÁÖñbÀª¿K o7EØ:¡pñj ±ÃÈoˆDUÑ@­Àó¶°)Öjd„…ÃaÛ€i…y ë 5«¯=¾ªP1<Ô ÿ÷e/n2<_ý]±ž­–¶¿[J̪óªt4y\ Eõ[y[sA‘®žåùQsÕ¿€)`ñ‘ñ¾,ïR…¢õ-ƒÔÞ¦ªÎÀ¯·ëí®ð Ì­?Þâ+ëø¥ÃÕï'ç_οh ~j o`+¬Ÿ”ó.ç]4 ¨H¶m9[më7n ž³ #ׇ—ß(Ñ€h@4Pÿ4*lÝR[™Ÿ•ùYÑ€h@4 04*lų­£5¡Ë9 ˆDñH¶âÙÆ7¸ˆRÚH4  Ô? ¤Û]›6'µÏV<Ûú' ¹hÈ9 ˆDñlß{ï=Z²d M™2%˦Nªíi‹/¦µk×Ò?ü@‰`‹ ÈtQTÒAåH†M™>“7+œª?"¸xÁII‰DõQ€-@ûÕW_Ñÿû_ÏþóŸÿì§Ÿ~¢ü‰¡ú#}ÿýôÍ7ßÒgŸ}Në×o¤¥K—*Ø6Öžmx¹‘J'¥ÓK¸0À¶‰ÀVÈBÑ€h@4Pàl'Mšäƒìo¿ý›~ùõWúùç_èË/¿f¸~Aÿô3úøã¿Ó†›éõ7Þ¦M›¶Ð„‰ü° ##g#öµnßÅËéðNåØsÓæ-kIdn’ Éa\Kí/£ûú8º—ß,º¯¯а՞ì¿ÿÍ ýåWú׿~¦þó'úûß?£>ú„¶~ðv+½óîûôê«o*Ï6Û"gΖ=[Ó6è}¶Û4Ü–)½‰ÚtìA%ºSqûnʦϜMÍŠZÕâÅ> c®ê«ÐåwËE^4 ¨M ˜°ýý÷ßé×_ó@ûý÷?ÒGB[¶|H6l¦5k6ÐÛo¿G¯¼ò­[·![ŽOcn¶îÀŽk{®kl6ÚŽ6nÚLºõ¥ö]ûP;e½iÆì¹Ô¼UqÍÁÖJÇX^š Û°z¶Õ®·+¯¹ó,m-m- ä™Ò€m“æ-hú¬ÙTÜ™ÖN]=Û°‰3H5Ú–4Úv{Ú¸y uî½uê5À³YsæS‹â¶5$ ;rv9®žmõvksd%ß-#{Ñ€h@4P;H¶-ZÒôŠ9Ô¦G‰»öðn7°3Ûp[†í¶Ûÿ‰6mù€z FÝû¥ný†(«˜÷(µjÓ¾f`T­žn%êÙFV%JXoW„^;B—v—v ˆjSiÀ¶i«V4cî\jׇ£ÃÚz÷QÎlÃí¶£ÛýéÏ´yë‡Ôgð>Ô{÷½©× =•Í}t1·ëT°MXo·6O¶|·\lD¢Ñ@íh Ø6/)¦Y æS§A©ã ޳uÜmmܺ…m¿=`»máVý÷8€ú ÝúÙWÙ¼…ñ‚©.lu)»°âëU}ïó¦*ƒg¬FŽ«g›F½Ý€²*Õ'Åå¥Ó×N§—v—v Ô¼Ò€mQ»¶4{ÑBê²×Œí9„6}°•ýùOÔ`û?ï¨`;`¯ƒ¸R¿aû+›·h ¯P®)ØrãújÝ2𸼟J¶jÖêz¶©ÕÛØÖL$#ÏGÈÅ­æ/nÒæÒæù¤¶¿ÿþ‚a V&ûí÷œðâú⋯èÿøBmÿ±W#·èÔž*–,¦îìEÝtÿÞüÑ´íÎÀvàÞÓ€=b;PÙüÅK`+É'ȱˆE¢Ñ@õ5`Ãö¿ÿýÿhØÐ¡4üÜséú‘#={èÁé‰Ç§¿ýíÓ,ضìÒ‘æ<þõ*Ý—m?ÇÛ—6ü!m·ã~ØögÈjØVÿJ'6 ˆDù¯ Ø^pþùT1{6=ù䓞½üòË´zõê@ضêÖ‰æ>±„úqõ9’o{±¿Û¿0l·ó…‘yÞs·n¹¤Cg +JÈS4  ˆ Zi„‘lŸdØ´G¨ný°uH `ÈögÈúHÕÔjdrA 
YFöù?²—s$ç¨>k 0²ölXe‡ï¯BÊžg»íöÎÖŸ¾Cö¡>¼õ§÷ ½¨×n¼õgÁbjݶƒ@@Ä—¡èÿ¤¤ŸˆDuXi„‘[vî@s–=F=Þ‡zh;ho^ õ!m»#/j´“Ô¢çÀ=¨‡—Ôb°JjѲ¤¨ (µ‘ªÀVúôÑ@k 0r‹íhöc ©ë>Ã2¶÷PÚô!¯FÆÖ¤kÜ´y+ué3(“²±çBºÆ¢V%"°Xb l¥H? °Ò#7oÛ†f=º€:æÔÇÚvß6må}¶â¤N!‚-Ô±G?UŒÀ±>4³‚ ´¬ÁBIN¤¹WÊïU¾ó#mey©ÚÓ\QÖ=ùû¶ÉÛ*‰Žå5Òž¢¼Ò@lo½åµÍgåÊ•ž½ýöÛ\ùgCàjäfÅÅ4sÞªž»Yåš¶Ò]š¹§#;‰ÀV`+ð °Ò€mcçz¶­9ÍqkÎQ¡ŒÿVÅãQÏöÛ4TwŠ]ضlÛ‰`S¶x³ï"¬! çKù¾b\½Ù¸ç…{¶¶^øØòlïWËÜw€™p¶§\J¥†˜ðy&ìãêåÆäïw«¾Oµ±sÜÙhlíscý~uÝ6Ë \ŽÄ¶r¡-à m]õÆä¸^¿h7Ø6eØÎœE­ÚwÎ8®ü·ÛFضn×EA¶E›ŽÊ¦NgØò›mØ.¬Ñ o¬å]Ôãžwß—¶^õŸØÚs¸&¬âª5ðNmØú>? Œzüq¿ß¬Ýë†z=o<Ñ<3 ì $2Q¿g¯?çO`›^'• ž´¥h îk Ø6WaDˆµÓŠ[[ž®õælU¹ÀÖÅÈ|~Ï0¶XéŽc¿>ÖQžyÎa'öLèÝ %'^‘,ž­x¶ ¼N\?”çóU#©Â¶­¶ëmت02^TÏVÆö¤ø_‰z´NÖø #œjzea'K…‘­í@¦§ KÓ³ÄEŠ뺙d° ?þÈöIìÙV}ÎÖô”+½K`+°ØŠ X©ÂÖòlC`ÛI¦æl½0r°gä «Æ„aã´zN1ãyYó·Ö¤=§ ˜•ó"ª Ï-I½[ó5˜ÛÕŸ…ߘäýÉŽß¿HIµŸÙnîXüs!Z¦«[>ØŠ‡‘¯†—h³64`Â5lùåWúç?¢~ø‘¾ûî{úàƒU Ûuë6Ò{ï­¥7ßz—V®zïo  'ðë~`^ºaädž-`똶ù!&ß‚¦ì9ÊÚ8iùülý‡DÇ+ž­x5ìÕ$êòû ºhØþþûïh¿ÿþU4þ믿eÐnQ`}ïýµôöÛ«éµ×ߢ—W¾ ÛDž-Wù öló¶‘!d½­E2K¥ß)¶é·©\¼¥MEy£ÀváÂ…ôÕW_ѯ¿þJÿú×ÏìÙþ“~üñŸÊ»ýûß?¥?þ„>úøoôá‡ÑV.Þ³™Ó¿ûî{4þ|åÙîšØ³íà,r`›Ÿž­Œ@kiÐ#°Í›‹‚ôZêÆ‚î€-R1¸ '5€Ó~•‚mIÇ®ÔI-ò8Œ,šZºÐl úB#ýª–ú•[GÞˆC.r ˆDéh Ø&#kØ*à²M›1;;©…ÀV`+ ˆD¦4`ë­FNêÙ¶0m:#&yJ;ŠD¢üÖ@°­œgËy¶ù- é´r~D¢Ñ@º¨Yت{]TAØ4.±—Uˆ ÀB"Øt+í)í) ÔE Ô(lÛvæÂñ \ÔµÅÊdÔ³mÒ¬fëÙÖÅ“$Ç,Ñ€h@4P·5P3°å:{7m¦]{S»Î=•¼3fÏáâñ-e!€xó¢Ñ€h@4PЂ-¶ôüç?ÿ 5¤u4·þÄÎÙ6l´mÚ¼…:÷@º÷£ŽÝûR‡n}hæì¹Ô¬E«‚n`ÖíѨœ?9¢Ñ@‚-@Û·o_êÔ©“²®]»Ò{ìA\p7NA8¶fy=üí"h´Ýö´iËVêÞ0uë;ˆºôÙ:÷@³ç̧¢V%[ÑŠD¢Ñ@Ak ʳ=á„謳΢Q£F©ÔŒ«V­R  ólCa»íö¦-[? 
¾ƒ÷¦Þƒö¤^÷ ý‡RÅüG©EqÛ‚nà4FDò2² ˆDu[as¶:”|íµ×Ò¢E‹èý÷ß÷@‹LR>϶‰Sõ'¶ÛýiÚúÁG´Û^Ñ€=ö§þC÷UàûèbjÕ¦½ÀVF´¢Ñ€h@4PÐаýî»ï¸Á¿|)áÁ“]²d‰´A°Eµ¼PØnÿçië‡ÓÐý§Ý÷9”¡{0 Üó@š¿h ·ëXÐ ,£Ñº=•ó'çO4 HC¶?þø£Ê Õ4”ÞûïÿK¸5‡ç «BìÙFÂöO;ü…>øèo´çÁGѰŽ !û•*è.X¼ŒJx¿m?D>C:„h@4  ä«4láÕêÐ0@gmrØîøWú€kôíuè± Ü£i¨ ÜG{œÚp’‹|m9.鸢Ñ€h@4†4l,€ª½X Xó6¹gëÁö(†íá4xßRR°í(°MãDÊgÈA4  ä¯4lÀŠð1àŠÛ K[#ïqБhí톑ųÏ^†ˆD¢×€†mLãO[½@jð¾‡Ñ ½¡jUòî©N"²™Œ¶ów´-çFÎh f4 a hVÕbHm‡}¶|Hý‡íOýxÛO¿!û¨­?ã'M£ÓÎ<—Z–´7¬µ,n§öß*kÝÆgE|‰0š·,ö[‹bÎFe>Ưá×µâ×·v> Ÿ›e%ü˜ûý­ÚtP[‘œûúµîq¸Ç‚ÏSßÝ¢55-j©ÒM6iÞBåxnÜ´ˆ +ðßüXÓ¢VÔ ¯Ã1èïw?Çâü|F+õzçýÍÕj31iÑ€h@4 °5pÜ 'Ñ]ãîßúÓh»?q:©ÍÔk·=¨ç€a*¡EþCèì .¡‡™Ló²=êÙìy höÜ4kî|šÅY¦‚mÍªÈØLþégÎæ[ýø~^¿Ÿ¥?Sýmšó}Áß‹Ïp ßá~ÏôYª*a“1–cOîêôNŸÉÏÏš£lŽ ïãcQV1—f¨ç*2ïç÷M™>“¦LÓ6ƒ&O ±©Óir„Mâç²l ?c§L#ŸMæû16aòTª´MšB {„ÿNÅ&N¦GblŸÕWåeÖ¦Syš]óþvÞã|Go.š€Â ޵éÔƒ+u§’]©5o[BmÞVœ›²{Å-JÆãVíø9~Mq~-¿§-§¶’Ž=øñnêý­Úu¢–m:R¿¯yq{jÞÖŽš¹Ö´U[г&-Ûi[”PœíZTL¶íÒ¼5EÙÎÍZ‘m;5mIqö×&-È´¿4.¢8Ûq׿TYÛa—fgÞ¹)UÖþ´Sª¬mÿׯgög~ݺít›¢ýõyÁùÃyÕçºÐúÑšÒ·Jo®A¶µ`]æÂ‚¾Ë|Ì<®°¿íß’ä~P?²ûNÐ}»/%é7A}%®ïÔf?IÒ'*ÛªúzóXÌþÕô5m¬¯av?ˆê ÐOP_Ò¿P¢¶ïïÈá4ö–[õtljܤ)ÍáôŒºAPYou«©Œá© šœ ;T R¥ú`m:u1ç5™×ñß^§àÏt>Ë6@’MÕà43fBV54.JÆÅ àmÙ¶£ ÝΔœk¥ ÛY½¯-âðr‘ Zû¢Z¿æy¯nħ6=Û«®Å\½CñÕƒ-þh´-€{ÏΧÎ:Î+"Ÿ U@´ kÜÚ@´ïë÷"Ì«¬}¸!”ë3„ TMó]tJN¾`0h½0„]\,\ùb¡Íó*TØØ±$¼&É"ÎÛ º@ąǶÉÃɹ‚mXXÙÙW6”NÖ0Ø:Ó3iõm²þTYØ¢/˜ÀÕ¡å\M¯Ôl<ìH^³4Wñ\5V”g–VƒÀpyÇŒK6nÊK[¿aÅÛF~M:¶Ž?'U[¿ÖåÀÖògfÙ:~Ìgëù~ú¶†?3ã…XLPf´Iv›»ç%JºKKÓµñ9©ô¡”úM`_±ûOŽúN•úI]î‘ý@_‹œkWàu1äú›XÃÌ1Ô“­I=f¬â¨áÑú=[s?×·i¨VO‰IˆD¢Ñ€h ¹ÀÏ ýÑÿ?œÒéú¹Â°þIEND®B`‚manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/flow1.png0000664000175000017500000012002612301410454023345 0ustar chuckchuck00000000000000‰PNG  IHDR°Y¡iásRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?ŸIDATx^í ¸ÅÕþï?ÆpE«PÃÅŠŠŠWŒJHb"nñªqGçDT¢âB *"Ê§Ä¨Ä „ˆŠQEÑDÀ%¸cÔHLŒ&1Iýëmïë=ÓÕ3Ý=½¼ó<õ̽ÓÕÕÕoWw_ŸSU üP*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T  ôÒg}®N£Û·oëFm´3Q¶¶¶Ì¶ñx–¯»n›+[®á)ú»G!{8ž4 T€ P* ú¶mÛöú¶mÛ}°é¦›}|ÚÃ?1r”;îZ5eê …¿™¨ÛÛÛ@vÛÀ„‰7ªQ^ê=ˇyÌß7Üh£¿555ýQCíe„Ù\ôã< *@¨ …P Ç†nøD·î›ÿå’1WªÅKW¨÷>úŒ‰° ° °   <6o‘~öÈÏ;wî¼fà 7¾V÷zM…èùx’T€ P*@¨@ö€±£eòí÷ÒP-€¡Ê|1Ã6À6P® 
¼±zúÙ¹|ÖÔ´ášvíÚ“½5¦T€ P*@ò¬@Ó×;v\cF Zµllllh+W½§öÛÐG;vº9Ï Ï P*@¨ÈŽ=6ÙdÓ?Þ~÷ôÒ`¥ÁÊ6À6À6À6à×.¼ḩbçé®­1;ÝkJ¨ T€ äMM¾Ñ¥Ëó-¥×•!ÃllllÛÀÍ·Mù—î4bóf ð|¨ T€ dCF6üâ¬GçÑh¥ÑÊ6À6À6À6àÔ.ýÅÕÚ»É]ÙèæRUË&]›ÑÒ€ÚÂóí-gWebùùÓ¿oªZ8+C¨ˆ[„‚ám:Ã.È6À6À6À6¦ ü¸ù„õr;çÅÝOå¬üú|VU€Olú ¯^½,¿2ÀfMŸéúšNj0ÜN¨È“Cöðí¿„1X˜—.ÛÛÛÛÚ&ûëÐÔô±î7ÉSÇó¹æÄ| _š °Å¹Øeÿê¤ÖI&f¢T€ P*ÐZáÍ'œüž4<ÙØØØ¢h}úî°Fw3€3~*+@€e ¡T€ P*V¦7^8eê NÞÄõÙØØØ"i—Œ¹RµmÛöú°ýQó`.àyó”© T€ T¯@Û¶í>X¼tE$FKoîY=@lllÙn“nºMuØh£YÕ÷L…Ú“Ëêród© T fÖùêW?Çú}4³m0òúñú± ° ¤¥ L›9[m´ÑFKjî X T€ P*@,š×ãøW† òÛÛÛ@dmQ=ˆîaK¨ T€ P¨ ÀÒhÌhM‹÷‡õ '’m ¾mQ=]»uÿ³î°°.%?T€ $£@£> CÒ“ÑšG¡T Î `±llll‘¶ÎDœlÏ®×Þ=OQ1eOƒõ×ßà•ˆZK³.grDe±*@¨@ªˆÔh¡ç£¾žêOýÙØÒаÉöû`§N˜x#ûó ¾ˆjyéEƒ!ÀF¡"Ë T  °ÃË`‡—•u (± ° ”kØdûlvïEl²÷ F¨@> À`ÙØØØ"mØd ,V·8z`“½íx4*@ê¨@¤F =2ÙíDyíxíØØ¢jX§^î uʉ›Ý{—Ø(î–A¨@Ñ ÀÒóÂ6À6À6À6i À:™u®9N9 °‘¶Ï¨^ÔDQ6Š;€eP*P4rÛ)Dѱ°Œì¾ÕæµKßµ[¹ê=Õ¡C“óL©ë®»®sÞ#Ð)‡œò¡Ìõ×_ß9/òÇQçÆÆF5ÑÒL=« °N¦–/ŽäùâÔ`25ë휅8 %Y ©W SF¡$}PÂkÂkâÚ/]¡ºwï¡\>ãÇWýúõsÉêåÙ{ï½ÕE]䔿K—.jΜ9Ny¥®Ÿ0uSßúܦ͜©g5Ö©ï'À` °N· 3Q*@Z+)£ÈÕPf>BÛ@úÚöK&À²+Ö ` °X> ¨ U(@€eÊ6À6H À`«è£ò¼ –Ï^lžïpž ±)ˆáJoXú¼a¼&¼&I·,6¶ž,›` °ØlÞ»¬5 uV€Ë”m€m ‘6@€%ÀÖ¹¿KÛá °|ö`ÓvW²>T€ dBD פ==<½‹lékXl&zÅä*I€%À`“»ßx$*@r¤–(ÛÛ@"m€K€ÍQßÅ©`ÏûùŪ\zí­j~ö<½x™Úµÿî ßY|q© 6ŠO³.„ËèD¡$Ë T õ dòŸÅNŠuNŸG×$ÙkB€%À¦¾GL¶‚…ØÍ·ØR6lx h£X”ñC«–¯Ê¤=C€Mö¦ãѨȇ™|à<’êM½£hXl>ºÍÈ΢k>;°>±€v‡õ!ÀöÐeõ¬u² šhÔ%à&G úlbä•}Â|³ü/µöÓ-ïú_·³#aø(ÛÛ@"m€K€Íc'ZÃ9`[ž½ðÊn%!äXÀÀ ëÄI·”¶ãŒÍbì/e¥=¼8B€­¡)r×8hÖ…®Öéa¨•>CõÆ9U&–_Y»"èGûM{™‰®QxoX½€lÙnXlÚ;Ä„ëG€Õ+°)žÙi3g{ð `Å3¿ò¢FÂßGÝ\²_LÏî­wNõöG9ØÿKYiìC° ßu ËAÉ ÎCJ,½oll‰´,¶P½kðÉ`õ³°yùã[=ƒàa/+yÌñ²½¤&Àb?nÓ­f°Á7JVs`³zåXï,(ˆášö„õ˶g×/×K€ÍB§˜`ûëc!º­æOSSÓÔ oL}î7Öï7*¼¬âEó9/Pë°ØÏ AN{ÿ@€­¹ù§¶lj/ +–Rß᥽óaý²O¼Nõ¿NXlų́O¡)Š‹°2–Eëaq)@€KY–Kô¸öõ7ìy x ŠÐ°Xvºñ(e€µÇ³âYhzQÅÛjÎ^Œas"'Ó‹‹m⽕çjËõÄõŒ¦6ž{" ¥`ÓpX‡¼*@€åøG¶¶DÚ–›×Ž´Þç•e€Å$K€8€'B¦&€Ê$N²†,ò!¿LÒ°4“<á”#å¥9¤˜ýݳiôEVU"¶*Ù¸Szèß# Ùɉ‰®q½¹d¹ô\² d§ ` °9é7Óp­l™¬,@ÒÏ ÅR:€N{B'ñÀb™À+’ ¯èìr±åÊK[Ÿ!À¶E»(Üç|}ÆÓŒ³~Yÿ½S T À¦à"ä¨ ˜ÑmŠŸ/ ÀÒûÆ6À6H À`ÙñF¦&€Â²‘(1+[ <Ú6USFš÷‰` ËKا°‘=\XP: À¶¾.‰®iîx^îÞ,¶Š>›»TV!Ä·mÛvnÖåóríçe–cK‘®Ô Ј¾ñ€ Šÿ1ËþÆ ‰íøðkîk7mÀ­”#À*eZQ&Ê|²?ÖV’ÿçQlë«J˜#̱ $Ü0Ɉ‘£ 
—NWý¿ÿ÷ÿ>yþ…|–'ü,È[x/ {»Y7¶€¨yXó7ÓÓ Ø4'X*W9ÌŒò©A@,òʇ¤·çAlë«È/ƒ^&Ëp÷F­Õî{P‡yL¦€$ àÀvìØÉ‰ÃÀ`ÞvÇv"Àæ„£»ãUì_ùÊW>þå•×°?Ï`ž5€µg÷µá&&¼¬2©¾Å[[ "l(µópªŒ²P~0><äÄsH…X,;ù vòQCd=ËÀÚË@Ô³>I›!Ä !N…žJÀ¾ÅLµ|é§ë1°ÓB\¿—’µh¨Ø.“8á[BŠm¸Åÿ2‰ö“0d„ËøX.~/ŒVCë&sµ†LÙÕ@_Ã^ô?ØÖ"²ó©±ó)O“nºMuêܹ­ÇsŒzî¹çÔi§VÚšÔ%c®ä5ˆé¤l °Xl½yö‹¨`›^“X+¢}b¯ø?­ÏÙ¬Ô+Kk/_ƒ¶'ÞQs6b{YbЉ2n½ö²:ø_f5¸ÊGƽ^BlN …|f9~õÌìã^WÀë“_ÿºZ°Ë.LÒ× ××0‚H€%ÀÆÚáÎztžê·ó.%pÝ}÷ÝÕÃ?¬Þ|óÍRZ°`4hP)OÏ­¶VS¦Îˆµ^YéÌ‹PO,–Aožý"jØVg·ö¼Ÿ_ìy]_{냊ýÔ ËWyp‹d?ËŸ^¼¬ô¶›ÿK^üfoC™HfyvùR–ì/y¥>öþ²½\]¥|”çWÏ(û©,¬ß-ç7‰SöoÍì¢ï}O­X±‚)Càš`c»™Myÿ–­x] ùþJPÚµkWuÓM7©·Þz«lº÷Þ{UïÞ½Kûì³ß ãe£ìYV´ãW£Ð“K€%ÀÆÖ§e©àÌ,<­®ôüƒGVB‹‘›!ÆøM@Üð¿”yÚ°á¥ýÍ}ÅókÛ2ñ-õ“ãâûò+Æ·*ïÖ;§¶h»®&c›.Ås¿\Ø,ݲu¨+6»ÐN€õ†!,Õ°ç:êÂK°àê¼óÎS¯½öšzûí·Òå—_®:tèàuè/{¦7Ëñ±éϨŒ,–k¿–•Â3°:6íg!<œ6° ¬J^Qñ† `ÊvìoB¦½_%€E߉òGê‚ãɱ£f¸³®”‰ºšÛnƒ<ÎQô YX4b3Ô7+7_fêI€%À¶4V†3„82hŸ|û½ª[·î%ê!CÔ’%KÔ;ï¼:-_¾\x≥²:uÞDwmdu¢£eÑ@5–K€ÍŒùgEs°6¬š )žMñÀšá»ETšv¸¯‹Ö,åÙÀmÖ!ÁØ.cye<¯Y†lO¢ÏË:ÀÆyƒ°l­–K€õ}ªðÀb,‡ÓÒñ¨vÚIýö·¿Uï¾ûnÍéÉ'ŸTûì³O©ìÞ}¶WW›DGÊcD¨A:` °Xš¦Z\¬ß„Nf¨oÀ,¿sÈ`.ÍpåjÖ>– °VôÝR®|ãØ¦7Øooг½šíØü> †èS]ëé` °-m¨¯þnªµ=åh‚Q€Å8W,#àÚ©S'uà 7¨Õ«WGž¦L™¢½»Ý¾ôîêñµ8~5$÷IL]u&À` °9êE«?•Ì,à®ÒŒÃv®¬ì¬9æccícÛc`mlÀÚùíg·Y¾ës½Ú|,l[ظü¤Df]„}Öô!À`kj@ùÝ™@ä°XîËÞ ³Á8Õ#F¨7ÞxC½÷ž^Ë5¦„òGÕj|숑£ÆÝVÛYr¿úÃ,–K€Ío§âÌ2°·Eߘ4û€+<§²ß寕¬:ŒãˆGÔOÆÄP¨‹9‰S€•c3k‚sÆ=°!šÞËߨKØ„*À!ó:Ö† û‘µ^ÃîWm~{Yžj˱÷#ÀfhÆà8fxŽx§¨Úe^Ê!,:d,sƒé;ßùŽ·žëŸþô§ÄÒ‹/¾¨Ž=öXc|lg…uf £õ‡Ñj®–K€ÍKZÓyd`ñœ“Y†e‚#|›a·2k¯„äÚ^Ì €µg– dR&Ù.ÞàZÖž1uËAq¨ÆˆµVÑåhpFù‘µaÖ èEýâúøA5Ž ¢ü` °Q.£eÛÌCY„ €ýêW¿êã6Ûl£¦OŸ®Þÿýº¥'žxBõëׯ²‡‹ñ¸Õ@÷©ü` °Øø ®éÆoT;w.,Æår|lý€4ìË,–_§–¡’ûëºN¢¾MMMS'L¼‘ý¹ã °Ïì8ó×`j6ùˆ§¿áo®|ð7~CB¨­ùÁ6¤M<ø[>ðèÚKì˜å™yÍr2e»N’õ‘º™uÆvìk×ypî€wÙ&e¡ž€ø(?ͺ0Ž-0Ä2„8ÊÛi­²Øá9,Öh}ä‘GÔ1Ç£zõê¥î¿ÿ~õá‡Ö5¤Ï9çcýÙ&o=Ú8;Z– $` °ØXûµ,ÞEe °Ñ<›ëÑÇé럸Öö¶\uHØ6¸¥QQ9@€ÛLÈÃo²¯@1Ê0µ½ÊÈ‹ñ·å€QöÃväC2ËEõä7”)PjzP‘y¤Ž¨Î eâœðmB|^ßf} ,Va"®(ô,£•G€?¾zúé§Õ´iÓÔn»í¦<ð@õì³Ïª?ÿùÏuMX?öûßÿ~É‹ñºS¦ÎàuMñ›x,–Ëž8J°Ø0í pg{FýBˆí|€@s쬩9¦Û$m(Ä6Ó‹[É £Æ4úÍúáw”eåJXt%-· ¡Î8f5!Ïå´Ï=À>õÔSžGÇ/ÁëÅÄH\pBŠ¢¬¤Ë 6Ìc)t^‚NH€]¸p¡Z´h‘š0a‚êÑ£‡6l˜ZµjU]!=sæLÕ§OŸÈî³ßjþ¢¥¼¾)Y,–º¯â À`ÃÜ 6üa_`Ëž™O¼²æ±MϪYF§_Þ 
ã¢æyáoxXMFžJõðÓ%Œ®vÞÜìñǯvÚi'/aœ’üØa‡E(gÿý÷¤¬Œl_ÝÀšji9Û—€S%ÀÂûŠÙˆ±œÎf›m¦ÆŽ«>ú裺§qãÆµ{Ò©ÃÔÊUz©Ÿ‚\QëD€%À`sÖ“Öùt°ØÛ6nàÇô¢Jæ¨Ö„Úz,¼»fì*L€­!Ü÷þç)k›om«¶Ó çRöºÜ]vÝÍ)o7=¤Ç¥Lä Sç0õÅzÏY»_ °±öÛkN€%À†X?Ï)àði{+ò;þ6'N’rÌYŒM(µAÿ›å:åcz…ýV–ø‘ü2i“ÔÍ †V~—±°æMcÖA~G½ñ{”ŸÂ,pŠPEñŽ<ñ&€@Åÿ¦÷+chM€Å>È+p›6«]l”·6¬×M+¦°ŒŽÌB,“8Á»ù‹_üBulß^Ý}÷ÝžVöøƒZºt©:ûì³=½öÚkÕ_ÿú׺§_þò—Þ¹àœÕˆ‘£Ô«×dÙ¢lØ{ù³k”`cí· ° ?³ã|¡?ލµ8ó’žßqýBhñ›ßï&‡™½7LÞ m*•Ui›}>€W{fæ cmw¾ • Êrq¹°b@(<+~Vü&“CÙÛ± »i†XlÐ-RÓvÂKØ{î¹Gí¢=š?\}µ]Û¶ê׿þõZ‹åmV®\©,X Ž<òHµë®»ª¹s窿ýíouM¯¾úª:á„JÞØN;«YÎc;HÐ "ÀfÌâ4dóT6Ö©_î¡svʉØì>Sê°hNðXÖ l~žÜ(ÚsÒeHHrÔÇ%À–™Ø‰[USã$N­e#¸„ØÙ³g«æþPõÔàª_Š)Mj¯ví*ì+¯¼¢xq´Ã;¨Ã?\ýñTŸ|òI]ÀÉ óDXgžŒç´Ÿ 6»ÆfÚÛVZêG€u²Oê\º+©ýC€Íî3¥^‹a3„·šV®µŒjŽõ>G¶ÇGqŒþºSj-(ËXLÀd{LýBˆíÙ‹ñ¿LÜdzq±¶,n˜¨Ö˜Û{Kl­­¿âþ—{ê‰'ªÚ´ñÀU’+ÀZ1;0ÆÇvéÒÅ[—ë·þýïO<ýãÿPÿùϼI§ð,Àø½´¾E¨6»ÆfÚgçH€uê· ° F¾DÑ®ã(£^ëÔBÓœI[/›ü§¡a¼þî•æzÖZ·,,‹ñ¬æ$Næ¬Â2i“ä‘IÌ1²&àb(ü–I¡âÑjË'ÀÖÚú“ØO†PÍœ;Bç"c`1Óh=æuu ûÆoxÙ³Î:KõèÑCM:U}ú駉¤Ï>ûLýûßÿVÿüç?ÕÓO?­.½ôRl (le€=òèfõÚ[äîY‡ñ›Ö2 °Nýv¤{†îƒ§é>˜)[`3i륇x4È.ù¯övj«,.µ»e`á•‘‰—Làƒ·Ð)3 ÛË與՜Ø^FÇ.ÿKy8nµ€÷~ØXo«ÈŒÄwëîy%ñý‰ž胔Ír‹P«1öL€ôÔày¢÷*ÏÍqÚ#Û¥©I]rÉ%­&q’1°B,X,f&~ûí·½‰ž ä…òbR(f ×Ï?ÿÜóº¾öÚk^ôŃ>H€­¼¢ `+ìæ[l©}bAU÷k5÷xœû|çÁ )Îc¤±l¬S¿À¶k×î˜6Úh Sö4@ø·Sk ÎÔ¬³Ô¼ìhðaR’ÃX3,NÃìt ³CSRÍš«‘€­+­[m™iÙ[só¯T@d†•¬ùùW¿]ÔÇoTª£2̯½ûl§ªX`1 ñ6›m¦–´xaá]¥Ó5ÔvÝpCuÙe—)ÌBì°ï¾û®Z½zµš>}ºêÝ»·:ùä“Õ›o¾©œQ%€ëÿû_õᇪyóæ©Çœ['p•öL€M'ÀÆÎð†ÝzçÔÈž³Q=ã.‡ëÔoG°NGc¦¼+@€µ@v†Ù4Ìb,jf?Ø©õ°26ÖÛ.2ÃÊ`åYòß6ê³ïÿP­¹ýÞÈŽÖƒ! 
p»ŸÀ^sÍ5jÿ 6ð¼°ûë vÖ9Ù ;nÜ8oâJXØ?ýéOêƒ>PW]u•êÙ³§3fŒ·n,B}«Mâq…Gž^x °é{I€ °¶7ö…å«<­ý»Ÿ×ÖÕ“‹|؉“nY«ÜrÇóäéÅËJÏ”aþí…ÆþæsGʳó†}6¥9?Ö©ß&À:ÉÄLŽ `M€µ`v¹þÿ\xmÅLM¶¼,Ʊ¦9 8R+m'ÀÆz…ºrFS%€mõéÔYýýÔaêÃEK#;¶«!wɘ+ÕIúØ®ù‘Ï`-Z¤öÕcȧ·ÌB|æ™gª-µ÷õá¯,<²»hìÿþïÿ†XxIW­Z¥†®úöíë…ùþë_ÿ •\1ÖáÊX?áÉØtÀ+Cˆƒ¯ƒé ÅxXü/÷,ÿïÚ÷RÂ6@!~Ç‹*É+¿ (– çÅvì‹{ßH²}<\9êqùãKuÂvÔY¶Ÿ÷ó‹½zÊÿ§ Þ*/þó<ÊJ^¬S¿M€u’‰™°˜üH§ÑYMÚ»zM9`­ô»ÞoŽÞÞ¬S££°uÍ–w€­Ó¾o¶%”/‡}–ŒÖÆ«F‘þÛ¡C«Ùy]ž+Ÿ÷ÜZ}<îZõþŠ×3â?ò5A‡5»‚åöŽ;îPýô9ï¬=°XvþüùjÀöÛ«1úœ;àö½ök,3#=óÌ3jß}÷õÆÈ¾ôÒKÞÖ „1®þ裼0æeË–`ë.ì×ÎèuóÀþL˜ 5½ª€R@ümÂ#ÀÒ„Gl3·Ûׯ!–ãbM 5å›û ñÌ6¿a› Ùy /&À:™¼X'™˜ÉQw€ÕO¢.FZžóhýL§É:Äx£ÀuÉF€Ívñ¹ºÓߪ¡á7ºñhæª)-Ñû?PcµÖ¡ÕþÛ64\õφ†Ïóüœp9·5ÌÞ1ø{êܳGªªãJÃuù]õDS·ÜvÄ–Xx5þÞ÷<ƒû /x“8 Ðk½^?Ó©›3Æz«˜uØo';„k§{î¹Ç +†WÛàYµÀŒ›…×Мv€…— Ë|yáúÒ"î|Ø`€O¥ «¤>ŒoäHo©íu½ž6À @Ûá¿f>ùÌ<Ø.¿™k{c]ë•Å|X'S—ë$39* `‡¸fyÏ£v¡Øáð"9Šœx6lvö©ÁƒÕI9Ø! “òþŒp9¿?m¼±zBƒW÷“ØàP €ÝRÃò£ÇÔ °³gÏVõ²:&ÀÞwß}j;=&猿M¯ª„‹÷ÔZéºù¬éÁ•}ƒÖ„Zlâæ^–H€ÍÒÕJ]ClcKm¢aQ†ÖB<½åÜã^rGÛ÷ ãkm7ØìlÄ!ÄxÉÒTk{ŠzD0Dy_»–¥]Á‘„#¹šbÌRüW=.5©%wÞX½Ftð¡jÊÔÎÐTÉûÜsÏ©o¼Q=ùä“%,Â}: /ìr¶ìÔÉ `ßÿ}% ³7®»®:lþ0ÅLÅ»dÉ/\ø“O>ñàõ½÷ÞK%ÀBóI7ݦöÙ÷€´ ¼`ƒÇƒÖÊYÝW h.AãâÁÌb;|ØE‹¨åàø8&6êÞ2Wå`su9ë~2°mSëHŒ\xM]¼#t11B…‡êüIŽ{u£PA¬,Öp}ê©§ÊÎŒu1qSØñ«W\qEè}Â#êülä÷PÆ t¹ Ðu§h€ÄÒ:IŽ{EÝ«×J“8!„‹I’“B èüÍo~£öÒžY<[wÑßðÊ– !~ýõ×Õ]wÝ¥0±$,àîäcŽiõ»l¿ôÒKÕüãVÚ´ì|=IױǨ:èqÂ&°ÚÓKˆ-7U¼¨Ž+“-Ù3 ›^Z„¦Ë$OæØUãM+9Å~ævÏjŽYµ!Z¼ÁR9¾x~Íü²Í¬?ÇÀf¼­­úØÚôãÞEV Àj`]Õ²„N=Ç·f`1#0Öf•6wÒ3œbßr0h®û ˜E~|W‚Çûï¿_uïÞ[ä~ís`á™ýTuü‹öÂýI{ã‚@8ŽíÕÂkµ‹ñ§Ûvéâ­;Y§ÃôDLåö{ßþ¶:PCnŸnÝÔ¬Y³2–´¨§q€õ輪Î÷.¼ˆ˜eKÑ,\¸PaJXìĉÕ`  ˜Ì©ƒ »|ùro+&YÂXÕ·ß~[]rÞyê§-ãe»ðÖ=è uà 7¨ú˜Wõ«ê¤£V«W¯n•°-ÖÛzÀ"\xˆ^ó7Èó*[D€Å8`´ÉzÞi>¶ía…çÕô²â„èÂÃê·–j¥år‚Ö^(Û/äx~/d¼+<®€U¿u`íßð?òº®Q›ækU®nX§Žžë$3Q2›òI˜2 °¦Ù0ÞÏ0ë²K€å#Àï•qôO=Æca?œ·(W†yµ `/ìõÒ:ß7vêÔ©¤ÚC8nÑÐÚY¯!»·Þð /ìÒ¥Kf,–4~üx`m°­7ÀÚm cFlNÚd{f‹BÕ=ÇrüïíµZãÔÉoâ8—•² °NöE:H‡*@ò¨@.ðà a¾H€Y3ä° /ªÀ([ü†¼HæøU3„û˜åIH±ì' -+0-^b€‘Ç–}M¯1¶¡>øÍo_W›c`c½­s›qwµìù眣~ºÞzj¡¾÷wÚb‹À‹»žuyþ]¡Æò?¾á‘Å6xaO<ê(õÎ;ï”öhí™5ÃßiXóz<¦_l ìÖ/&Àr l”÷­½”N”eû•o¯½ŒNÜÇÌBùØXûmN¨@ÈÀ"Å+0‹o9H"›È#Ð)aÂ~+å!dIÊ–ñ·r\lC9´&› ŒÐdÔCê ùñþsTËå'ÀÆz—`õä.• ÂZãW»j/ëj,¥×„}üñÇÕË/¿¬ú}ó›%@¤þ¸©I~ðÁª›!¾P‡›`ë­'«½°˜( aÇHXJ 
`µSš—Ñ­—èÙ§±œ –› cõSl¬ý6 §T  d`í1°Š®k™5á²Àâ¸&À ú…›¡Ê¨ŸæŒ: 9†6h¬mTà*å`c½K °1ìÊ•+Õ¨‘#=/ì/´'õ´NPCöÞ[MÑKäˆçð:~ÌDÿûß«³N;ÍÙ[ô>’ÇóÂyd V`ß|óMe§,,a  P/ꕵ6@€µßfáT€ d@Ì,`PBÍYˆ]Öž…Ø„ÍJk†›e,òÂÛ$áÃò-¡ÍõC1ÀÔçØ#í>©*`cØçŸ^uÒcZWé{«Ñ3µ'ք׫/»l-½çž{Ô€–ex×óÂêýP€uܸqê(VŒ1µv"Àv²;¬oþÚ,6©.œÇ¡%`ÛÂÆå'% d`Ë-ƒ&à0(„ØÞûÈo•Ö ôVÐJPl{Rs°˜0mŠŸ/ Àְ矾zòÉ'[­‹Yˆ1‰<°¯¼òŠ:þðý±¬z=êµà³ÛÉX@ì¹ë¬£~¢½°È{ÕUWy XõK8&ê€f ÀŒÉ˜AyÞ¼y^3^r=øàƒ ëÉ¢ ì¹×¶ƒ€v@ÈÊdñšÆwM °41¨@â DÂK‰×:ÇŒä‚hÃqà\m¨Á›uˆ«_y&l–Ûnzeò+¡ºÈ¿Í0`^wê°öÒ9AXÔõ†ÁëWgl.ï2‚K•‹µX·èØQuÐ`úë_ÿº"ÀbÒ¦ÎFH0†Çixôó â·»ï¾»äÅøÙ“ôŒÄ½»vU=ô·ìk¯½¦üRšËÇ`©ÃõÚÀXj‡c`ヵ­W ÀæÒ^àI¥[Hx)ݧ˜­ÚErAÒ°2»°Ì> `µV¼§«æ,ÅAc`1vÕÞ/`œÅ++kØÚ³'ñÀBmÙsë’g–K‹œXV:ÚÖÉD&À:ÉÄLŽ `…ÊT6,–K€­Æ¸Õªµ !ÀÞ~ûí%/ì:„¸“ž˜éú–u^áa}hÝu¿\ïUƒêØË.S/¿ü²—f̘áÁ+ 0+¿‡ùþå/éy`¨A)­ëÀ\/s¥Z¹ê½Â½D¡6UÍó€û¸];¬“‰L€u’‰™ À: •©lX,–[ñé°‹-Rû¶Œ…Eø0€rèÁ«õß–‡§h=tÏ=×>°o_o&cdõXÙ°cÇ1òGáÍP”Ò °Õ\‹¼ìC€uƒ ¼\ï"žÖÉD&À:ÉÄLŽ `…ÊT6,–K€­Æ,°wÜq‡ç…5g!¾óÎ;Õßü¦ú‘þÝ›m¸sgõì³Ï®¨'Nô&rÒ²Õ<°j€œº$ÔãwçÍ›§üqo‰¯|Ð[Îç·ç^ ç­¦-Dµ–U[Jk9X'™ë$39*@€u*SÙ°X,¶c¯À ÷í×O5~å+ê׿þu«Yˆ¯ºê*Õuà խ·Þª–/_î›0‰ ·›†Ý§žzªl>¿ýñ‹_xØçŸÞ)`ÓLØt]jž ܧò5$À:™ÈX'™˜ÉQ¬£P™ÊF€%À` °Õ_Õc[±;ºï¾ûÆÀ"„P8eÊo› °AËÛ`ûu×]§Õ^X,«ÓüÃV\Ç.o̘1jèСjñâÅN‰›.`"À¦ëzTó\à>ØŒ`l"²ˆ’Ø<6,–K€­Æèœ0ñFµžž¤  ºÎ:먣Ž:J=öØcÀ>÷ÜsjÐþû«»ï¾»•6hyÙÞI{_±Ô&zæ™gJKâ<úè£êà}÷-»DŽ,êàšBœh"À¦çZTóLà>Á×X'K:2€m×®Ý/ÑG1eOƒõ×ßà§Öœ‰¬Qâ9šj="–K€%ÀVkxb¦Üc?©d´×Ëâœ{î¹Àb ê’%KZ,Ö‚uI#Ï>[©á Ë>“'OöŽ5aÂßr.ӳà èuMØ`£»Úöv?lz®EØkÇün׎ëdµF°MMMSñ²•íÓ­}¦I§–—N & 6 #,c¨.ë†ZË#À`[ÚÐ&ú»±Öö”£ýÙá¬kvtó-U»í±W d{ôèás56hmVs;¸k‡Þš±=›š&Âö[n¹EmÕØèmC»LX„3»&lz lz®Eš Ù<Õ…ëd%`Cô¿yº?Ìs‰`aÛÂÆå'% DòFAöém¶QÏ Æ”! 
pÍæj×0%m2OÕ ÀVÑN™:Cmö.%ÝW‡ûbvß 5Yý¶Ÿzâ‰ê"šŒPâ=õÄO#Ï}úx³c|¬kBþ¾ùMuŒÙ5^°H[³¬‹/¾ØóÀÎ;7t"À`ëuáeO‡MjÀÞ{¦­¶ÚZ­¯£\ò"φn¤ºèµ•]òo¼ñÆ ÷§KÞ6mÚ8åCYaꦾôøø¬A,Ö©ãî¥sÕ<Ç ŽÄIœêÿ\¯ö¹J€uºW2™‰›ÉËÆJgDlÄo€'ß~¯g¤·tJê»ßý®úÝï~çÍVìšÎ<í4u¨†V؇5Äî¥ nÙôèÑÀ>ñÄ¡¶þ†NQ=°‹—®PÝ»÷P.D1ôë×Ï%«—go ]t‘S~€îœ9sœòâ>vý„©s˜úŽá­­Ö@®Ç~Ød-lýŸëÕÞgØdï•$F€MRm«h dÊ(ª¶ƒHz?Œ~öHo\,:§F=£ðgœ¡žþy§tã7zc``ñ  7nœ·¿,`´šôÈ#¨|Pa2(ÔoϽ°Dü"£R›#À#a$À¦Ïx'À&kJ`Ów¸ÚØdï•$F€MRm«h \b„L:øÐ’7vÓM7U×^{­·4N¥ô«_ýªÀ.×¹³§qúéÞ~ð4ýèG?ò<»Õ$X¬eK€MÞð!À`é-ZWïù`“Ž»jP>l¼÷F=K'ÀÖSýü{>%Œ;áç °1¬t\³§zmó­È"4rêÔ©jñâžiÒ¤IÀbì9z)-:vTøMò ÀΞ=[U“î»ï>€1¾màØãOd;H H{ À` °ì‚£T€K€m±maãò“°)¹9©Æd}hSü`‡¶±ã®Uíõ,Å€F$@$B€±ôŽ™n¸áÕMÏB¼¥Î{ÆO~²Öö /¼ÐÛžÔ° ¡Ì˜ÄFêpÒ©g¨•«ÞK\‹ ·ÒyÞN€%À`ÙG©–ÛbÛÂÆå'% `Sr!rR lë IpIÐó(,žvÆðÒøØvÚÓ:räHµhÑ¢Rz衇Ô=TáÛü]þ¾à‚ <€Åv×tõÕW«-·Ü²®ûìw€Â’&yÅ´ž–K€Í‰E‘’Ó À` °)¹j`ÓwM²\#,6ІY‘â íÖ­›ºþúëÕ3Ï<˜°?üáÕ¬Y³ÓwÜ¡X:NO½<É”©3R¡AZ3îz` °Ø,›é«{šö¼Ÿ_¬Ê¥×Þú æ¾èéÅËÔ®ýwWøŽûÙGù-6@м…Š–Á ¡˜,ªK€MU'‡å1¾Ñ¥k 0û÷ïïͼpá²éç?ÿ¹°¿ýío˦ûï¿_qÄ¥q®XÚç’1W¦êÜã0²P&–K€¥5¥YØÍ·ØR6lx h£X”ñC«–¯ÊdÿF€òNHWYØt]¬×†K€Me'¸\OOØ„ÎluÖQÍÍÍjîܹjÁ‚k¥Q£Fy;sæLßtþùç·çzìq'*̈œ¸+B °XlÖM‰Hê %oˆ¢¤4¬ùLÀ>úÄöEÆÐ%lw@:Ë À¦óºdµVXlj;OŒ=öø“JÞXŒ½øâ‹ÕÓO?Ý* À>ðÀÊLX;sûí·/í¿ÇžÔcó¥ö|‹«~çH€%À`³jBDZﺴ9Q”˜u€…Wp+ !ÇòìðÂÃ:qÒ-¥íøßc3„ûKYi/&ÀFq¤³ ¼¡šÎª±VT€K€M=Ðar¥]wÛ£¢˜| kÃΟ?ßKð°Â;mÚ4/ÝrË-jРAÆxÚîjòí÷¦þ< °Ùx¤šk‡qßÝ»÷¦W/c°Ü”ë' véÒEÍ™3Ç©h—®Ÿ0uSß{ï­0Ô ÍëµOŸ¾;èUÀhü”W€«=‘›â™E[|XÑ~ñ;îC€(„‘ð÷‘G7—î Ó³{ëS½ýåžÁÿRV½î‡JÇ%ÀòA¨€‹Xlf AL¶Ô±c§˜|ðÁjÆŒÀvØa ë¹"ÔžZt‚jÄÈQêÕk2sŽi4(â®=°ÁHQZ $ÀÆÿâ„ëbŽx€_x,`óò+Æ·ê³àa/+yÌñ²½ò¬6û™p÷ó¼Öò °N÷ 3QÂ+@€%Àf .¼TÃéãcÛ´iãÍ*¼ûî»+Ì\ÜÒù©ïýàGçšð’HÕ.Xlà¦6·v V?³ýÆÅPáe,ò˜Ï[Z?€Å~frµÏé¤ö#Àæöþæ‰QH À`3°Ò‰b¦#Ž:¶¬®ývÞUÍzt^&Ï))!mÇ!À` °‘öëY-Œ[%ÀÊxXl«¦ß¬ÿƒË 9T€K€Í4ìaR¦=öÚ[õÙn{5é¦Û2}.iˤêC€%À`sh]„?%¬X{<+žÃ¦U¼­æìÅ6'r2½¸Ø&Þ[y¦G±\O\ý=°áoîAЍ–KèËH¨m\C½Ë%À` °É›ÿihX®CXÎÕ©GòG÷="V÷E˜d ðDè/ÀÔP™ÄIÖE>ä7'63“<á”#å¥9¤˜›’»‘Õ )W€K€%À`ëÚ°Xlò–‚¦%IÃìý7H¨)ùš”ŽX ÀöÕ¥”@<+Ëè$ý¼¡€Q,¥è´'t,–ɼ"Ù³rÛåbË•Wï˜öñ °u¼yh*!°غÂKÚ:OÖ'þYm °XlòVƒ °È~¦avò%_£šf!Æ“ uò<+[McOØTMiÞ‡[‡;/¡C6Ê šÐñx˜|+pƒ>=®+üå5&ÌÑÉ6p À` °É~kyeWk˜¯&ñ©Å‹ú Ñi:6ù—Qq„ Û6.?)Q ™$%W"ÕhÊÇiDv„—„á%ÊŽeeÓp!À` 
°‘õaά³ËµWv¸þmç„ÏX+À∣‘òì•¥tòÚßE°Þ»ŒðÍ{Ä¥–ÓBÇ¥.Ë-ºX,Û@Âm€K€ÍÀ~<ñFõÏ=8§ç×ßàó–`¬iZR€µ`öá–ñ²ˆŒòÀ¢>ÓÛ¶mûô}ò yy>¯ˆ6ÊöɲjT€[£€Ü TP€^Âð’çŽ˜çææÞgßTï>Û)€lÒ6ßÚVõß}ÏÀsÙ©ß.ªC‡ÁôªsŒ?^õë×Ï)/2…Â.]º¨9sæ8• ãÒõ¦Îaê»Í6ÛÔµ½œ¸}ßÒäGÕB`öû¸¡ás¯»z‡††Eº]̉ -Ñe¬‰ œyºŒÏÏþÙùìÏ3ØŸ`ók£`ó{myfõW€^Æ:<ÌÄh¯q—48b–HÌ™ôqór¼e+^÷fÑÌKºåö{œÎëo¶Ù7œx0 æ`ûï¶›ºdÌ•NÇѦºõîÂì_Þx®¡á7ç74üDwÛðœF‘†ër±µ–õËuÖYg5=°n/ÓÖ`ëoÇUl\ʲ\* ½ i{˜çµ>æ"최cfdÄ’õ†èZ4ä¾õ1ô/]¡ºwïA€ é1°÷Þk-’t~_¿tùH¿tqM'l¾ùß&|1†t`ŠR(×¡Ï [ÆÁöˆÉXÀdQ—×X¶g#ç} lÒí=Éã`k¼R¼;6ŇU˼Ø„<°æBëµtŽØúÀW-׌û~qͰ_²{˜â4lØ6ܧï…g15—0f ­Ó5´žóäMQiR² °Ùí°QÝé+‡›¾kÂåGXž:>'Ý¢ð7¾mCá¨Ø†„ÅÇe;;—Í%l¿õΩeËC”g/vŽýÍcøer,»®f¹~Û°eÛç‚óÀoX,¬n.¸.úà˜æâíf9åucP–Óç-Ͳ¤Nò›Ÿâõ;_Ù¯’^(ºHûÍ6#z–«—ß¾a´aÞä ?,¶ž]g™u`×hh½KCëP½=êIšâ<Ý!ð¼Ê’X A_†„(¿>ÏÔpÏTlœ·I}Ë&ÀÖW=ß Ä °èä¾sÈ`/Þð 6;ÀjO$Þ[l °AšÈñp.È+0[I—󵽺¦¡c;òášáز]góM¿À±Ýnè ¨ÝŠl °Øúuu ZÇkOkÿzÖ!®cÇ °ö3¿Ü³ýú(¿9ðrÖ|Amþ/¿ã7ìkn󋜲˗ü²¿”'õ)yU®®R>Êó«g”Ïʶ-l\~R¢6%"'Õ@È Ú?_(;Àšßä?f‡g¬ 5ây3T:UìÌS@˜]F¹ŽG<†öörÀhæ÷;/Xˬݛ`ì×aVòVÒD<¾6¤—Óur9_¹&¨ƒ ÃvÙö9ÙkzÒ~Ís¯{”FËŠˆ °Xv¼ñ(7ÀJÔK¥ç¢¼0O­_”½ûÃ~Á}°¿ýÌ·=½¶­`FõHxÙi–g¾Ü–>Ǭ«ýÂÙ —޳?ˆ`ÉKñÜ^U—Ê RµtÜÑGlkQê °F*aÄQ¬@ŽKç“&€µ¡ÖÆZÖOèŽNÔôv×°¦q áÕ¶Ö4\ìsáܾ𛋿íñÎ.לy¢‡ÔJš` °´JâQ n€ zIˆÑ6°š/˜å4úñ† `–‹¼‘ßí!*øÝ/ZK¢¤¤.æ±ÄÖ0Ë4£ªì—²¾Ug_A€çžHC©Ø4\…üÔ›€h1;ˆZV<‰A“4UòÀú­m*õ´Ë5=–.É0X;õ’1²aÖE©»__n­× ó•±­¢µßÅ+<ÙZø…XÇi\°ìè —K€Í)‘®3©7ÀÚ°Šç¦€¤¼ zî‹Õîk]ÖŽ´²Û¬Ÿô‹2–WÆóÚÃX’z)J€M×½em°QªÉ²°)XéÜd¢$ùßÔìÐï ¯ ]fØBY‘ü DŸ¤Ó·À“>e¿9:yclϘëµ’Ò‰ËØT©W€•·Þå4e˸]sâ*3œÙÔ#è|M½P®m€28®xåq,bíë„s”ú6£ƒÍ8´$À`i‚Ä£@ÖoB'³ zî,ÍÉÃx`Ã74J¿$ßò’ØöðÆñ,4Ë$ÀÆsO¤¡Tl®B~ê@€­3Àš„„ŠÊ¸F|WX 5={fÇ…}eü ~G2UfD.×!ɘYÙׄF³\³ž(KB]Írm85Ë–I!üeH'.õ0.,ÀVÒÄöfÚ/Êét¾RGó˜×ݾŽö9I½$LØ`»^f¨XÜÆ˯’ °Øü˜‘œI_]Ji)œZJŒ`ñLöÔrQ6fX°«Vö‘™ûåū߄‚µDk¹ô¡åúæ8žÿØZZ~º÷%À¦ûúd­vØÖåaï²6«K9f”YËÌåö£®•έ–s°Ë­E“jëaåu¹Ž~ÆE9ƒ£šò]êÀ<ÕCj%í°ج 1×w .NLj`ñ2 eG0á%#úÙn†Ý†:b†ËÒyò‚Ø|QE´–9Èg?ðŽ»? 
ÀFq¤³ŒºZ¸Éù¡Q(@€MÀÆÝ9ä¹|Y2 ç—²xî2> FŠœ“=>7‹çÅ:¦°Ø(:ñ•‘€ÅóK†o˜3û–‹¨’ˆ%ó%cP±”+ûÊðê"ÛÅ\Ëp#{Æd‰#Àæèîâ©Pœ)@€%ÀÆ:ór’ "`gå±;ã$ëű`\˜çTnÜrÇbñx[ýt%À`sfOÔz:™X¹§åÅb¹u\1‚ßHÈoÏêk.@Ï~${"'Ù&åÚǶ£qìcá¸ö>R&êkïŸdt=°µÞFÜŸ C,67KøJ¾¨uíZ` °Å03œÏ2“ËgaíÏBSC¬óýÂŒT Ð ` °XF#„$ݰØB[kŸ<–}‘7¶8¢û¢Y—ɤ`Õ‡ÅP*¡X,áFÛ@Ú–a_ž‡¢°ux%ýâ.èxØ<ÜÊ<*¿X,á…FÛ@Ú–Ÿ©#`ëð ʤ·`3uϲ²T n ` °­à9ØK$Ý¥õxÐÅžL#­ue½ÒM€%ÀÖ­çOç °X†§óÞd­¨@ê À`[,f»õ›11Ï@„Y‘‚κTZ¼>hnO?T&y°ØÔYõ­–K€­ï=ëÑué›Äz^$Æë“Z¤8×@ˆIÒÀMúX4st‹¯§É#G}°ü޹~_%Ý¡OÑ?évX”ã`;wÞDÍ™3'0~úéjë­·Ì'eõíÛW577;åßtÓMÕøñãò"¼Ï¥¾È¦Îaê»ýö}3wöé»Ã­Ÿò ` °Q,l[ظü¤Df]Ϊ•’‹‘ƒjà…?_*Ph€È!™`Kr ¸ x©w}ü4 ª3·Óóê×ÞX½Æ{q²ç^S¿wÑÛ+0Ÿ”µÍ·¶U;ì°“Sþ­{m£vÝmw§¼›o±…S>Ô#LÃÔw=(À–î+¬“™A€%ÀF °ht´qn½d2`“Ñ™G)¦™2Š¢6à‡X4åb‘s„Ëêfà}#á7Ü·Þ9µ•N€: ÁÅvxlñö±X¼ºR&<ªæy l»üJõ‘}åx(×.ÛPwüŽí¨£¹ˆ;ê~Ú°á­ê!y‘߬ô±½ÔQ_–GàeÈW À:XlÔëÔð˜)°ÉèÌ£S¬émÄÖw…~á¶fX-þFB~3j†ç¢,@!Žƒ„¿Mx°õ3àýêƒ|²Êó;¦lGÝ‘ß&„ ¬Ú0Œ²¬æ¸WìO€Í\y=ãnX'ƒ‚K€%À:Ý*ÙÌD€Íæuc­³¡@aVÀÌže×ÙµÇÉÚIä7'D’üR.¶›ÞO»<ü_iŒ©]9¾9N×>¦Ÿ'Øô6›‹zVç*ÛÓV·ñÍò xlµµ¬“@€%À`n•lf"Àfóº±ÖÙP ðkª~cNMøƒwÕô –Ë/W3$Y<£a<švù¶÷Têoæó +63Ëpñ°Ö{.a¢6˜ ~Ô/é6@€u2zè\£rdjjjš:aâ…íÏ“nßQ6Bm€e¤Olú® k” Ûá¹z`ÑQX%,Øö¨¬l •o×°ÞK,áǵ­2ÛŠ´l²FöŒá#¼h¦li@€Mö^IòhØ$Õæ±Š¦@a†V%ø4q„Ë8W{]TünNzd† ü™!Äa|»ŽW¶=¸~!Äö$Mf˜°éEÝЖ«£‹‡6ì91?A‡m ßm€›¬)Ñ®]»c6Úh£%LÙÓ cÇN7'ÛZx´¤ À&¥4SD °2 ±Š‹I…æ8S€«=ÞU XBŒeB%s&bxn±/¶É¤K&\b{¥5YQ¶]ùÍœÄÉžJ€[&q2ÁÛC6ë(ç zpâ|ƒF )3xÇQvÒeâÅN5/ dVó¤ë›–ã`‹hNðœ©0 À²=D©zîe/«Ð p³—½‘Yxa„›<ÉŒÀ~“>a”%`i‘2 0¶ÛËÔ­³êWYÞGʳ—Ñ‘™Žå¸~ËèØç-y±¯9)•ß,Ìi1’YtÂu½ÆLÇ1ј¼Ü ÛÖâŒ\À‹µj :ì9Ô’Ÿ›qË€ÕÏ¢°maãò“°)¹9©Æd}hSü|¡@¡6ŒwQ–Ãq™ô©Ã/Š}ý&qª¶\s«jËà~é͸®K½6®ãÚ/­\t‹`ËMäæR¯¤ò`ibPÄ /%.yåò‚¤ì‚d¼:ØÖ°Ð+!À•–±1C‹MϤù{žŸZ ͨÖ^ò§–:qßâ@¬ßÄcö²O€0Y;YÚþ·=‹¸çäþHúÁ$ʶ×L6‡U~ÇÃqQ>òâÈcßãøÝãŽ}¤<¿çœ‹,ÊF>û\Íýru“óÂvꀲ$Úï¾i¹Ï°·”Xý,*@^JÙUãIÙÉxu°ØVÐã4`Å4bÓèöû½ž†¤iô×R?ã½–ò¸o1 Ö^ÒÉ-QmöUvh»¹]–¡²Û ÛQ"yt%4ÇCʳNj#„ÝÛÇ(7^¿W¿.+k2K=Lª› Ica_9>`OÖ––{Œ›qK‰ÕÏ¢䥔]5^”]ŒW‡K€-¼×9-F.ë‘O €04¯³=k·Æo‡ôÛ½åÚíù•ýÍ û6°ÊÌáòRËX{¶oÔ£\x1Ž  6_Ž™‘Au«‚ÌâŒ[!­«ß˜«³áÉÔSòR=Õ÷96/HÊ.HÆ«C€%À`µáMx¤qµñÚË= ” ˆš^Q3$_ /h‚3»þQ­™,3x£|À£€š[®,óð7¼¼v„H˜zÄuƒÊ¥ÖÉR¤;Ór2V€¼¬Q¢9xA•;÷#À` oX¶Û€„éÚ@é°¶WÕ„Vsíâ `’ñ즷ÕýÆ‹—Í0àÀ»kÎ$n[°×£vÑ'É<X'[j 
Î5Ç)'3Q`ÈKÁ%š£‡>ZßDȃåY,–ð#¼$i$óXéô"— !ÆMØViV_ ­ÅØtÛƒt½ý<°v",“Eù¬9îÖöÀºÖÉ`ƒê&ç‹1öÈ+“F…é ÍâÚN€u2£°N21“£XG¡˜ dQ,–K€eˆ± Ø“8™3ɤF&8Ù³ü–› Uš´È„<Ÿjÿf¯kŒú˜ž^@¢ –&, ü HʤJr,„û œ¬]Ê0ë&3#ãwX™‰Xf\ÆïÕ,ñ´šå`Ì#¬“LÌä¨ÖQ(f£YT€K€%¼Ä/IÇlì}7@lê†S–ô(@€MϵÈCM&ë“@›âç hÄÅhÄ*xî„ê¸ÛÆ·tðàœ\/ºè"õꫯª·Þz«Uš3gŽúö·¿]ÊÛ­[w5é¦Ûøü‹éùG€¥‰AW€¼”¸ä•È ’² ’ñê`[_@p1pqï,Ÿ€XÔ6€ñ¬C4½ªÔYg¥^{í5õöÛoWLS§NU½{÷.lï>Û+„U˸Λ›qK‰ÕÏ¢䥔]5^”]ŒW‡K€¥±JhgÈ`X¶âu=ŽõŒ|b\ë°aÃÔòåËÕ;ï¼*]ýõª[·n¥²öÙïÎXa› ÀfÜRbõ³¨y)eW$e$ãÕ!À` /ªqypX.=ÌÒ0ñ&`°b؃7!ÓI'©¥K—ªwß}·ê´jÕ*uþùç«:”@öØãNTe¶¿ÚÚÖÙRjrÎÉŒT€«™jØL]®ÔW–K€¥qJ€eÈ@¸Ž9JfS 0‡ªž}öYµzõêÈÒÊ•+= 6ÇåŒÅÕC,ÖÉ„’;åd&*¬y)X£Dsð‚$*wîF€%À^2/ô€UY×3_2æJÕ¹ó&%p=è ƒÔâÅ‹Õ{ïéepbJ(Ç—©“>þØq×òyQÅó‚ëdK Ô¹æ8åd&*¬y)X£Ds4é£õMôˆ¨kºì²Ët8sçÈùþ–ò‰³^6ÖÉ<"À:ÉÄLŽ `…ÊT6=È qnCÃ]:ÍaʤwékØA£#À`ix¦^²n´³þµ{«`¯»î:uß}÷©¨<Л¸éÃ?¬[³çœsNë™õÌÈœèÉÿš`¬¬“LÌä¨ÖQ¨LeÓð3Pƒ«bÊ®¸†4:,–K€eHi0öé§ŸV .T×\sêÑ£‡·æ+–¾ùóŸÿ\·„5g;73$cÂ)L<Å_Â,ÖÉZ!À:ÉÄLŽ `…ÊT6Ø…{í¥–ÝvS†4À5Ël,·®”ò4†k÷æQÃìiè°Ï<óŒç=묳Ôf›m¦ÆŽ«>ú裺¦yóæ©ýöÛ¯²ÝºuW“nºÏÓ–ç)Ö©¿Ž `µ4žÑ…™Œ.DTètmßörj1•3`#1uEÀ.úÞ÷ÔŠ+˜2¤®6¶[Š–m€m 5m À>÷ÜsjîܹꨣŽòÆÇb¶à5kÖÔ5ýö·¿UÛm÷Ÿ]¤Þ}¶WÓfÎNžõz‰C€uê³#Xmßö`tav£ [ìÛÑN-†L+‚›]h'ÀÆz³ÞЪ—ÇãfÏ;Èkÿ5 ØçŸ^-Y²D=ðÀjçwVƒV/½ô’úË_þR×tóÍ7·šèé ƒ«ù‹–öùJ€uê·#Ø'¿þuFf(ºÑ †}K€uºe²•©‡®nßZ«L€%À¶´!<$†ÔÚžr´a ,ÂHü0B©qØ6à °/¼ð‚Z¶l™š0a‚Ú|óÍÕÏ~ö3õÖ[o©?þ¸n kÕŽ=ZuèСä‘=ö¸Õ²¯î9K€u²"ا¾ñ Ff(ºÑ Ïéqýz`aÛFÂN—™‚hÖY0ñNM,¶¦”ß gXùÔ˜€eÔ…—ª#G¦agž­†Ÿ=20Ÿ”uÆOÏvÎ;Ì1/Žú™g¹—¢ÎaêËPIjX@ ʰ!~ì±Ç”,¼¯ðÈb|,@k·Öbqì7ß|SvÚi%ˆmllôîÕ"MôD€u2°Ψ‡ F °NŽ™’S€[ð<ââäZn6ŽD€Õã'L¼Qmß·¯ºè¢‹Ó7ºtQC† Ì'eµk×N ><0ÿñǯÖ_ýÀ|(wÇwT½{÷vÊ‹üaêìZ_œS·îÝÙ~8~6Ò6P `Ï×^Öö_ûš¢—Õ1öøƒÂìÀ˜µøàƒV»îº«š3gŽúë_ÿZ׸>äCJ Û©ó&jì¸k#Õ+è…@½¶` lÁí[¬Ó}’ÙLØ‚ßàØXïÝBSAFöØc›•ËgÛm{«É“'»dõò´oßÁ[ú#èóÔSO©Ž;eó¶Ÿp jèСNy‘)L]ë‹s"ÀÒto…Ýî°7Ýt“ÚFÏ>|’~Á3^O”4DÏþë°+W®T/¿ü²·~ì;ì ?üp…õ[ÿö·¿Õ5=ñÄj/=›¾~’{ißýÈýs—ëÔo` nß`î“Ìf"Àü'ÀÆzïæÞr1  °_²0–PêrÏÄ•ÇXxQï½·ê·Áj‰?MJ' ØW_}Õ×k¯½VuÑ—]v™úðÃÕ'Ÿ|R·ô÷¿ÿÝkظôKK¹X§~›[pû–ëtŸd6¶à786Ö{—ÛBLìK€%ÀÖ‚L€9s¦ê¤Ç~Ö¯aQðÈb<ê–[n©î½÷^L:ýóŸÿTÿùÏ&y"ÀÆÚŸe­ðMt…‡ÖZiYF‡“8eo®l­­?Ýû` °Q®›îÖž|í°ØV¡ÈXlZcZO:òHuÁzëyÞWØ}wÛ­b±x`°o¼ñ†7©ÒÂ… Õ AƒÔÞÚ£»hÑ"õé§ŸÆžþñxàŠæ'Ÿ|R͘1ƒ›|—û#`³®26ß·'–K€ï'À` °œˆ)5Ï{ 
ì¼yóT—öíÕê€]£¿Õ!ÅßÒK†\sÍ5Þ°2‰“ŒõØ·ß~[½ûî»êþûï÷&@;ùä“Õ{ï½§>ûì³ÈÀõßÿþ·—0¹ÔìÙ³Õ¬Y³°ñõc….™K€-ô â“Ï5ÀþßÿýŸ×¡šÉž¦yþçþGá;ê)¼³PCˆc½;Sc¸ÖÓëÃ1°[ÏöÇcéõö›ÄéÂóÎSÇhh…v•Në„1±‡ê¾²×]wŠ.»zõj/”÷Ê+¯TݺuSãÆSΨ õ¿ÿý¯ÂqߘÀ‰kVøÂ °ØÂß) × 0Ř˜vÚÉKÝõ²H\pA Vyäï7|ÇœÈq”E™ìh­÷À”¶õzT‹K,=°ôÀ¦æ9PnÌB hÅ$Nm¾úUuNKX±€ì¶z²&À"ƼVòÀ À¾ÿþûÞDO§žzªêÕ«—zðÁƪV›>ÿüs\.Œ>uÁ‚Xöµ‰ôéXlKCƒm —Ÿ”({€œš xÔ^ýõ±« •ðþÚuˆ<£*#b€ÕöOÚ?_(õž^(z`é­gûã±+{`Ÿyæuà 7¨½´ÇUf!~Ê)ÞÿZ|—~– Ôk9‡XÌLüÑGycb÷Ýw_oŒ,à÷_ÿú—spÅ>¯¼òŠZ²d‰zöÙg °}wÐÑÞ|Yœ„¡‘f€…“ÆL¦sFlÄÃ;L!Ee3f©œˆÇÀFÂKI´Ù¢#’ ¢oðsuJSãÆí¸™á‘•ºî¿ÿþ ëDâxb±€‹<Ç|)Êo.þ6ÏûK¹(åàa‚ü|ã÷4郺`c½Õ °ôÀÒKljžå<°€Â}ûõSCŒetn¾ùfÕûë_/-±ÓYÏXŒ‰Ÿ\=°Ø?ÿùÏ¥„YŠ{öì©FŽ©>øà8­”0A¼®(!Ì/¾ø"¶å^â2:±öÛ­ O3À¾…])+‘†·Ýv[+»Õ´c£¶Aa÷úsÔÇ©¦<lr÷I=ŽTH€ÅÍm‚-þ†§7ˆxLñ rc ƒŒ§µáÿãFÆvì#‹ý첪¹ãÚ‡ëm—õž^(z`é­gû㱃=°X,«ÓfuZ­‹µU·ÓžXŒÙ¦1|x(€|šéwÞñúG€,^ËdLæ·€+f2ÆLÇflëÙ» °±öÛ™XÛ™;4ɨ?ؾvâ²WÖK€Mî>©Ç‘ °€LxEZm€Å6ó n{œ¬ ÁW\qEÙCˆëѬSsL,=°ôÀÒ›šç@%ìsÏ=§Žþþ÷Õ=´Õ2:ýõVí…ò&xêºá†Î /k¹ôûßÿ^577«¾:,^]VÜ,ðÊb -f6&Àú/;E€uîã±lMŸ´{`ýàÑœëÛÍè?ü ›U¼µ2ÿ‹8[°¯8kL`0¯¯ØÅ°—e~™´,¶¦¦Ÿú °¸á$lØÏkÞ¸¼>lN …|Zì÷vˆ›ú{ Î ¦Æp­§ŠXz`ëÙþxl7,ë¹bMÕ^xA-[¶Ì[F³cFbxaè%wî¼óNo‚&sXYFÇœÄ * ³¿õÖ[¥ÿåwL …þt¸öì"\øã?öBޱ¶üšÉX§n{ Î¥'Õ®í“E€•ˆ@?ûTÀ‘âÄVVãÀ6Ó‹+ˆb3c;ÀI¢¥¬°^Ò8ó`kkûiß»kßœ.+yü–å!À–š9'qj}Ç`饖ØÔ<‚<°Ï?ÿ¼7ÎÔX¬‹Yˆe–âÃôdLåvì˜1j„ ÞR:fê¦ÇÒþô„ÖúyÎ=zôhå­%À–‡W¼!À:™ÖXÍŸµ}² °2Ï‹mŸšp+ðû^Y&M»Øo»äeqmí‹{W¯@“ÞµGõ»±g–&qÂ&{üjÀâ­“BlÞè6ÛÞÛ$Ç#„}›Å1°µ¶þŠû§Æp­§ŠXz`ëÙþxlwl9€8q¢¬½°Ÿi/l§¶m=ÈõóÀ=è u ÞÞm£<ˆ"áÿ_ûšçÕ•ßä{ñâÅÀš[,6‚ž¹Ð+7ù¬ê+!Àö·ß0;ÛÆ$ÀFÐRYDýH;ÀÊ mãoÎ@,qüö$NöMŠqØc_‘ßæÌn¸ñe'¼É’1±ÐWÆ›¸Â~Ã6ãÈO€ wßè‹fL –Xz`éMÍs ZìÒ¥KU×¼euNÖkÄ^zé¥kì%ç§~ÚjŒñ²Ç®¿¾®×\s÷}‘ž €‹0c3a)¬ ¶ !fq¸Þy­Ü…XÓcê °ö¼/å¼±Ø[%wO—iXY GƬ0ý Rf Æ à,·Ü9ˆeškÉÊ~2Þ|8stµ[×Úü·¡¡ÿÆë´J§0!I©1\ëé…¢–Øz¶?»v,f>ÿœsÔO5¼.IJp[lÑ `§NªÐ -ÆÊJmüÊW<øÝN¡ýÝï~§Þ}÷ÝRÂ:±Xó7üM€%ÀºöÑeò`íe"]ÖŽJôƒÔrKñÐ[c+åîõU ­‡Ç2oe–ñÀö×-ª¹ŠV•›1°Ú¢au²N«MƒŒ[9´Í°XBdøûfåª÷Ô²¯Gú¬Z,á¿â…ÅÒ:³fÍRo¼ñ†Z°`Ú¹sgPåY OìÕ_ýj hlõŒ:jýòËéHzæ™g<€5ÃߨêV_ƒš‡…UÑ÷§q—B¬8Mdr&{a€…3‘‚€_sMY±wííæ1ıÀM[”!'qJãm™²:`W´üž%ÈõØStó‚—±©Šf–Y€ÕOï&š5 Þ¥Óg&´`Ãß&°` °Øð÷д™³=£r§~»¨Q^ªæ/ZZ3ÌÖ°+W®T§è¥o~¡ÁpzÂG¨—_~Yí±õÖžWVž“?njRŸ{®:ñ¨£T7 
d‘cioºé&P‘0ó1ŒpÌRl'.£¼ŒŽÖ´Ñxáº/^«è»ó¸KîCÓ8ñí70¢‘OìRüí.,ÃâÄ£jçÁÿ€U$3úPf:Æï²$OZl`loëˆÏ‰›}€½«¡áÝ,ôWà :5VÙD2°xS­ÃƒOWµ°Ú¿ÓÞ'À` °áïX@¬¤®Ýº«“N¦°­MkX¬ÙÚmƒ ¼5a»iPýî>û¨[4Ìšð:^ÏD, ŠIO<òHµ•ÞgM ä.Ñß½»v-å€}óÍ7•ê°ð|Cã7V¯©Jãj®KØ}öض÷_lhø¥ß WlÉzÉ=À¦ÓZl•–|‘v#Àf`ïÖE»††ßë6;´Æv›z€Õ†V/FëN~‰+´æ)ßõD,ŸŒ•¨aF€%À†5Йÿ3RMxµÿî ïåÃ$?äõã %ƒ)ËÛtOÖʀʔVs.:>¼¢qõµþö·÷i5o¹¶Ý¶·š>B}ÞskghÍKß„óh™ƒbtv=°ôÀª¹ºMiF‰ª=ÁÆå'% 4ëzÔnB€Í6Àâ?¨¡áݦG±)iÚ•«Pâ–¥qZÍ2dp lø±|Xl­F}÷¯°[j¸ÁÄNag)¶³l¯g>F‡cam ìÏGŒ(…cB§Óô죈¨”ºj8>V{m{o¶™úÍo~Ó*/¢#°¯¼òJÙ'ÀtðàTD: ÒÚðƒú ¢l×q¤âºàEÑ×tš±É&…ÇÍ‚ÇÕ®cÄØLضEª$¶ào¨¬YˆuZˆ•]{eÍ>,F–kÇÇBÜMõE7žó÷˜ °mÚ4zc^g=:¯êvaì”)ST{¦nÎ °×O˜ Õcoåy¸¤eB¦rá¿òûÀwV#Ï<Ó ¶ó>ùä“À"D¹\Š`1YÓ±Çè¥zOÜô']—¿hoú§ú:c¾‚¢ÀªÏ8ÞË#0Äé-¸}K€à.Jq™XLŽi¾‘Ì©Á³øv( uöYF§‹{Tw¢²”ÎÂr†–K€ ߥá53—ÑACpe{`·ÑQ@¨<ï*ì< vÖᲦëI믯°®+&sš4i’zíµ×ªJË €­ÔÆ °Ò6Çê ’vßs@h¯vœmû#=‘×§¬ÿ­gŸv…YŽ-YÜXl”!Ä)F¹bV-S+à*ë^í¿ÿþ ) ˜Õ:ø,î@,Öƒ-ìG =tÂlÅ«¬ 2tļó§jOIœFQÒe3„˜!ÄI·¹<áÁQO(dìØ±cÕ¡ÆLÂúᦶГ=þøãjÙ²e꥗^RøÃÔc=¦úuêä…ãy8eÝuÕ–]º¨z<«w¢ìß¿âÖJã[çÎë,ŽS)Å=‰Ú ¼ÛШ/w\mÏ«Çê%À`’5Y+٤؎%®ÂØ­Çë¡~ëdž)#é¼ôÀ:Ûª™Ì˜€Å €µo¿…›“¾I²|¼2›ÉÆW¥uˆq:a‘x¬6ôÂiXl˜ö¼áî¯0zùMâd{aRÜS‡¯Þ|óÍÀ.Z´Hí¾ùæ%Oí*„ wþyµ¥†ZŒÛÜN{agΜYqk¹1®°¿ÿýïU¥”ÀBK¼8À„NQx¼Ã\›0y¿³Í·>ÖÑD¿5Ì °%K Iÿ5¤V»@ëÜc®nãO}ã¡€¯Z»ðúë¯/EÂæ…“& 8Â&ŠhÀv±]A¨ œGÕžW=ö#ÀÖÚúÓ½f7\PÈ0Þ!Ÿ$ófÃÍŠeÈvÛ{ HöÛfÛ~@ è_xá…ÀD€ýÂ;o¬Ùƒá%«N½bíÕ Vx½Ö´å` Šö+’í‘ÿM€…íèg7›XØŸ~åa?ów`+Ù¥Þv™bÇŠýÖV­&?=°ù¾Ys°~Z34Âï­”yCŠgÖ¾épÓ¸¬éa•²ÌeÈ4es;ö—i©«_]ª¹‰+íC€õ'À`[ÍùÔ¾}o ‘ òpâøBió ©•Î­À^yå•´Ê$NãÇW[n¼±:g½õ«à…®û¥!ûíçAêâÅ‹ÕY§žªzꙇ¯×pÚS‡6í¾gâĉÞLʸ›†àùó燂XXx‚\êº`Á…Ù‹á½={¶š5k–š1c†×¯"%¡e=QΪwbf'ê °v´  Àâ0'r²Ç¹šXÓNäš‘ˆ¶ÇÛØvÔàxæ1% RʨM\å˜,l[ظü¤DL¬`¥1°æ )7,Àb?ÜÀò–É ÷ ! 
°~Rù…€Dªåʉ`õ ü´)~¾P ÷†”‹Çb†»´æ‰ßã,{ùå—+„ÿ.\¸P=óÌ3 PxÕUW©Æÿ÷ÿJ+³cÖ#9DÝø¿ÿë;K0¼¢]õ¤O˜¡øtO:öØÀ…ÍÙ†÷»ßyF3&…rIØò!Äìx£W  ÛA@÷† †.kqƒígÃf9€œÚ@+Ù¹)!ÄÞ¬ftÜvmL /EßB‹[b$Dßàçê0Wè´Ãj%DAÂì7Frùx`Ë…dØ7žý0°·ûË|(È›©rÓ˜gØK€mý,!ÀÒK,=°©yqÔ±%/å‘G©°Ï=÷œú–žiU<°æ2:AKÜ4ÿð‡^1 0 ¨5÷ÁLÆåÊ€…Ç×%` °IšìõX±akÚs T°¶MY`aË,È6ÈÙ¹Èo†ËßRÿzL±6^J²íæýX‘\$Vbèåm”ÌÐ+žLÜtxkд"dÂôr,ö‘}m¬L…cÊr>æ® Ûo­Üܲ¦­y °¹¹íRc¸ÖÓ»E,=°õl<ö—žÝ•«ÞSÇR bÛéÐßsÎ9ÇóÀ`§L™¢Æ^v™B,‹¥t‚Òoû[µÕúë{ceÏÔãfGž}v«}¶Ð`†íj†!WòÀÚQŒb›¬} ”Snã¸ìY»\z`kmýéÞ?s›TÃ/Êq°±Þ ©3\ëᢖØz´;3xL픩3Ô7ºt-ì>ûì£{챪†îI-KêŒÑÞØýûõó&[´`èm—]tÑZ+‹¼a'qúbX~Ê*}4ÖöIÀÊ8U Ù•hC`íÙ€ÍÙ„%"Ðobs G™tÉœ…¸ÒX™Ê %®©˜„M€­­í§}oì µV2©â$|ÑŽA€­µõWÜŸK,=°ôÀ¦þ9pɘ+UcãzÈ~Ue=å”S¼p^—µY%òwhÓÆ›¡ÄÓÖ]Wm©ÁõöÛoW]ôLÆðÎb|¬]îÃ?ì…cöâ0‰K€ è½3 °ˆÀ³£õl»P «DÚùål·#e˜š”iûÚQ…È‹ßíy[üfÆ1ºö6¿ýã¶· °±Ú·u/¼Q× ©ÖZ`WÄ6yUÜ78¶ÖÖO€ òtÑKlPáö`oiÜ-[ñº2'yj¯—Í;vl(ˆy晪«^J:dN¼³gÛFÿ¿ªe|ì9Ú¸5ÁXFy˜D€%Àæ`ã¶ùŠT>6Vû6…` °--™³·¾¥Sïy‰Û(FùXlíŒÇˆ‚›·Hí°c¿RXñV[m¥î¹çµtéR§„äc~ðÕU‡ Èê0/h‘ð;òHy=ôç†M\66dLg‘Il‘3îs%ÀÆtgå©X,–ë{G` ° !fq&Ÿ“o¿WuìØ©²ßýîw=ðÄ$O.ÉÙëµaÅHW¯³Ž¸RƬY³<€;wnèD€Í“%ù¹`uèoܘæò °‘ßSù+›Ý‡Cˆc½3i¸FíÉ¢–بÛË‹ÆÛ¤ã«×¨ágôÆÅb|l£žˆéŒ3ÎðÖŽÅx8—„u^;ê°b„`á…í§ÇÄÞu×]Þþ>ø °€Ñj&Ϙ1£ÛAç•õí}úrb‡ž›K€UsõóF3Êh‡ö”¥Yg@”!?yR€K€¥–ØrF!–›u`(zý1>vÐÁ‡–qã7V×]w·ìŽKÂ$N°€Øé:í«g)ƾXC m5‰›'k2Òs!À` °‘ÞR9,ŒK€%À`+ìö}ûª‹ôAé]º¨!C†æ“rÚé b†˜ëC¯¯ÇÞÛwÜqGÕ»wo§¼È¦Î®õÅ9uÓF}ÑÁ‰çŸŒ§ÕUçYÎSÛ|«w dq¯Üwß} k¾VJ&Àb6â>Ú#ûS}ObŸ™3gz Om5IVÖħØõ|²šX'CšK€%À:Ý*ÎD€%À` °åŒÁ•«ÞS#FŽrJ?>î'jØOG8åE™˜5Õµl×¼g ?[ý¸ù'Î冩³kpNƒ˜U›õNxF}=&Ýt›êС©²‡~¸þ»xñbß$Ëè£ÁµoêÎ;ï,倈V“î¾ûnuè¡_z‡ûí¼KîﬓÁM€%À`n•g"À` °بd–—oâõÍöõÅ‹©ÓÎÞj|ì¹çžë­ój§.z=Ø.:Zâ¢óÏ_kƮ‹u"Ã$€ïqǧկ+ÆçâÅÆíæ½m` n,–ët«d3S/]íAµVK€miC§èïþµ¶§íŸ{C*¬¡8qÒ-Ԥ̬¼¯½õzzñ2êÃY‹3Õ/]¡öÙ7¶[·nê†nP‹-*¥k®¹Æßjþ&?ðÀÀb9×t饗ªM7Ý´tÌ!ßÿ‘Â8ݰϣ¬æ'À:Y Xl” Û6.?)Q Y×£æYµ°Ø”´ç´U£0•‹!øè Ôæ[lYXM¾sÈ`uùãËžÿy?¿XíÚ÷ÂêãÒ†˜'½^Ûi3g{÷7¼¡H»í¶›š>}º7cq¥„<XÌ$”~õ«_©]vÙ¥t „ c\nÑÚÖ©«'À`£X§FÇLÉ)@€-ø ñ2:ɵÜl©p†U%C²è{äÑÍêÖ;§`éaÍõsaì¸kUÛ¶m=È\G¯ùzÔQG©yóæ©… ú¦iÓ¦y‹àr Kî`r7ãN;+Ìn^4p•ó%À:؂۷¯ëÔè˜)9"عºÃbʦð¢'×ì s¤ÂX~†eÑ6ÈØ¦6½ÞÅ kÇí­¯ÆÇ{üI%àÄ8ÕK.¹D-X°`­tÿý÷{‹±°~éÔSOm5ÎõŒá#Ê/²æX'"R€}òë_WËn»)CšÑN-†™2¥@TÛ¨Áõaæ0eRƒ‡5Àn’©–›ÊÚȲ L`á‘4Cf±ÿ# 
é´aÃ=ý0.ÿÛãCñöA„ç–ón ‘Pž”±¸È/ÿc³¾¨›ß6ì'¿›ß.Æ´Bl–…ó¶õp)“y½inó-U»í±W d·Øb uã7ª§Ÿ~º”° ¡ÄfÂ8מ={–ö=èàÁ ãmÓ|¾IÕëdD °tÎdÓ9ƒë¦í[¬Ó-“­L‘l¶N™µ¥‰)@cË5V`R†€P&y„F€:3Š¿h‘WàO Fì Hõ3 ñ»¹]Ž2ÅR/9–”: <ŒÍ:Úu2^q<³Ž(KމoüÏ1°Ò v”ÅíS¦ÎÐë%w-Áè^{íå… ÏŸ?_M:ÕX„#Mš4Ií¾û½ûl¯0¾6‹çW °N}x“ÎId™† ÉtÎdÒ9ã9Õ4ÀbÂZ~r¦6g”§“*htù¬Àš ¯~0Р“}Ä „7ÓÚ–¯r[ hÕ­¤ä½E™& ˱ì2ýB M/p±jlÐù•Åíݬµ,o3êÂKÕzë­çÝmÚ´Q'Ÿ|²š2eаçŠõdñ;¶cÙKÆ\Ég¨Ï˜qlªúyV† P:(@€­ƒè5h§0‹cʱý<°fˆs/¬=‰“y¾¢'q"Ä&A /HÊ.HÆ«C€m} °ÀÂsi)þ7=šW*¡Â2C¯ihÛÐ+ÛPN¹™Jñ»]–ßq%¤pŒíâéX–ewÌú†ñš¢v1•,„rýΙ ‘]Ðàµãµ‹£ `3n)±úYT€¼”²«Æ ’² ’ñê` °±B»¹ôL†a¥2ýÆÒš3%']pÄ6PÌ6@€Í¸¥ÄêgQòRÊ®/HÊ.HÆ«C€%Àư~ãF“4àåøú‹o+žXs›ü-3)'YW«˜`Ãë^ŒëN€u²”u®þN9™‰ +@^ Ö(Ѽ ‰Êûƒ` °±,BlÃÌú—1:H’c`)ówûï¸êÂr‹,¼Î¼Îf À:ÙRu®9N9™‰ +@^ Ö(ÑCõÑnHôˆSX'3Šë$39*@€u*ÉlMIŒÇʵX,Ö˜¸ŠÆ6‹m€m ê6@€u²£°N21“£XG¡˜ dQ,–K€e`HUÀøu{t*íµ¡£†Î(Ë#À:™GX'™˜ÉQ¬£PÌF²¨–›*Ã5J£‘eÑ“Æ6Í6€å©¨•®6‹&GÅ:`swIëzBغÊσSx À` °ô¾± ° d® `ã5êP:¶¢çøØ”]ܺ>ª†„ ZéÃò‹§: ~¾P sF=LÙô0ñºçºaÉ&Ìz-ëc™&ü/mà…å«”¹¬“:‹ü˜Ñ[òÈvx)±ô“]ʽõΩÞ6$ìg¶7û–kƒa‡úc© þ—²åœÍåªP· ö:ÚËY™eÉYi˜å<è\°!ÄN&F/ë³ìÛÑGiÒÛ×°ü²*åQŸ 6áÔ0™‰ P*v +c…yŠ'¼Ö¼ÖAmÀI—€žìÀœaàžÅ‰“n)=‹ð?ötâwù€'åa»”'eàÛ|M¨¬žöxÉ8ê'€*uÅÿR'œ'`ŒßÍðeÀ« ¡•^ôØùÁ*6 ]*ëH¨ȧ;%xZÓô±NOðx¦-\͹s*{üÖ~ce‡ýb»®xi!T¹7XÈ/ñîš¿a_¹ñ%|ßâÁ•‹À  rß½Ìá”ãu Çm ˜máÊ©²¶,¶)Ïr­pa±_M;2Ùÿ»DÚ&ÀÊßv"Ê…}‹oÓ!#ÑaˆíüP*PAj7­Ü\æMï °ò`0ß@ù¬À,¶XM˜ç…°RLXáuçugðoôÀËh9[¤€¢xd± v¬À¥í9Ø´AÑ„vD ”oª=Üö®ß&'¶8—™,d3åIW£@5+¡½q0½µ2nÕ¬‡<0d¼ªxQ¥ y@`{Ðzz`«¹ÂÙÛ‡K€e```ˆ´ `³g DPcQ»S Ò ÛÕœìÉÕc‡ ÀâöäQò›e›Wʰ!:‚SgT ß T°PÄ/ÌA`Õ|ƒ$ã DEBí ûö1Ç`óÝåì"5Zè GŠm€m€m€m€[ Âç,%RPÂw‘ÅtšÀ¶4íÖZÖ¶mq~Ôå¯\õžjjÚpî#›rÐOò¨ T€ P”)9ã(jc‹ååœxmëmám5a°†ÿõsÐûF’<ö˜OH/< *ûØQäðÉöÓ† oõlCþ[ïœZöy‡íæ¾&ÀÚÀ-u‘ãI½Íúaüo¶AxYíúËqEó¼pqz~ãº?æ/ZªÚ·oÿNÊú:V‡ P*@¨ȃë­·ÞÇËV¼Nˆå ¤ll±´:–ä7€¡À`ЄNs?äà!Bj‘ð·vk#&‹²½—Øxöƒ6ñ€JÙ€FO&r²VÀÞQÉ#ã|í}äüpnRÔûbŽ)Û°¯2\Î3xFU.´Øh£–ä¡ä9P*@¨ )S ©©éÍ[‹á•1ÄrêïEã5à5¨¶ øA˜_1€ÎôÔ"h%¿9‹±xrQ/?H(”zWKŠãÚ¡º•<°6<ãö8_ü†|Í~Y©[9PÍâäW8§ oÔ!ÄMSSÖݱ:T€ P*RÌ|&Ów‡Ü55ÙQ™Á-5•bEjS ÃFÍštÓmXzߨØbi®+“‰ÇRB‹D~Àkþ†cÀc*á¹åÂŒËA¸ßx× €µ½¹fø²Y`Ë-ÍS`³0VØOÓSOÿég?¯­wâÞT€ P*Po0}÷ÀzW"äñ¬X`ZÀõÇyð“/šùÁߪõ®p?zæØØ*µÓS4‰¼®ðdÚU?3óÔjkO2…zV°ö¸[S—J“B•«¿í•Îʽ¶é¦›}¬»É^ùê*y6T€ Pb)ð³n΂²–—9­9Î#k 
ž­ëYǦõ×_ÿ“7V¯‰Åû’ƒ‹õ$„± ÄÓ‚¼§~cc²¦‡SÊ0=˜]™àHBmÍcûzÚD™Ûí`ûx~c`m¬²lßãkn/°µ‚y=Úôâ¥+TÛ¶í>¨g§Æc;)Àåeœd ‰ÑŠ¡%ãiUà&]1¤<|òt.y¸‘œ&Û0—¸¨‡ÑÃcÆÔ•ºÖ» Ø¡Á¨üæ7nTÂoÍ1«2‰¶±Ÿ~øµZš€(cN~ÈcBf¥Iœ0;1ÊCÙ¢a=°rNß86þè–cH½PWÙo²y|¹fâ‘®÷5 sü‘çþ|ƒ 6IçÄBâRàh]0#êâQNj¶,5aàµÄÃÂüÀ«9­¥‘ã;hñf€#n3¹ŒGÅ$ÇÁ¾ƒJà˜(׬ B†Íö5ƒóÈ¢79áKž¹Ã Ä¢óaŒæ%± ° ¸¶¿I’t€9{ò$@›½&ªx@áEèõ{醲úìr”•^Ô¡>2q<¹Ø_<ºø6aÛüÊÄŠ× l/£ƒ}¤þö’>(ÛÌß+Müäª}’ùÉÓ¡© áÛd®,V…až^¬S®úlÍ¡t®…0ZÑU)æKµhÈf¨€¿ááßñÿË©À«™×ÑJ'‡ Óï8ø ÇÀ85!Ö®»ì“jÁY¹ð l¸á†OL¾ý^†s"¶¶ÈÛ€x#]`Êv³:™‘Ëù–Ëc¯Ÿ[KYIí{Ò©Ã>éСÃèð=÷HP±A]œ V+µ‡²í`—Š2ZÑE%æIµ|æƒâJ]cx6ÍO¥·aöÍSË›3³,©› ר›éa-ß;‘êfWUåúvë¾ù_’2txzïØŠÕ¦•–²A{±¬v¾",¼±•&…JÛýƒõÄ7ؠßuïÓXUÄ’R \$œågGãÙuƒÅŽ „ýèò=,Ž;Ñ,ÛúE Š3ÇF}ìáze(çfûÙõ–<8ÊDx¿œ«-iFFÚN%F+º´æIµ~KÜCóæÇÿå&°bpxØÐÜH(É`Mñ°¨°È[ÍÛ¨T_$Vî ôº}çý¸ù„Óf±>Å^ï|^ïJ“(É5G¨n¹¥fÊýž×öâ¢WZΡÃÛö›ú³?M½{fEå7جH°;Ë©Eˆ¼.Cáäxš(@ `Q¢a³âØ¥øˆ=-ÃáÄá"uÅþ°OMˆ;¿a|›6®l7µ0m\±›q,ÓF7ÏAŽkB,£S°‚A øy`åf“…|—['Vn8ß0BÉM&aĵ¬<¸è ºòÝÞ±c§›/¼ḩi1ŒX| ¯+¯+Û@~ÚÀ~ûú¨]»vÇd´Û+Zµý<•° Mï" Ñ=S#ñfÊoa`­’gҚͺúÙÓ¨£ ÚvÝÊ«ÌK°8GÛiã§YI[¹hwUNÏ×nüP;„¸Ò©cÜ ¸yÃŒYð{¨ø¬ Îö›9»îaT9½œù?­¯wì¸hâ“ÿE3?&¯%¯%ÛÛ@mQ;›l¶Ù5ùïss†€6Ó›»Î “¨+°6ÀÙÐXI¨Jyý¢ûÌü~ö§}.â1•:ØÛñ»ù[5+Ç}Ź„oóÃhÅÜÜ2Å=4bsbÜüxXà&ï+þ–·_2©“y¢ ¹Y$¦ÛýÖj•ý°MÂ1dÌþï©ùÐBä͘YWûd\1ÚqãÆ:=rÒɧsL,'ô‰|BŸ8Œh–I8cH¶ ¬\õžÚsÀÀ?cèI1ºÅÜœ¥í•0´£ËEÚ‰wÑ]ר¼´¬¬ÒQ ™ òf 7n@y[d{tÔ)yÃ,n^œ?ÅT ¹mÛvÀx¹dÌ•Š0›l¸Ã#©7ÛÛ@=Ú uøÙ#?Ç2k:\ø-†|1{Á|œµ =³çT‘è>‰ 4gÕd ¤JÔž„Ð ¬Év;ŠÐV ÇGyrØ–òÛì@ÙÏ,u¶áÙŽ`ˆË¹ùÍ–lÎfŒº˜‘Š8–œ/àV43ÏÁ¯®ŒVÌǽ³РàA`Þ¤aDÁ #ðŠïj˱‰7Iv¸F¥zá¸öZWa΃yó¡ÀÀ¶mÛ^˜ml\ïZ¤!ß?üS}zcÍD ØØØ2Ú¶ì¹õ§ò\Çóкîºm.Ó÷ÍGƳРÆLaQdHš¹ßoaÊŒ+o½êÅhŸ®(Ë¥T€ D @SË ¼Ü8E'.`¨,‚ P*P'zèã6Ïõ:Uƒ‡Yñ*VsñfŠcEþwC[Í1«Ý§ {ˆÑŠÕ^1îG¨ T€ P*@¨ðQ Lž½;X!¼a–xLòb nÊ$?8£“TœÇ¢T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@²¥f²Ä¬‘X£Úõƒüi\¾Áµþẏë±SOŒçD¨ T€ P*@¨@¶˜Ø)ª%°,ƒÒÉuyÔù±ÖŸëeO É([Î9 ,Ë>Aú L€¸,K1Ðõd¬|r¼J»'—¸\±Ê‹Èݨ T€ P*@¨ˆFF@£™+€–jÁKj—À¢®¨»í!Ø•Mû¼]ÎSÎ¥`£èfë‰ÿQOW‡~f+=Ê. 
°ÑÜo,… P*@¨ T€ P0=žâíÂö7µ@À(Œ‡Ò®J+žNûØá~òÈy üƒôP,ŒÐMÀú™^mè ÇÂ6—]ÇrðK€uQ“y¨ T€ P*@¨ȼ•Bvb]ËOŒ$¶ÜEpXxÅ»X),Xàß~Àˆ}P‘Çï­%Ï@‡–#×úË1ýÆ`Äd*@¨ T€ P*@²¯@%€d• —…Wp ¸BÂß~h,¼¥È¥N~ g×yû`_¿Ïà–ãË6ñÈ ØÉ8Tsó8ø=hœ®@.ÒQG”û¢‡‹ÇWê(:—+?`ECeáºVú˜šË5ªBŒëŽrQ~¹ëpHn¦T€ P*@¨ T€ +P `ìl(¨È˜N'ù߆#6”!$^H? 3ëc§L˘S‹ýÌcHMh´Ï»âœÌºúåµëPNy€¿htuL€5ÁÙé.WwÓãk†?ãøåZÎü®È[`%4Z¼Ý¢{¥1»AçÍíT€ P*@¨ T€ P_Ê, ­\¸+àÄöžJˆ­ .°Mâ¥5Ë7Ë’ú¬ c?+¨ËÁ£k1„‘pi?¯°„ðJÝý€1È jŠïš×Xì_Î{ëWžÒlzžMá-5?~!ã2íÝð|Ëu2C›åŇ‹—™·% T€ P*@¨ TÀYs¢ s<¨ ÅõãR•Bfzý<£(Ëï)P†mæ' €Øc6tÉ1ÍߣX¿Y“mm€Åv9WÔW>•Bš¡¿ý)§c'×Owüæ:.Û å†\ÛóQ*@¨ T€ P*@JKµˆ·RÀ€UnÌi9ÙüÂL`]AªRH3Ž+^a3Œ6 €•²m “ú›žÙ¨6hvg?€5½ž•V4)w [³ Ýý®m%·Œ –°nÞjT€ P*@¨ T€ Pšðsìd9,àV Æ/i{é*¬ r". e{/£X©«Zë «®aÁ~ç\î"ú,òÊØ`yÁàZGó8¶fâ ÷{Ñ€ýl€5C½åøö·‹—¹æÌ¨ T€ P*@¨(Ž•ÆÀMš$³Ú¢ ¤°Ø´¬Z[êÊ£ °™!ÛA­¬ÀÔ;Jˆw¹Y‘ƒ€:*€E9rýý¾ƒÎ‘Û© T€ P*@¨ Î Tòxú·xò[.&,ÀúÍÈä03ÇVFå…hâݼÊùØã8ýàÐo$¿‹ ùì ”üò–Xä5Ëñ«k±„é^.„Øö¸;78ž@Ê2¡O Í/ô¶ÀŠçPŽgžc%€­ä5Fˬ@¥ ÀæùJ=±ÝbƒÇ3Ëô»¶²]ô“o¿õnÍòÌk„kúúy³íubåÜñM€åó† P*@¨ T€ PÈ(UZÒÛí°RäÔ˜Þ=ä±Ãgíß¶ìS præñä8€¨rkŠ"O¹°]ñúí}Ê•´Íe ÑÇ–ü8<ÀeÐRE~ÚÛ ÀåúI]pü \#S3¿kk–#ûà8ø›ë¿©ÌíT€ P*@¨ T€ PŒ( Þj?¯fFNÕ¤T€ P*@¨ T€ P")PnÝ"iÀs¥T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨@±øÿw.G?ÓæIEND®B`‚manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/rabt.png0000664000175000017500000012764412301410454023262 0ustar chuckchuck00000000000000‰PNG  IHDR°š6¯»sRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?¯ IDATx^í½ ¸U™ÿŸÿ H ›dWV‡‘5ƒ"Š‘‘AЈ#‚,FAAq (ÃOˆ"£ˆ@•%È&‚D–@v1 (*:êàŒ3sþõmî{9÷¤ªëTuuuu÷§Ÿç<÷vÕ©sN}ë­®÷SïYÆãƒ(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(0d LLÎwÚH:.ù;+Ioó¶Ù>þ¾¢Z 6€ `åm`ÿ‘g͡޳fȽœ.      
@ÖM2:q•Uæ¿ì_7ßêM/*}äÈ™Ûa§]F¿Ûvþ¾¬ °lèÜvßs¯?sìñnÿú‹éùêe–ùÏ +¯|mòlÜêÅ*@@@·ÉJ+­tÓJ+¯ü'9_z¥{þ÷/‘ÐÀ°l §6ðèâçÝYç\à·¯yÍkþV]uõï¼oÆÿˆ#ˆ#ˆ `Ø6Ðï6 ±[OÙöw«®ºêG†ï‰Î£    À€+°Újk|íÈó§~wXh?N76€ `Ø€o;î<íw&LØwÀ㜠    É„½u··ý§§À°l`Ðl@<­·þ†¿Mžê[ Ï“½§g:=©}VɤµåÇç´žòÛ „>=5*G¨C­ô`×~МÎGÀ°l@6°pÑ#n­×½î©ä¡Ê2;Ý÷,¦•„WAï̈æQ~{‘Ð'ˆȂ(ÐÇ $“6ýâÆy €WÖsİlhÐ’pZ×¼Ù4PP†[e—}Í‘ûíÿÞÿ BA„À°l`l`ò:ëý!yòo5ÜOÎPPúSñË/?áw÷?òä@¿q‡Œs<°lˆ³‹/½ÒM\e•ùýùئÕ(€(€(0Ä Œ?þ˜<äÏ8=qN:¡6€ `ƒaZëO9~‹4I¶IWƒ¶ @• ,õªWýí©ç^`™yÀ°l`(màÚæ¹ '>\å³uHË`‡ôÂ7ô´؆^š…+@–¨EÙ¨Ça;Ø606 eä4œ¦ã*°Ø@“`›t5h T©“8ဂÊ9`ÇØ6PÖŽ9öx—Wl;‡(0¨ °ÀG¯ZÔ Öe, ÀvÕ›Y7)}qÅ5ôÀ>ñô n½õ7Mú­ñ¿ëÿ*çÎ<ë¼Vùæ*ËíeYlÅwÅ¡À +À½|`Q7öW§ °,ÛUfh6´«í¦n?pÒnÿV°]½7)K€èöC‰ò±±¦Ø À°]õaØ‘ñªy«ªº+…Ý‹õýü‹.mEV-Ÿþ†¶›Ö…ØòŸ|Êl§nÆMùíiG¯v«‘ŠÕ9&)»ÏôÈr²ê¢üö×aÐõ™™ØŸXà"æÁBìdl€`Ø®ºElÀ nÕý÷#GÌtoÇÞ-ŽÑÿf›SåñS˜G]‡µÍïBlåpàŒÖ±Uw[îö3  ;-)3wL¶2ÝÜÕÛ‚ÂQ ^YIV%>l_½ìö‚òÑa¶€`»ê°9+¸ ÁRÑRm³ˆ©V¿U~ÔUÛü®W«n3›aï´–C$ °ÀÀ0?0ñÜO<éÔÖ’ ¤t òîå·Þz«[uÕUc³Î_¦ŽÙ³g»)S¦D·©h~\ô˜/ùËn©¥–ÂÞ¸çºfïx×»ûnâA|¶tûœz°Ó’Šo†P ! ¼,Ûí‡å×kcŠV±Hלl>ûØ9sæ¸ý8¨¯C÷Kbòø4GÆÔù2~Ïê}†ôê™ À6çæ£%½S`zRõŒÞUß?5°Í|0\{ýì/Ç•W€-æèT O=÷¢;þs_p‚Tƒ×iÓ¦¹›nºÉ}öÙníµ×ݾáF»‹/½’‡iMÓª¯u]å°,[ìw€-ïCT|äº)åÕ °ŸúÌ n½õ7“¶›º½{âéxövùÙ;(; ppµëœì¬Š).B¶˜£S%„̹ð7yò:£€ºÁ¸ .¸Àýò—¿M?þ¸;î¸ãÀ0šo×·ìî.z„‡i—¦U^ë:Ë`X¶Øï:á,ô.Km{ò)³[ÏYýõ³?rÄÌV4¶Îßña¬k öÖµÖr<òi€5¸cÛm[Qv¶7O¶˜£SÅCE“3 B-â*8¤>ýôÓ™é§?ý©{×»Þ5zÌøñãÝ‘3q.~ž‡* ;ÆX€-ö»ÀVæLLJR7à*?µ¬"¯8#ê™*ÈU´Vï{xñ˜cοèR7÷ªë[Û•G)Ì#_ByÒö«Líóý •©dÛÔµÙ@ÛÚâcÛÒêÕ6ÛæYç-q¾Ú§ˆ³êËj{¾PX;À°7ˆ0ÀVù;_¼,¶˜£Óɶ`óÃŽ3¾õ ƒr÷Þ{¯{æ™g¢ÒUW]å¶ÞzëÑ2&­¶šûÒigD=p;i;ÇÖg'j À°l±û€-î;d¡.À‹++íå‚jXEXP!8†¿ÇÊ'ÐU·bEfõWß}¸Ô6°å³.ÉþxZ몬¿Êûöwì=ú×ñÚî×­mJ¶Mûm›ýµèqX¯±Ö~Õé—á×¥ãunVN‘îô•u< ÀöUÄ€­ø§¾`ql1G§ì·&aòǹn¿ýöî†np¿úÕ¯J¥¯}íknÒ¤I£ «™‹5ƒqÙöq\=vP‡Î, À»ŸØ‚ŽCvö¾XAš€--b‚d¥Õw. 
jýnÇ…V–¾‡jûbVÀçGPU¦¶L+ŠÖcpmuYR ¸ë÷ À°le¿Åƒ_[ÌÑ) z›»Ùæ[¾1M SðY\ýã~øawÄG¸e–Yf´üéûì˲;CÞ¥€`Øb¿ële¾N߬EDÛ=ãAM‹Òê9¯ía £¥*3ܦˆ«¶¥E|cV Ù.Jkõ( ÎuŒ ×fYÖßjÛÁuQ¨H~€`Ç›‘Ü3+ûIà‚ØbŽNì±&YÚs¯½GÁRyôÑG»Å‹»gŸ}¶Ò´`Á·çž{¾RW2>V3k†ãØö’¯;vÐ ]X€-v?°•99} °‚:T»Éš¬›q˜'ܰz6øÝŒÃ.È1]ˆ‹¬µ3ìzlÝ¡c¢ÃÝ|¦u`§%eæŽÉV¦džj>Zûõ–ĘÄið'°êRâY‰%*ñÉQ€-æèäýx 5¹’&Yù1vÓ§Ow÷ÝwŸ{î¹çºš.¾øb·á†ŽÖ«Ž5Óq^›Ù_­ ôZO€`‹ÝÓle®R߬E'³ºõêw= `Eõ»Ç¬ÊT½²~bAm]M‹gÕkçd伕(«`Öœ<ë`‰ü–Šü°y·Vw÷°ÅvprÖ98MªdàºÙf›¹¹sçºçŸOf ®1xâ‰c–ÝÙ~ÇÝó²CÒµ€`Øb¿ële~F߬žíêÖ+x O€j݃Óf*Në‰MëBìû![÷bËãOeÛÊ¬Ž™i€íà~ ;ø‘W›Q€íàF©àP¶˜£“°š[C֞蘴zÉ¢ìXû®«sÊ[N¨ϱ^¬¬|U· ÀvhKl¤€l1G'íG{åUVi⪫®êŽ9æ÷ÄO¸^x¡QéüóÏw“'OÙ 7Ú¸'¨n<ô(s¬ °,[ìw€tò³ ÀòL)vÿT¥W/6ß´#s°l¤©de`#`;ÿ¡6€=ýôÓÝ¡‡êÖYg÷å/¹Q+ ~æ™gÜ'>ñ‰1Li¦d͘\Õˆr:·§N5`X¶Ø}ÀF: ùÙØ!™k¡ÓçTÚñ,“)•šLÉÆ¤Öý—1°ùO„næ`‹9:í"°_ýêWÝí·ßæ·Ï>û¸í¶ÛÎýèG?r¿ýío•zè¡VûF- ÕÌÉŒíܺñP/Z& À°Åîe¶2/€`K¿`Xöåñ¯J|r`‹9:1;þ|wçwºóÎ;Ïm¾ùæîŸþéŸÜ£>ê~÷»ß5*]ýõnÊ”)¯L>•Ì ¬™”‹ù;·¡*5`X¶Ø= ÀVæ*°li€­`úÓŸºË/¿<3UÑüìg?ë¾öµ¯õ¤¶Ó€leŽR°Å"{×]w¹… ºý×uk®¹¦;þøã[Ëéüþ÷¿oT:í´ÓÜjÞò?šQY3+W U”Õ¹ÅjÀ°l±û €-å>¤À°¥}¶f€=øàƒ[cÞ,éøßœUBìÖ[oívÛm·Jˬ²}EË`+{p”*€-æè”Ø{î¹§‘ýð‡?ì^ÿú×»ï|ç;îÅ_lTzòÉ'ÝÌ™3ÇŒÝÿ€ƒÜý¤41Ù˜ÌÃZég–®ÏŒb6Ýz° Ø[o½Õ}ìck¥SN9e еhíøÃÑ|:Æ\u»ûå^pÁ}Ðl¥?ô… `;(Ø,Ä6‰“µ.ÄŠÀÀÞwß}îþûïwsæÌq›l²‰;äCÜ/ùËFA¬ ú²Ë.sm´Ñ(ÈNž¼Ž›sá%8}ðÆ€m°7ß|³‹Igœq†K~£òª¼¢ùËsøá‡»7Þ8ºMEó«ME9î¸ãœzkô£Ãlm` »uÀöÁs§Šû¿W«~ï3«²è~]F§]VÀªè¬ºÿúÝŽ}@Õ~Û§®ÂÖÙÏ£íJµ‚YåÓ¶÷¼ç=­ÿ«î¶ÜÍ1[Õ]S®¶7û³ŸýÌ=øàƒîóŸÿ¼[k­µÜ©§žêþøÇ?6.|òÉn„ £ »ë[vw·-XÔ×ÎjÚ&—ÀfßÓoO–Úq§£Òö;ìè6J`16ÿÔ7oŸ¼ô‰Ï¯r‹³ívSÝ&oxct›¦$ãÙ7Þx“èüjS™cN9ýŒ¾þM`Ëù5À°eMmFràœ¼ƒ§UÙm`ÐVš–Ú&è4@4U´FÛô7̬«•¡cú) ÀæÝZÝÝÀv`™}ÝÊ+;­kX؇~Ø)R;cÆŒVDöÊ+¯tÿñÿѨôøã»~ðƒ£«·¤‡vËî4Ô©`;¿§›ü‚‚¶U}Øîú–À6ôYSõoQ¯"°lœYXEDÓÆÆZ´ÕXu/ö#ž\?âꬺ낫ÞnFI»Yv—vz¢‹Þ¼ðÉQ€íÜÊêB¬ñ®;&Ëèì¶Â n÷e–q'œpB*Àêþzì±Çœ–µÙyçÝ{ìá´VëŸþô§F%ÙÝi§ÆŒeÙÎí§j'€mÞ5©úS^µ×€m´«À°e tjràqy°mVPš°ávå 6Ìã¬À€Í3Mö·S€íÜ vÞ¼yîð$b¹ök^ã®K^0%7©{ßrËå¬"O<ñDk|ìúë¯ï>ùÉOºgŸ}ÖýùÏnTúÖ·¾5ºìÎøñã‰Ä6̹`;¿§ÄáÒ€m´ŸÀ6ìÓ­ßÇ.D`£ €-°F`Ø(Û$SØÎµ`·L–Ê9*»—Fàµ(Àþâ¿hõ¨8öØc[ «.ÈùË_“þû¿ÿ»Ú#·pÑ#}=þ­[ã^• Àv~O÷êÚQoo®Ûh7 €`»j l€µ®¾áÌÃYc`ýñ¬yc`Ãý:–1°]µõ*€íÜa 
ö€w½Ë¾ÔR­È«¥"XìâÅ‹ÝSO=å4ƒñAä¦Nê~ò“Ÿ¸ÿüÏÿìYú¯ÿú/÷?ÿó?î÷¿ÿ}ë7€íÜvº,l3¯K7®5eVs­ØF»5,ÛU`Û¬ ÒfÖ_u 'cR›uXû,¶µ›…XùäHúåúCusüjewi lW} `;w€Òº¿nÅÝs>À&c`?üá·k]ˆ}€Õ;Ï<óŒ»úê«[¿ûí·Ÿ{òÉ'k…Ø—^z©®Š¼Þ}÷Ý­¶|ãß`êT°ÝÓs¯ºÞ}ê3'Ы ¡öÝ h`íÕÌZuÕI/m¾Õ›^$ ¶t!îá:°š8ÃêCž"°‚K-¥“¶ÔU>[n'\VÇ…Çj)•«”¶¾l Ù­2ØÞ>8†`å¬vÚ6m§yd«±E`³ï\~y·ó[´fög!¶IœÚì¯~õ+÷ÜsϹ³Î:Ë­½öڭ߀_|Ñ .»•þú׿º¿ýíoîÿþïÿœ úG?ú‘ÓÄTlg€Ô Ü/€íìú^×[ƒØœ1ðçXä>`+óC&&%å.[R°6•© ið5ت mT’làœ6‰S·À±)å°•Ü¥ v€¼n¿ãÎAlÀª»ïW_Ý=<…½7ù+ˆÕ¤N›'ëªrÀ­e²´ŒN€}þùçÝÓO?í>ñ‰O¸ 7ÜÐ]vÙeN Yu2pUwa½D»ùæ›Ø>‰H°ý°·/¼¿˜ú[Äbóek#li"Û§îÀ¾­)ïÛß±·óÛ¨6ù0luøÇ…åêø^ÙpÝõ°•ù!leRRP] °lÀØ´:º°3ßY—ñ÷s=ì+ÎnYˆm°çwžÛf…Ƭû±Cu;­´’KúY¹£˜ýtÒ¸,À b-ýÛ¿ý››‹º2¶3hM9mÄ $}˜| õWy Nmâ'Ál8†V)ˆ´zôÖ8[A®_¯žV¶Õmmñ!Ö@Tu¨m:^ÿûõ§AºÎS`kõ ‚ëÉ^ÕÀVæÅ°•IIA( 6W À–Š `/Hœ7î[‰‘ÉŽªH*«ÊòªhS#ËXn¹ån9ò£G·Òõî¬s.p›m¾…ûáo‹vÞÚ¬ºö¾eÊ·Úßý;á„Fg!ž3gŽÛ"[EaÕ•ø(ýíoë¤õYMy“M6q?øÁZÕ¬$pÕGãg5³± ºöâK¯tÇî Ñ׫WNr]õí f î~‚Ì0) £žKD[ ê÷Ô®yXÎ}/n;¾5« ±Êñ£±*?Œ¦Zû-Úk­6 ‡›vNuÙkê`ó\üèýl´Td¬AI¹“аli€Ý5y°®õòÀÿ›+J*«ÊòªjWãÊYj©¥~³Á†·&2"½¬Á¤ÕVsÛï°s4åìÅ_ì–IÖ…õVãKwK^Þ\1…=þ“Ÿ, °›•î¸ã·ë®»º·½ímîç?ÿùˆµˆ«Àö׿þµÓR=še¸ŸVÑr9kO^§ ºnMp„›Ð¶s€•M CPÕõµ®»Õ´hªQõÐ"´El# `³ 6Üž5‹²Ñaí楃ØÊP€­LJ ª@¶iÝn©=]êB<+1|%>9 Ð…øg÷ÑÅÏ·@èÄ“N-Cy{Ï=÷¸Cÿå_Ü\0fØË/¿¼…ÕÙM_÷ºB«±®~z÷ž{º+®¸bÌ6íÿîw¿Ûš­ø¸ãŽku5VWa¥?ÿùÏ-xÕò<ý°O=÷b+B¾ë[v]ƒV À.9£ªò"ÀDÞW44¸Sd3Œ¤À*â*pô“?c°UWÜ¢ÝqØÎ^Bµg¶2W €­LJ ª@(€šT”ªù$aªi·$ŽÉ­k­U*ª7H€7èçÀVsÏ”-€}ÙQºmÁ"·õ”mÇtû‹u‚bV“"iÖàû5ÆÔÖštõM~ïÜÞ Èj=guV4tñâÅ­É”—Ï<óL 4µ¬&qx†iÇÍ6s›'ã_Óö© M 5mÚ´VwaëvÜ/«kóþ|ÈMHºZ°†‰À¾âðí ~ü褭—êwÇínkÝ}õ×ïNó»’°êvœÖ¥¹Š.ÄYÑæ˜¶B¶¬±ÄqleRRP D¬ê_Ae­"`]ɃϘ†`ŒŠÊ õ¿º‡YM¾²Ûn»®¬º+¯¢¹ÚïGWCÈU×aåñ»«.‹[ý>Àª £<þ~†>øàÑ2T^¸OÛçíÊ(×EóÒ…¸¾;9­&¶sÇ¿S€Õ=£et>ºì²îë „¸Ï>£ûøã»6Þ¸§>¸*Ÿ´ÉšQ£û™=÷Üs[?YºóÎ;[ëo³ÿûeb9Ä÷?òdkl¬º}ûÝŒØÎí¸Ÿ€£ÚÚ«n¸lñ{€­Ì`+“’‚*P€Àü {>¨…QƒS'€µcüq«!À†ß•Ê/86¶:Už"Ú/@UÒwýõÛeଲ­ «îp¢'ƒX+SëŸ[§0Zæx¶‚[´ƒ"ØÎº*ö±Çs‡Î˜áþ5ÕÓ“ôÁþg÷ù$*{T³!¼Î>餄þüç?w³Ž?¾²Ç.·Ü˜|ŠÂžsÎ9£°j+XMKZºGmx衇ZËüh¦sA´ŽÓo×Í7ßì~ô£¹ë®»®5ÑÔ7¾ñÑî¼ =<&Ña °þ´¶'bì¡“¯É<¬|P  À ê, ò|P,°á,Ä*Ë" y«È«¢ªÙpb§´1°á6u-¶®Ï6£²Ej­<‹À–ϪŽ`{{“°;\UìwÜÑŠ¾¾˜Ü÷–Yf̤M‚XE^ ^ýHª@v³¤{pâ5ŒB¬Ea-Ÿ¬¢ºi €íÜ€4ÄúÓØÞú!ÔŽ]R `Õm`fU èå2:‚>E*˜iKÚt °6V6`ˆê:l 
ê±X‹®úKÙÿ~6ì]˜Æ–ÀVu×”+€íÜáŠXE/ï½÷^§ ›áüÙÏ~æ|ðA§ett¯(ú©ñ®ï¿+{r0ÆUðzú¿˜=UDuû7¾q À x…ýæ7¿Ù:Fp¬ß¶Å‹§&¶s;^ÐèO`Ëù… W `§UÙm ×ÛnýÖxÌš…ØŸ *` •×À:kâ°]íº ° ¿åjl۹Õ°ÇÌœéVL"ªÿïÿý¿\€7ož›œ¬õv¼fEOµ=XEc7J¢¹gΞÝ:îöÛooý†T³R?v!Ö2HZËW³Û2¢á0Ú[£ÓAU(PŸ¬M´q¤¹—i4)º©±d6{±º+ªªïykù”Weøãgc Ú–ÿñËQY~Ty€»OO´–áòÉQ€íÜaÍØóÎ;ϽaÍ5ÝQãÇ»½“t '䬢´®²ÊèÄLмžö…/¸'Ÿ|²m2€Õ„NZSVßüãsÛm·µ~CÔÝ8+õ ÀjYO:Õiæaý¦*1 qçv<ŒàÂ9c7²W R©ÉY—wfCZ÷\ûN˜dPk@)Ÿœ,ù<€µ®Ã~]þ,ÄáMiPë×oåø]‘`óì•ý# °;p!Àª»ð{öØÃm“táÕXÔÖøÕd’¥<€U·âØ|óÑ®Àþ’:Y]m»€u¿¥—nÁïgœ±D7a›DNÚ.5u§ko˜ç9ì·Á†§® ÀvnÇÀ« °¸D(0¼ À¶ë>ìGbÙT”Óïæëï·r4a’à2œˆÉ¢°iÇøeêØ°MŠÈÆlS9ªWíd‡Çh_Z»bǯV‘¯Kc`‡÷N,xælçNk°ëMšäN_j©±Ý€#öÀ½ör_K"µÖ}XÑTÍ0¬q²íºþjß¾{îé>òþ÷»E‹¥æýÉO~ÒŠÀ>úè£mSSÖ_"gÒ¤ÕR¡Õ¢¯D`;·áaÎÛ![Ði ; ˜°U@e<2f=Úvz°½ý%`;wàB€=à]ï* °Ÿ8ôP÷É$bkðzH2v~°Zçõ3Ççžx≎’ÆÖ†½6ÒîË^¬ÀõÈ™Ç䫯üÿr7j?i}\à¬ó{ ‡CC"°½õC¨z©›D;×â°½¼mÇ`;wÐB€½æškÜÚ €¾ä-ksHY=ô_þ%u ì¿'-½s„Qx=)9vêV[µfVäM_÷º¶cWÛkµ}·ÜrK `¨y© ëÀjmYsÝzʶQ0KâWìXθðÕù½†Ã¡!Û[?„ÚQ — °l)€`{yÛ°U8¨i“8…QX­íºo¤û¿ímnáÂ…£Ëè|÷»ßu;­´Ò(ì^œŒcÝ{ç[Kì¬@­º d¿þõ¯w±°*7/5`ýërÿ#O¶fÞs¯wºe–ÏØdÒ™vv ÀtUñÛE/Û [™21)iNe¥Q Ô  À°5ÜhUWA¶sg7 `¯½öÚ%¢°êüïI$vÓ5ÖpW_}µ»á†Ü”d¼¬ UûÔexû6r<ð@k¢¥ì·_kMX‹ÂæMÀÔnÿÍ7ßÜŠÀj¢¨¼Ô4€õ쬱±D`‰Àcÿ– «†ležÅºII‹++‚P X€­áF«º ¶s§/k÷NŸ>:ök ˆî›t ¬>œ¤Í“èêÖI×`›¥8yâ»ÍV[ÍÝ}÷Ý£³_yå•n‹‘q±Û®¸¢»ä’KÚÎ °‚Ó˜¤‰âîºë.7þüÖÒ[`Í®|Ýu×µàûßøÆh$TÝ}{åøÎ½êúÖìÄûpPÏÚЫsϪ—lç÷tÓ®)íéî5`+ó,ØÊ¤¤ º`X¶®»­ÂzØÎ£,€ìÙXX-£³G²´ŽºîÕ¯vêR,UäUÿoóÚ×¶à0œ%øï×[¯™½"I»o·]î,ÂY³ ßtÓM­¬f)ŽIý°8öKÚ/Ûù=] —†leN[™”T—,Û$€‘þ̺Œ¿Ÿë`;wÔ²VÌ÷¾ûÝ­(¬­«m9ø`7yùåÝw(ÕDOïLÖn=çk_K½fÍšå4”@WÑØ+®¸¢Ô}öãÿ¸°÷Þ{oT`;·‹^AÛ¿×®W63ìõ°•y1leRRP ˆMgç•À°¥ë.Mâ4+1X%>9 °;»íVë,+ ûOÉr8'œpÂè,ÄšTéÉdM«%púéd­g­õ𖼓’è­@7™ÃýcÅÍÊÛnû7ÞØXilê‡.ÄÃîx§?Ûù=] —†le®[™”T fåN*À°lw[ÝE°;j°Ÿþô§Ýí·ßÞ7zçw¶ÆjÆá÷ÙÇÿÿþ¿1«‰”’úÀZÑvKÛì³ûî-xÄ*rû“Ÿü$w)œ°<_ÀjŒml`;·^€۟׭¶BÌB\±ÏÀV,(Åu¤Ëú®Å×wÕŒlG7gǰ;»;íò£í¸ãŽnîܹcV³ /“LâäG`°yËÙØþo}ë[n§‘Éœ4+ñû÷Ý7úX+CmÀ ¨cÛ¹môØþ¼n½°ê`;v"ÆÐ-€UŒ4øÈ~ªüDìÔ¤Ædž‘j>7'é–$ÚpëZk•ŠêÅÂùº¦±Ú°ÕÜ3eK`;wvµNéÞÓ÷…ØW%ù¾÷½ÏÝvÛm-X¼çž{Ü'fÎt\pÁhâ"ÛZ6Y+V3+ ;)鎬¯ÀoÝi§ÖÌÀYPl+(-’š> 18“8aÿ† »†t!.ëA,q\7v檫Nzió­Þô"i°5XúÕ¯þKe–ørAQ«Œã«ª€í=XÆh§ùتîšrå°Õ9ZÒe“7¼qd'$3ŸtÒI-€Uwau¾ï¾ûZËØä­ÅîÿØ‘Gºc“Ù‹5™ÓQ À{ôÑcʘºÉ&nƒUWmÕ•Vöõ×_ߊÀ 
|‹$¶:û¨ ˆÀöß5«Ë6¨'Ý6ØrþCÊQÝØYº>Øîàÿ®%ö$ÿ©ÊO4ÀVV) ÀvhL³’ã•øä(ÀVÿPøÒig¸‰IÄtäÇØ½á opçŸþ€}à\‘¤uX'ÌF¬µd7œ8Ñ]tÑE£e`·L"¿Ç~üã©åÀÞqÇ®hjò:°85D`±êÆMS¶2W €ý=÷cÙߨŸ¬¸¢»ûˆ#H¬º‰ß’8äÉK‹*€|°ÝyØ<ºøywÈaGŒB¬~˜÷Þ{o§ål-“vÞb wÝÈÚ±êN¼cÉÇuT«¬í6Þ¸µ,ÏÉ6M$–¯ÙÕ¾¢ €íŽ”}ÈçG¶¿®WÞõd÷¯'é0äg`ØÒÑòAØ©‚Òðhìqù¿Ñ9ØH©Øî:G·-XävÚe×Q]6éþûÑ~´5U݉‹¤Ù³g»m’%yîX‡=2ù¾ÛÖ[»7Nž¬—@N“<½ïÿq‰r¯»îºÀ FË$E€5“±Ê¹úê«[ãm-¼pÑ#¥X8çÕÛ[½¦Øé`k ÀF: ùÙX¶´?0«{$×ÙIº™4\‘8ß›äÿ6Fç`#¥`ëqÌ.¾ôJ·Îº¯…¾I“&9é¢E‹ %óƤ×Â;“åt dç.½´›¤ù#“d_‰Êj’§d:x÷Öm¶SÞµ×^ÛØyóæ•Jl}vkOYùØìkõáÃŽt;î´sTÚ~‡ÝvÛMÊ«2‹æ/sÌÔ7oï¦l³]t›ŠæW›ÊóË®,í´vjïUÀF: ùÙX¶ôoÁÀD`óïr Û© °õƒ‰–Ý9ð}3>ö½ï}o+2Zb•×@vß$êª.ÅX¥Í“íw¿ûÝÑò `¢e]ˆë·•2Î9›}ä ÅÚÿg$“±%“¥u+¿Ê-ZÇá‡î6NƻǶ©h~•[ô˜ãŽ;ÎíÀA¥Ö26^õ1l§žÄèñ“ÿ’u•~˜…xH €­ô¾¡°>U€lä…`{%×Þ0Ïm±å›FAv¹å–sG'Ëãh)œ¢i«õÖk5€U÷bu5¶r4nUXM"U6°½³•";Û`]äGcÅWM–¦Šýͯr‹£!S¦L‰mRk˜B‘ü*¸è1sæÌ`#Ÿ·d+¥ À–2œä Iš“w°º ø @`#¯Û{(9ëœ ÜÊ+¯2 ²ë¯¿¾;ûì³ A¬f!öV «q²§vZ«Øo¼Ñ•Mlïm%dXÖ·(Œ°‘N²Õ© À–µ·(€–”^u·² æ8`#m€m”h|¬Àcéd2¦äÒµÒÎ;ï쮺ê*w÷Ýwç¦m7Úh Àž—ÌF¼Á„ îÌd¬¬ŽW9ŠÀ B;I6 ñ¾ðf!n¨SÀ°l±ßuºG: ½ÉÀ6ôYóBµHžß§J+`«T“²jQ`zR‹ —OŽl1G§Èq™¼Z’f·í5 ‡¯J@tÆŒ­µ[.\˜™ `Õux›d<컦MsZûÕŽ1€½þúë]'é²Ë.s{î¹ç+³)¯¶š|—9WŽéŽí°,[ìÞ`í*°lYš˜»D'زòr ôP¶˜£Stͽêz·ÉÞ8 Š’hê¿øÅÖú±iI»G½}ÚkºóÎ;o‰Cï{xñ(ܦ=ß´_Û-Ÿ}÷ó¦¬åï0wû9 ÀV|çP ²l1G§Û?àEËW·Þ}÷;`ÌøØýöÛ¯µTŽ–Þ±¤åo° ë§9sæ¸vÚiôøñÉ8×#gã4î¶h[Èß [`X¶Ø½ÀVæå°#ãUÛ¬"³ên¬<ÖíX]‚í*ðÕ¾œ1¦K²ŸGðv!¶r­L•ÑOÏe¶²û‚P`ð`‹9:M}¨›ï[¾iD—[n9wÔQG¹Ûn»­•.½ôÒÀj,¬Ò÷¾÷='Ðõǹîú–ÝYgf{`X¶Øï:[™¯Àæ¬àT€éGO T}€Ìi»m ó„«¥÷tŒ¹Ú¦ú,ií`+»)¨˜‘´}f·¿¶¦°Å¦? 
Î:ç‚d ë*£ ûú׿ÞqÆ£«Hì'?ùI·ZÒExäaá6Ühcwñ¥WöÕƒ®éס—í`X¶Øï:[™ËÀæ¬àÕSë&¬íŸ¹þsD]‚ýˆk°*³ß"®ás² ;-)svžu+ÓÍy™Ø5)0+©G‰OŽl1G§—`[·ºÿªðÒK/ýÊdL[oí&Mšä6Ûl³Ñm&Ll£-—|ýa+, À»WØÊ\%6`ÓÆÆ `m{Àêù«<'Ÿ2»õÌVð À.aÇ fåN*ÀVvÿSP °‘"°Å~‚¸…‹q{¼m¯1ãcGÞpº÷àC,‹3Ý…Óì€`Øb¿ël¤ÃŸ €í` NÛ¬º °ù†8’€–ŠŒMQ€¼l1G§ŸÖÚªÞë×[¿²[OÙÖÝ8oQ×…W]s€`‹ý®°‘C~66`%}û;öó >ÿ¢K[Ïg›98mœ¬­ûjy¬MàÔ>еyä{¾•ÅçˆØ­’ò®ˆ/“œ(ÐUØHyØbŽN??hûp\k€`‹Ýël¤ÃŸ €ÍXƒUY„ LÃq±XÁœ‹Õ1úßß`µ|Ž•ã—ÛOËéô `óÍš(PŸl¤Öl1GD¯¦Û À°Å~§ØH‡!?;°Të>3¤¶„NÚr;ÜZ¾pâ'©öù€ªÞV~¹þŒÄMn©}lþ FŽÁW€¼Æl1G§´q¸¯) À°Å~ØH‡!?[§ûõ¤Š·ÕÌÒõ¦çZÖØA׀ͿÁÈ1ø °‘×€-æè ú„óë{`X¶Ø} ÀF: ùÙ:ØñIŽèC,;Às6ø>›ƒ‘cð`#¯1[ÌÑðЫé6À°l±ß)6ÒaÈÏÖ)Àª†b‡`Õõ8œè©éÏ*ÚÀæß`ä|ØÈk Àstªø‘¦ 4ï¦ °,[ì7€tò³U°!ÄÀvóùÐ䲨üŒƒ¯yØbŽN“üi×’etÚÛ€¤ØÏ­·ÞêV]uÕØì®h~\ô˜Ù³g»)S¦D·©h~\ô˜9sæ¸ý8¨¯Ç(°‘C~¶ªÖ‡ØoÛØa}–°ù79_6ò°@ϰ>,õ¼åì­=y·ýŽ;tzÃ7+|~l>ûØ/ùËn5Ö,|-šdŸº_ÛXœ¤›Iipk¢ß‹j8/)ë¯ïØûÝ}ý‚dPŸ5UŸ鸓m `#//;øûÄÓ/´Ö›«úaÓIyMkO'çÒ´c]ü¼Ór ƒžæ\ø½ÂçÀV°ŠÀîú–Ý _‹&Ùç~ÿ| ö[IšFj”º&OñÑ£õüjÚoþ ´§W«n‚>(ÐØÈ«À>À6mJþpöAyørÍ¿—Øî,]ˆ#¸d‹U`b’Q³·|9º7ÿ·µŠç_¯Vo­Ôõ‚ 4A6ò*°Íz0Ü÷ðâÖìƒú[ÅAe°ÍºÆU]WÊ)~]X6í¾a l¤ÃPO¶M’jæ'iúHu,Ë蔵¼Ésò`ób è‡O†Ë'G¶¸ÜMpèFt€mÖ5î¦ýP6“8ùˆZt<+“8á2ôX­ÿ*xÄÚ€`ËšåÔäÀãò`ób? 4Pºöü‹.u8ÃݾðþV„q»©Û·"ƒ¡Ã­õÏ´OIùÃH¤ŽU:Vyì»Eí¸4Gþ#GÌ-[õ„y/Ú¬^Õ–¡1]vnú{æYç)+OíW¹‰™Œ¶SÛÔ;Vm ÏQI+Ó-lWY€Uv>víL›´óó¯…òùzøÇ¤k¿µ;íZK?_Oå÷¯®§ÎÕtÓù‡šûvaúè¯éªý²'k‡êó÷„ƒÿº·b?Eg.šŸYˆ›c[D`ãœìŸ´dbКZVÏ=#Ö[ƒÑ¤g Ï‚î߯ú}î…%°½P:Q CêXƒ(=ô0PÒ•ÿ`P„ Ä"zøog0nÓ÷ppƒ]Á“Ê·vXÙUiPíC›Ž4hÛdD*ÓÎGÛìüüòò4Ð9ûº(¿àÊïlí–N_Ú¦ï:^í Ï¿ ÀšF*Oç£òU®½P°2í» i¥¶H»žþ~ÓÚÚmeù×Zyt ³êW=>ší¨N@ €ýënZùúIwåQ9a8.Ýw\êÒ€ÍÇ÷¢Q[–Ñéð!Ìáy Ô°þsKÏ%óE˜x°ûÏ6ïV`?  
À¨u¬ÿ4b²œWåõ¡Iù zBPòAØàËòÔùõ„y  Ã6i»~\Ã( å XÛÃó3Pk§AVâ,°2@óÛ+€S[Cø- :ŸæÃmöR@šhŸ¯Ú¾D0صsô#Åjsx­Ãöj¿aµ·ä~¾0:­c|½­ÍþK‰°!ŒѼÝwr:Ñ€`Óì‡l£¢ÚÖ^ŠÆüÆèy§gKšÏ gõæ2nçç„p¬2ÃÞ`*3ì‘fu[[ü:Ò¶¥íO›oÃÊÕ¾:Á€mô}HãP Y Ô °þhVdÐ~ð-ªéÿ€†•|¾aäTõY²Hg°†›vQÌ,è ·§•nk°i°Ÿšú E#°ëÒÈ×,ŒìÚÛê4xÏj—Ú–uÍôðJ{›=¤t¡ãGdÓW럧¾·{±ãܧ™ À°l³|ˆÖÔ°öÜK†ÚŒ=#ì%ªþú¾„¾Û%늬m>”ÚK[+Ñš6Ä*|¶Y4ëeõXï%¿Þ°ý~»Ã €ùöâßzÕñL`#î²  ¼¬@“ÖÿA·ëjʬýXûbÿÇþ(À`†+ Âñ¡ººFáC¿S€Õ›_ÿ!kcƒý¨pÚÛòp›9ÖÅ8Œ”‡¶‘6F:ÖVÈ×LxÕu`X¶«ÞÏĤôÜY_ ¶ €µæy/µ­×“Á¨þ†={ìÙeÏ{©>¿üžc~$4`õ{fÉÔƒV+×êõŸgaï(Ëã?휬gÝzÆ°ï ²£À0+Ð$€µ·–ö㘩+°Ut Më†líÌê^vSîV6œ`BBédŸ¢Øðø¬‡•½aNÓ7-*Û×:«Ì"kç!è6@ »Kµkg·Ò”Û;À`X¶«ϺIé‹+®¡€yNÚ3%|akÏz{¾Ä¼\ {ùv °aO¡¬zýçfØ+Iõ†¾W¯ž‹lÅwÅ¡À +Ð$€ 4UD`íÍjø61„ýÈg½q´—ÿ YD2|›öV6`}èŠénN®¤cl›ó`NëbŽ ÇÏ$Û›_¤­ þ[jÿ­px]C€ Ä6ζÀú]¡u|ÚX¤°vŽy³Q¡½ƒÐN´`X¶«M߬ùí"°±Ã…bVeY¤6œ…¿[kí·«ÿ×Þ·ëAÕÉïoÞ±lWïM GÁR IkãRíÇߺv:Ö‡:+;Ïi?ìáÄEþnØ>?ÊiÝp´Íïúš¡ipéo]ÚAh8žEmð5+°áùXöpß[„ÚŸü!l—= cÆÀ¦u'/3Ö«”u]®ÊuŽùÉ{ ³¿ZP`X¶«>LßlÌØ<€õŸz®„QÕp›öë»=ƒ,·6ob&¶«÷…£@[f&{•øä(PÀ*¢þh¦m³5^íA£ïat,íÇ8m›ÊÔÖf ™¼uåW:>+R«íÆ´²b5P=*ǯ#íX¿ýª/+‚œwl;HòËõ#ÖYš‡×Æ–ö ßjÇ\3ÕçkĮ̂ïa¹þ6ëÖå·É¢ÅáWµGÛtLÞ( ²Z¨¬SO€`»êõ-ÀZWÚ4ÈÔsÞ#ip®8 Ÿß~¯¤´ãíåx»žU±]ˆÛ½¨Wù°Ó]gçYü´$ÃÍy™Ø5)0+©G‰OC¶N'•ºú0ª¼vyöÖ"ËuONQåyQVyû`X¶«nQ_¬=,ªgˆÁ©½tµïö¢Zõ».ñ–Uï Úû±îÌö×ÀÔ/3«÷VZ½>°ZûÕ^V£mþ‹ÛØɵÉT €íêo…T€¬®,Nwy§»Jílr$ÿèÿo3VYg¯ÊJ›É¸W“SôJê{ß°,é”ËÖ×k½¬ì9iCÂÙ÷„þГp¿ŽŸ¥á6¿ ÁlØóÇ@Sõ¨|Á¬_¦¾‡óT蘴zÃGaûÃcTn/fâ×ïs9³Ë< €­XPŠë¾l¤Æl3À².аÅÍíÍkøw»Ïúç\—ÎÔÓÌû €`ØHç \¶¾X~»{÷ÛÝ+€Ý*©øŠröÎQ(P¹l¤¤lï~¬yP¢=6P¯ °,é”ËÀ&cHù]/§A¯¶œ©s tG6RW¶Ü-(tÃúÏX€tÊe`ØÒÀ–»é8j°`#¯'ÛN8àÄ5ÃÊÙ À°‘ÎA¹l, À–»w8 Z °‘†À–s„tÃúÏX€tÊe`X¶Ü½ÃQ(À±¶ÿœpÀ‰k† ”³€`‹x…ó°,[ø¶áxE"°‘ÖÀ¶w„5-½¿¾àÒ.oÁtôŒ×­:× €`ØHç \6€`ËÝ;…D`‹Ø›íkQ­7ˆKË”…!­I®=—U–i—~eëâ¸Î YvÊ”)ùÔêå˜={¶+rÌœ9sÜþÔ×÷ù1ǯõ'õâ›Og °léß½`ìÌü–8zF²eNÅeR tU"°‘ò°Ù …¼C¼Å\/à¡Ûí“&E8—~D°Ñ^Ü iuN˜0QRtZj©WEçU¹Eó—=¦È9Ô‘÷ÃŽ,í´6Á6ØH‡!?ÛÄ$ËÍù٠嘥ëÓ;¡ Ý}–°…î 2¨lä…`³mT7Xÿ¡¥mJM}5­}8£ÑÀßÔëH»ºë(¡/úú6ÀF: ½ÉÀIT·W«n‚>(ÐØÈ«Àf;rŠª¬9:únÑ ýoÑYAc8ÖÓºÏ>ñô ­.ÈÊ{ÃMw´à׎ #“áþ´H¯êñÛä;aYí³<‚I«[Ã6[U¯å >-Âë—«óó»\§E±àÀšdl¤ÃЛl,[Öòf$æv!žÖ…neÌq(ÀFÚ›îH &«úkŽ–¦ Mÿ¨ ò®Ƃ:‹ÔZYÖVß ú4ÎVåÛx[F}Ç›Z9Y#eµOå[dÖÚ,°ÔùùmЪòUVZž0ÂkçqþE—¶ÎAÇúí³63Ž`i°Ðì‘l¤“Ðûl,[Ö 
زÊq\ϘžÔ¬Ä'G6`Ó& Jë¢+8 †ÑZƒCƒ8A^15ØÓß°¾´I| NsÂÓÚ§rÔ¾"Mõ»C«þ¼.Óaùyc\Ó^À6€ 4ɈÀ6ÚU`ز:59ðмƒ‰Àæ)Ä~h lº#™”~$3t¾vŠFj{žYçc]‹ íoÎsúÒ6ë\ÂíiÝŠÃ<~ù1p“'ïœØì`Ø@7m€m sòJ“X¶« ÀvU^ Gî(À¦;†YKÀdM’äõ ljƬuMV~?qÜXý"öB^ìxɰÝñ/**uÖö;îܺF¤ÁÖ@/ì+²™BŰ…ä"3 4C6~ l»¬ö)‚i ëw'ŽéBœ6޶Œc™°êʬ¶Ùx[+·Ó.Ä6n·Ý29ŒÊØ1Ç`7uÚÛ $£[%Ûg‘†Bƒ™½°D¶ªS' t¨? ±*MÍ.œ„þìÄþäOz³èÏjNâdãTý5VU¾?nÖ&vj·kVûT·€ÕÚl4ùåÛ,ÆV¾ÀTíö5dƒ`¿\ëJ­ógb@¤NÑ Ý›uÖÙouå£ï·ó©¢½l‡Ž‡£@+ÀöñÅ£éëÛ`ÃIlIœ4P5õNΕE! .³–ѱH©¿Ô_¿ÕÝ`³Ú.Ñ“6ù’¶ÙìɱËè¨>i ýïGŸýÙ˜«p4) ngiã¸ÍfôÒ(œ.ö“ž~ÿ°•ù@“’r—-©¬6 B `+‘"P nØl h×­7Œ¾ÊL›ØX‹|äE@Tv§ËΤµOmiW¶ïü­_å¦Õiݪclò¨Ø@V¿ዯNÎ)L¸ª^%;nÚÛ‹.BH•å_ŸÒ šU—’-Ç•vM `m¼»µWÇøùóÚjZ©>•) Ú‚o+SíÒÿeÇëÖi“le®[™”TÓ“2fç•Àæ)Äþ:˜•T¦Ä€eFÒ!Y½N‡—ºš ýwQ>h¦Íˆmk×3Œš†ûiy=2Òº[Ýþ Éúßïõ`pÎDöŒǪ ¨ý‰ÓÚÙfZL•éÏ$®ïíÚêÖIâ¡u=Ø¡½ô<ñI«r'`y톶Qlä¥'ÛLG@âº`ÅmÀº­êo%µH¥Ô’"‡@릂™Ž » ç]—4€ÍëoÏŸvÃõáÐ@=¯M~”WçëkÖÓV‹ÀJŸ4x`#Àƒ— €¼kÚÏgÀöóÕÒ¶°‘€-î$Ç:‹äC[l ^°®±ú޽´.»>ÀÚÿa]¬AZltÓâfuÉ í!`ý±´MX‡ô±nÚ:¬.ù¼lìà]Ó~>£(€Ý*9Ã+úù,iû@)ÀF^N¶^ Aol {6`à$U¤Ñ»&±òÙR5áøÓ˜k—ÁTy!Ôª,´³"°! úßÓºEÇt!ni«_‡u…ö#Õläxð²°ƒwMûùŒ¢¶ŸO¶žlä5`»çLÇ8»äAl :H›ÄÉÆvÔ†Õp^[þ)-Škc`ýñ¡áõKå4 Ûc+еò¨q¿Íþ9Z”Ø—+h7p×__$‡ÛbÚÎŒŽNÓ®ÉvÎ$N‘C~66_#rÔ§[ŸÖÔT‘l¤luÎs“4ÚÆu#a4Ôïòj]ŠÓ–ÎQ9ÇÔòf!–Æ‚F›V·¿ÝŸÝXÇùkcy•?ƒž£ãµú´ß€7m¦c•Ù.«¶¨ÎvmµÈ¯_§¯•ߦ4}›f‹l¤ÃŸ €Í׈õ)ÀÖ§55U¤)$ Ø4Í™¤=ØdY˜…³ç†“9 ´e´%`ÒêÊêFk ™×>«#`}W½6ÆÖ/Ç„Z¤7míèðU†•›]ËH[(m[»¶¶«ÓÎÉ–*J[c6O¿º÷°‘C~66_#rÔ§[ŸÖÔT‘l¤,°P·³H}Ø\Óm \º¦Žöf­£îa¯€tò³°ù‘£>Øú´¦¦Š`#…`‰aw^9îßl)º#‡lïì€tò³°ù‘£>Øú´¦¦Š`#…`{ç4Nh 4ÏÔ9­ën×*­+oõ{l¤ÃŸ €Í׈õ)ÀÖ§55U¤)$Û<zØIΛİ:m€tò³ML²ÜœŸ(P‹l-2SI• °‘j°8Šu:ŠÔ…½aØ@Ól€tȆý¥@ÀªÛ€  4A6ò*°8“Ms&i6‰ `uÚé0 úK(€–œÝúëÂrkØÈ« Àâ(Öé(Rö† `M³6Òa  ô—l]/Z›(0}$!FŽ,ÎdÓœIÚƒMbØ@6Àâ*¡À@*059«CóÎŒlžBìG*Àâ(Öé(Rö† `M³¶Î MBš`kšjP JXœÉ¦9“´›Ä°:m€­Ò« ,è/Øþº^´Z °8Šu:ŠÔ…½aØ@Ól€Å!BáU€ÞkÏ™÷±,ÎdÓœIÚƒMbØ@6Àö±CÓQ CØäpè…,ŽbŽ"uaoÃfs¯ºÞí¹×Þîýø»ÿ‘'ݰ?œ/[™÷11)iNe¥Q Ô [ƒÈTU+Àýà`ÒFì´ßlàÆy Zàšüf¦ &:ÁÒSϽÈþ¾96 ÀVæY¬›”´¸²Ò(jP€­Adª@ª`›ãDõ›ƒN{±l`IX¸è7}Ÿ}Ç€ë{ìá6Ø`ƒÑm“V[ÍuÎ@lC €­Ì³`+“’‚êR€­KiêA ` À:·uÞƀë›ßüf÷½ï}Ïýò—¿l¥Ïþón„ £y¶Þf[wí óÙƒ,[™SÀV&%Õ¥[—ÒÔ£ÀÌ$“ŸØÎWœ4Ć׮‡v„?~ü(˜nºé¦î /tO?ýôéÁtÿò/ÿ2t±e|lïl€­ÌU`+“’‚*P`zRÆì¼rØ<…Ø_§³’Ê”ø°D7zÝîz瘣}÷´tñó­ñ¬ךü̶’º 
ŸsÎ9î™gžÉM·Ür‹û‡ø‡ÑcÀŒíÞõjw/°•¹JleRRP ÌHÊÈT €­@iЍL6RJ"°½q˜ tÇúÓ4ÓñŸûÂp]{íµÝìٳݯ~õ«ÂéÛßþöãc¿tÚ¼x«ñÅé0äg`ó5"G} °õiMM)ÀF Àö§ üpݰúmàÄ“Nuš€É"®“&Mr'œp‚[¼x±{öÙg;J*GåYÙ›m¾¥Ó<\çî_g6ÒaÈÏÀækDŽúˆØ­’ö\Q_›¨ Ú*ÀFÛ}ç±þ¶¯œy¶[{ò:Þr8ÜÑGíž|òI÷ÜsÏU–}ôQwä‘Gºe–Yf´®=÷z§ÓÌÆØP÷l€tò³°ù‘£>¢¶¾æP ä+ÀækÔÊÀvÏ)ÂáD[l ¿m`Î…—8EB-**°<æ˜cœ@óùçŸïZZ¸p¡ÛsÏ=ÇŒ=rfRo2ަØH‡!?›¯9êS€­OkjªH6RH¶zgM±þ¶uÝõÁU,rÈ!îþûïï´¦ñܹsÝf›m6fýXucƾªµ/6ÒaÈÏÀækDŽú`ëÓšš*R€€­Ö±DOl ¿m@‘N‹¸êïþûïïî¾ûn÷ë_ÿºgé”SN3>vÃ6v_z% [ÑDOl¤ÃŸ €Í׈õ)ÀÖ§55U¤)$ÛßÎ6°ÄõêµI“^ž¤iõÕWw?úÑÜo~ó›F¤ÇÜuÔQcÖ›Ýõ-»»Û,d;Y6ÒaÈÏÀækDŽú`ëÓšš*R€€­Öù&Ðèo0€1c†[c5ܧ?ýéÖÒ8/¼ðB#ÒOúS÷ö·¿}L”øÃŽ`|l ÀF: ùÙØ|ÈQŸ½Ø[Æ›¤›IC¡Á7§¾ª>l¤’l;ÛÀ×¨Ö `¿úÕ¯º›o¾Ù}ðƒlìœ9sÜoûÛÆ¤+¯¼ÒM™2Å›y¢c|l9[`#†üll¾Fä¨Oú6™© ¸:Òðh\óã*´i6RL¶œÃ4 60˜6àìí·ßîæÏŸï®½öZ·ûî»»]vÙÅÝzë­îw¿û]cÒ7¿ùM·š·6­–ûÑìÉØg¼}°‘C~¶‰I–ÄãƒP ';Mðú“Wtwqi€5¸u­µZ/*’_ë>ùÉOŽ»ýŽ;»ç-d#º°‘C²%¾átù‡¤¡Ñ`õ Í, `×M*¬ @Cm¬àæ‘G! °wl»-[áÝZ´(vðr ‹kŒ ÄÛ@;€Õú¬÷ÜsOk\ìë_ÿzwâ‰'ºßÿþ÷Iò—öÙgŸ1ãcßÿ¹ûym²lQÏ¡žü LLXà%ñih4øz…Ö°Ó’ +ë6À´°Þª%Š`ã[ ­°Á·€ÕDJêJ|ðÁ·@öŠ+®p/¾øbc’fOÞf›mÆŒ=þs_pO=÷" ›²l 硆C47ŠëÂ÷¿ŸÞ˜ÜÓX ¹Þs*4-–(p÷€ºK;=¹”øä(À¾Ctq±xˆØE‹¹ûî»Ïi2¥wÜÑí±Ç­õbÿð‡?4&{î¹cÆÇNf|l*À°Ít•|€}ðÇ?¦7æ÷ÆÔpÑ‘—UìÔIJͳn"°lXÝø.lž½²D6ޱР|( °÷ß¿ûÙÏ~æ¾ño¸õÖ[Ïyä‘îé§Ÿvüã‘~ýë_»ãŽ;nÌøØ­§l뮽aÑØ‘h,ÛL—€í^ð¨›~}™²»°Q† À°¥ÞްQ÷W×2°qùg>ÿ·ãN;G§m·WåÍ_ô˜íwØÑm·]|›ŠæW{Êó•ÿ&NtÄ$3€sÜ}Z…N1{òÉ'»üà­¬ìƒ>è~øa÷©O}ª²gœq†ûÿøÆ¤Çܽ÷½ï3>vú>û2>6¹ÿØ®¹ À°PäÁ, ÀFÞ,MÊÀÆ9Æ“×YÇ}ç;ßi­ “’k•ÏÊ*š_Ç9FÎtr­£ÛT4¿ÚSô˜Ù³g· ¿ è Œ8;F§|ÚìE]ä¶Zw]7áU¯jMà”°Š>hùþçv[mµ•ûáèþô§?5&©m;í´Ó(È.3~| à†y|,Û$¯ä•¶°l– À°lwZÅu°ù­œ~ìâÅ‹]ìGpYäS4¿Ê.rŒ&œYuÕU£›T4¿ .zŒ €³?À³>ÒV“"½wút·ùòË»ùɽý¾å–k °=ö˜SÄóꫯvS§Nuïz×»ÜC=ÔˆPë…Ü:Éïš~G”vH–ÝV;`+v,**€`+2¥¶Å°,[ÇVqlœc Àæ³/gKà ýrÞ!À~ñsŸs믴’;}©¥ôÖ¨•bö‰'žp¿øÅ/ÜÙgŸíÖ_}÷ÙÏ~Öýö·¿uþóŸ‘﵃ ¶_®QÕí`+s,&&%U6  ÀVf™m `X¶Ž;­â:Ø8è`تfÊ‹»÷êÖ)Ø7¬¹¦»n\Ëì“O>é•ýØÇ>æÖ^{mwÁ¸¿üå/=Mýë_ÝÿýßÿµÖµ`—³*~¼cqë&'½¸ª`تl©]9, ÀÖq§U\çD°lÝ E}q÷fÕ:…ûÍo~ÓMIº¼–‰À `Ÿzê©ÖìÄ÷Þ{o«K±ºÿä'?qÿùŸÿYkz饗ÜÿüÏÿ¸ÿþïÿv·Ýv›ÓR;,[‘kÀÂ¥X€Yˆ1œR†SfÊ몎aâŠ%‹`ãœd€­”(/îÞ«[§´1°»n½µ»Â‹Â^œLâ´~2¦\ðÎB¬g£µ.Ä>À>óÌ3îW¿ú•»æškÜÖI¹þð‡[p+°ìvúÛßþæþ÷ÿש]×{Ýu×°ÌB\Ò{H= €…CJq‹á”2œª`´L9]Ø™ÉO«ŸØ8'€`ë)ê‹»7«Ö) 
`¿÷½ï¹ÍV\ѽäAìÃÉÿ;­°‚Û+™Ñ÷–[nq¶ŒN,À>÷ÜsîùçŸw§žzªÛpà ÝI'äþð‡?8uí­:)Ú*pÕÚ´wÜq‡»é¦›Xoù*ÆÀVæ*°pH)éÀNO,{vžuebcјKš ASæ§Ün»íæ¾öµ¯•º°eÀ°_ŽéÀÎJ V‰[ÉÌš,[5(Q^o5O÷¬et4 ±Mäô¹W¿Ú”t+Ð~'I&KT}ü#i­[`ó›ß¸ŸÿüçnæÌ™n“M6q—]v™û¯ÿú¯J’«"¯>úh ^Õm€k{le® À–âœ.ìŒÄ²s'J€¸jzu²”ˆå«!@j›·Û`yðÁ;¥n×SUùleŽRs¢X6|Øw/5]§,€UwÛMWYÅ=72 ñ®»ìâ6Ÿ0Á)ûb’>¹ì²n—-·,°/¼ð‚ûÝï~ç-ZäöÚk/·Ç{¸x #ˆÕ8WMÒ$@þéOêîºë.Ö‹ºúvÀ–rÒ`ØRüÀÖl8X«sŠÊÔVyEÊ1.rL/ó°•=8JÀÆ9Ý,Ûtð¢}q÷ržNY+<ú¨£ÜQãÇ.£óýïßmºÆî¼dL¬&wzgÒ¥øÌ3ÏŒk]ˆ˜°‚X¥K.¹¤Õ­ø£ýh @IMж \5žVkÑjŒ.ÛÞ6ØRîë1‡ßSÿ‹Ò|ë÷¼ç=}dª‹z °[%–œÌqPÍ'™:{Ú-ÉÃàÖµÖ*Eòu ž°ª[†«Hì)§œ2Úþ° ±¾«»±¢¥‚Nߨ©5M‹¦*¯ŽWÝ ÖmYÛÏÐi‘ິ‰­€­æž)[ çô°lø°?î^jºNíööÛowë'QØ—YÆxâ‰î¾ûîs .tû¼å-î³ ¼Þ›<÷7}ÝëJ¬Öd Ó¿øE7yòdwúé§;i^Ò8W%Á±&’z衇،¨+ز^CÛã†6öÆ4_ÜçùÅòÙ•b}ä²ù´.»@Ë–Qçq½ØJï„~X]t¯Ÿú.à5ƒð!Sùl|¬ [ûô] FT Ž•O7†þZغ4[]utYîÔÀØJoÂ…°qN7 À6¼h_ܽœ§S;€¬žvÚi­Ô°Špj-Õµ“îÄê^¼E²W^ye+úÙnâ0›°Ú¦2=ôÐÖøX]U×à0 ZõÑÚ²šÑXǰñö@¶°ëuÀPìå—_>åŸë·¢óߨ-ª;lS§>{·Ž`Ð…Ø.nžYëƒ"©28?«·':ÖŒPoTÂȪÿ†….Ä­ßÕY#©²_åA-€srX6|Øw/5]§<€¬n’<“}€ýÙÏ~æÞ¿ï¾îô¤+±–ÛÙ#Yãµ(Àªq»¤ ˜vØa7=™LJ€jÑVÝ™ŠÌêX-ÏÀ·C¶2€ 8ÄzKš¯ÎS£ï 6) %ÿÝt‰ ,H•5¬õÕ±´0ë»x˜ ]v؆¬šiëª.´ŒÏòùýéµÍ 3,'4€-ò(`㜀m:xѾ¸{9O§€Õª ,hu!¶u`µ®ëFÉÌÄ «(ìUW]Õ8W?]qÅ-·ëûùçŸß{òÉ'·ÆºþùÏn›ýõ¯ ÀFtN»þl¯.Äi`æšüýÖ32+¸åOëíàÕzr*€¥rÂh® ØÐCåULe)¿þêØ¬RŠ@f7ó° Ø<`MQ3ôp0¸o|lÔ-Ø(™Æ`ãœ^€ÍöÇÝKM×)`5)’f öVëÀþýzë¹ù‰Óøõ$½÷ÝïÎعsç¶À3L;n±…ûç·¿=uŸòÞ}÷ÝnÝu×±`ËÛé0äg#pHØ•7 2é»üúp¬jZoKÈ`ZM0éBÜ2ÖIÊ]F'߬ äè÷1°z¢7þ< •á© òµ{ËŽ­%›jXläýÀÆ9=,Ûtð¢}q÷ržNìç?ÿywH2K±Ö‡œDcï¼óN÷‹_üÂ=ùä“­®½O?ý´{æ™gZ‘R9¬’¼›'“4Í™3Ç=ÿüó£iÇÍ6sãÿîïÜ7Þ8f»åÑX\¬¡`Ë_6ÒaÈÏÀkóÖ¤l¸š‰Í£#ˆõZqÕþ0²² À¶…É´Yˆ­p8ËX ÀÚx×°k±ÿfÆ&yÒ`¬þú“Eùohºl&qÊ"t3çô°lø°?î^jºN¬¢²’а{ðþû§¬ vÊë_ßšµXiï¤Ë±¬&wÚaÓMÝÌdûÛ“1¯ú&-é#€ #·Œ-gƒle^llâЧ3„«²l8!e³D`³`Íú¤ÛtÙö7P{›ÎBœ–Ï¢°~¿x?*+˜µ.ö×k«ÚŠþöËXX6ê&ìZ&6Îá`ئƒí‹»—ótê`~øa÷{ìá’>k-ˆ´Ür­®ÆaöÝÓ¦¹‹—^º5^Ö’²“'MrIO´ØžwÞyîÙgŸ“4þV‚-[ÎØÊ\Œ‰II‰éVóI Z÷–ä>PzðÇ?nôr0Yc`å‹û>z‘lÚ2šÆ#,Ëâ"°D`ÛÞ0Š~ÊH,µ$­<5•Á…ß}#¤ªÛ7k lí—§Õ©má¦ÚÖ9°ÕüЗ-€sxX6|Øw/5]§NVÏæW\±¦]vY÷ÙO}j ÀžpÜqî£ ˜úð‚ì~I×âÅ#ÑYEf³6ÜÀ–³A¶¬ÑÝãú`å[—_ßذ·¥1‚qDZM°l¶aÛßôÙ‡í¼z9‰“º hÜa%Ÿ~[U7Úa.'`í žþ–ýÈ+³Ç²è‡ãØ8‡€`›^ÃÔ¾9^┺qάžç$Ô‡Õº°“'Nt>úhk 
ì·“hêîÉwXDb'¼úÕî£I´VùC°}_ó“Ò‚SK[«¬¿Íþg¸ßtßvØfz+ý°aoLlHŠXýŽø½-íëB¬ý6Ó°•©úýe8ýãýãšÈ½Øiw˜¦.·®µV£» 4Ñú­MÀ¼nÕáÏ)) çì°l7`‰2ãî¿P§ý8¨5Tf™eÆ»éûìë¾ræÙîÑÅÉ$H%—Qñ«`™9Ó›Œ…µ(ìç>ýiwûí·»-VYŽèAªàõíIÇzÈÍúÌgZ°«¨­²ÁZûU@)ÍŸ?¿5á£&… [ܦØH‡¡ælý°~o̬ž“òÏ´>Ô†ßÓ&c²ub³z\j¿®~Úžµ¯I¼ÀÖ¼ŒN“.~¿¶Åö»ãÆý¿‘— «~b§¤šnû¯:6ÎÙ`Ø*àˆ2âî·< `±~Ú~Ç݉'ên[°¨4ÌV°‚ÕÕ’nÀXmÝlíµÝfk®ÙŠÊZ„Õàõç?ÿù(„êÿYÇïVK¢±~Þc“ïŸJ†¬À VÓ’`÷±Çk±Ö©U·CMütÇw¸Ÿüä'nrZËöºë®sçž{yºÇì_¸èwÈaG”Ö?¦Žªó°Íô]ú `ûÕoJ»»°SË>4Ϻ‰ÀÏ¥¢åØk’‡úZãÆ%C~ÆU¯y¶Ê~O6Ρ`تfÊ‹»÷ÒtÊXf7ØpãH]{üB0UÀ ÷˜:Õ]1¬Û%0{^2+±¯{&‘×ÇÜýò—¿\"mÿÆ7¶&q²ü¶,Ï=÷ÜÓÊ+UV“C¥¥^¬àU/ô·Ÿì€m¦[À>Rʯn ”iG—6ʰX¶Ô&€]1yP¯:nܳ#XÍ`GªIƒ¥–Zê7rôät²5Xî5¯q‹/Χ¸‘r¤‹|ŠæWÙEŽÑ„«®ºjt“ŠæWÁE¹þúëÝJ+­ŒÝqï¶I«­6&òFbÃï‚RAoL7ã<€½å–[ÜÁï}o«[Þ}÷Ý׊pþìg?s>ø Ó,ÄrÚ°š8§Ö-Ò]–¼K‚9]ˆÈRÙË›É&qºzܸ/Ž<<4èšOM °qÎ6 Àv\wŸÅê”°‚(ÍHüÔs/–~1ìÇ;Ìm–D@”6&5`ÕÅwó¤Ë¾ßuXãaßöæ7ç« V«¥uŽýèGSóÀ RÛ¥:&qzìÉçÝvS·wŸuRáHzSzþì÷Ï `¿E°Ž{À½#Ñ0¹U:.§Õoʸqû9¹o4Oʃ?þqßù·½ô­û­n€í»ŽÑ) `Ó¢°Š¬î˜Àé±GÕZFG>æk¬áÔ]Ø¢¯»%‘×óÎ:«µÞ«­ {z¿ï·_k[Ùd«zóRk‘XÍú£uSò°•¹J°ZËzdvÒ+Ó‚bí˜ Ì#9øàƒ£øC/²Uº° †ÍγîJ»}ÖÙ…XF"ˆyƒ!#1CÓq‚_ß(wÛm·Ö›˧ïaWdÝÚ®r|hÖqÚî—§òµÍ¯'­{»cõfEŽÕfk¿ºKûu©]:Nos¶uÆ»°³ƒUâÀVâH°lSœmÚQ¬¦i™µŒÎ[¼±°šœIÑTEXL–ÊÙmÊ÷Žwvç%pjðzTi=þãdÉÖ…Õ1ŠÂj¢§¼I˜²ök *ù-<ð@nª`ûÑ>ØÊ\¥Xë‰é÷ÊÔÿEýá²Û€mk»3’½sò¬»o6Ƹd°–Oݕ̸}c¶.È‚LƒIm3µh¯àQ ( 4øÔ÷pRmK„oåµz¬·_‡A§¶)OÃÚ¯m*˺Lûoxì8«í±7OÑ›9ÌÀæÝZÝÝO6Î`Ø~tÌisÜýÝn'u÷½óÎ;Ý·¿ýíѱ°ï[f7!IЦ Xç&ëÂ~rÙeGáõôñãÝÁÿø­õ^ý4idžc“cNf÷Ç~¿é¦›ZÏyÁiLÒ ú»îºËÝqÇN@éøýèGîºë®sçž{nËÏPV{`+ó3 `ýȦùèE}ãƈɓåk°C °lí@LÀFiõ@Ð6?’Ž›µ<F•oà˜VW€õo Exõàña4¬ÇÚï×æÑªrô–¸S(-r<[Ùƒ£TAlœƒ À°ÃêÜÛygE`€oI"­šˆI“8}âŸpïK uÃ$ë/±#˜Ýc›mRáôÃ3f¸M Wܵ“5bŸ±Ðêç3€]´h‘‹Ilûßy¶”ûvÐÀ¬È2×zD†=1}?ÚàTÛìÿ°Ëo°òç}Ÿ^.ùýˆòÇÔÀ¶ëù©ö…½?ý2¬·§~'tNa¬ˆO_6o—ºGE`·J,9ù]¯æSgâ€Íz;nO{âo³lZ×â"¾ £´aYªÏ7r¯›CÇùÝÃpYC,r[Í=S¶€MCSýV­š¬cYäSôuƒÜq§‡6ê3l`Ø/çÛ`/ºè¢VÖŸ…øÊ+¯t;o¹¥Û-Y"ç;É3õï“Û‚Ê´Ùo¼ñF7yd†â£’ˆí§ΛI8«{©.§3&Íþ­`ËzK7ðëÃîA¿‡¤|ßü®È¾ïíûñ!K¤}·!~ HYPÌê³^—i=?ýà™õþTY~ïPcëÉ©ýáÉ"~}™¼½ØÊ¬_Õ °1¡ûªVUFaÆ£rËt!. 
°vùÒí34ëB\Æð:9€­ôÖ)\ À°q6Ð/F;;»žíváÂ…­(ìj÷wîÄOt÷Ýwßè,Ägœq†[1‰Æ RÛAéÔM6i­«Éž6Hf2î`ï¹ç›X¶°ƒPü€Ø´^ŠEÖ÷“l[;€3„Aªpè¢ß+3ìù™Õá6®;qT'>Ö±l‰Yˆm§volÒ¥Pø0z›õ7#3ó7 ~;²ÆÀXëzÐÎèØâ¿ÔƒpçìÒ…8?K6ΖÌfëôºµ'·Æ7ÞØ}öÙ­%o4V(€½øâ‹[ûC€}ðÁs—´Ñ’7'tR+‚«±³ïL¢¶_ýêW£Žó—ËÑøUùwß}wt`Ø|–XÝc~ò#Ÿ1Øpò' ýó{<ÆD`•'¬?N$Óï•XÇbYÔVe¤±F7 µ]™l €µn½2e`—2€ß¯Ý¶ù³ çl¸”õg·È¬?¦V€«ýá›—°Ž4à »§µÕê$[ÃOzƒ«`ãœi€<ãî•~×é+gž=:©‘ž¿z}íµ×ެ"ž»î°C*À bó’ží“’H­f1¾7I›®µÖÇüÛÉ'»n¸!³,í“/ (-’˜Ä)݆éB\™“2P+è“?µŽkl6XóÛ­Ü<€•ŸîO´v!ÇÔúœàGWÃ^˜Æ4leö_ob{û¾i AQ0«‡™šö§½igH6™’_†cøŭ7H€Õ9úÝ–­K±ß—Ÿl…ÜGE°qN9 Àö;˜Ñþ¸{]:ݶ`‘Ûvêö£ »l2^õðÃo-}#€7ož[°`Á˜.Äyë±úû÷ß{o÷õ‘õbwJ¢°sæÌ³ž«ºï¶í¶™k¼^ýõ-€UŠ$€í²{2P›·¾j ¤ A 'VX 6…Ýóg¤ —ôô£¡l…wEc`ËhƒœÓBÝ‚M@ÖåXF®'n³2²ŒIùU‡•vOȪ#loxœö«nAwÚ9ªÝiÇt»c`+¼qJÀÆ9µ, ÆÝ+ƒ¤Óœ /qk¬±æ(Èjb³ÓN;­õœ×DM66f=V?Ï¥—^:º$f5Þy«­Æ¬ç*€ÌV¬I£ÒÊ6€U÷æ" €`K¸ E:€õÇŸZ*œ IÁ/?àå÷¾”Ý`õ[ãó† {4ß<`­üv3 °EL<'o¯¶Û°Fù,±D[áS¢(6Î)`ØA3Î%î¾—NO=÷¢;þs_pË%Ý~åˆ*m•ç÷¾÷½Q€Y‹5ÌóƤ밺k,¬ÖÝy‹-Z^åÛ.;;Ù®—¤m’ï+&`«mú¾ö„ ­‰šü²¯»îºVVNhÑÀ°eý…ºë%Àâûw×÷õí%ÀªÛÀ¬ªŒ›.ÄõN/oT¶ª»¦\9lœó À°€^ܽ2,:uÎnå•WÙµ×^Ûýû¿ÿ{kll‘tÎ9ç,²Ÿ_j©¼ j¥ýD5ñËüÁ~ÐXM&U&ÝtÓMNKñ„Ï=÷ÜÑs–kž']ˆËùÝ> €è%ÀN«¸ÛÀ4u¸5ÿÑK¸¢îîß<l·íË`ãœr€VçžóÎþÐøXÁÏÒK/= o~ó›[ËîÜ{ï½…Ò7¿ùÍ%@Ö¢²›®²Š»ñÆGËSùØ[n¹¥T`Ç^S¶·~HVíl÷}ð¦pKâ¾þ.ìôäQ‰OŽ,›†¦ê–¨W‹|ŠsóÍ7»wÚÙHq6ˆNÍÕIãc÷xÛ^£ûªdá|à­è¨ÆÑIgŸ}¶{Ýĉ£< bOO"²í³Ïh9×\sM `u•I,ÛÎ Àvh§S“ãÍ+ƒ,ð\ ž»°yöÊþØ8§˜l>ʰq¶ˆ®Ns¯ºÞmò†7Ž‚ì„düê§>õ©ÖÚ±E’Mâd<½¤±°É,È7ÜpC«œ«¯¾º°?N&¶)›èBüŠm¦KÀ°uX& À°uÜi×ÀÆ9Ó, xÆÝ+èô’ûÒig¸•V^ydßð†7¸9s渻ï¾;*m»ÑF£; b°ÿ´Ì2îÐ:þª«®j¬ ´“ÄØ—m€­Ø±¨¨8€­È”ÚÀ°lwZÅu°qNù†mì¶ÛnªÛy—]¢Ò«_ýê¨|VÞRIÁزËó÷¿µ[VkMF¶_ù5®/6¿ò=fË-·royëît!Nh oð4xtñóîșǸW½ê•ñ±o}ë[Ýõ×_Ÿ ±>ÀjyE_úЇÜwÜ1`‘í$]|ñÅNcv“ÇJ+ «°;À°™Û”Á΃Ôº×q{f×ÀÆ9Î =âÔ=06]úýDçU™—^Q,™cŠÖqI‰6=Fc‡Õiæ¼ãî½~×é¶‹ÜN»ì:f|ì‡Õ2:Z'- `ç$@¹ÛòË»7ß¼5[°ŸïÊ+¯lE`ÃeÓÁìô¢ÍàõøÏ}ahïE¶·~HVí,[‡e%K¶Ž;­â:Øáp¢ûh?vÚï6pñ¥Wºu_¿Þ(0&¿½îÔSOMØ·ØÂ½.?û•¯|%u¿¬À¶hš={¶›_.€€-æû{ÃMw´ºÍªû¬9‡CmûÈ3[)ܯï!ðúãj‚iy|çÓÊ×_µGU†uãÕ¶äò·êSÕ¯ÛoÛüè¦"r&¦Ê—6VTgǨ ÕqþE—f:ž…>4«r©( ¬ÓœYZÛ§6«µ%Öù ë ÏSc¶˜­ÅjO¾ÁÔUKÛl±å›FtÅWtÇsÌ€UWâï~÷»îÝï~÷˜5]œyŒÓDQØFœm°‘C~¶Jv$˜µ.¬¡dµ–ù&V(GÀn•yE¡bÉŒÝS€Ô€spÌ4€Õ_?òªý‰ŠlúÉÔhÛe–óFhU—Ö"ª:΢ÃV†ZyÂè¯ö‡kåtÇ8Á!ÀZ”Ø?–1°Ål-Fwò 
¾¦gs[Éëüú׿ÞÍš5«=þøãÇtÞzʶNCaÅì€tò³U°ùU’2ˆXôC&)ÀF^ ¶˜£cQİk°  ч\ûߨúpg]‹8œeVå« Ö~¾ ª5^Uðª6i9 `CX`‹ÙZ› ï`k«ñ±‚¬¥—^z4"ëÿ?aÂD÷¥ÓÎ\K.QÀF: ùÙØ|ÈQŸl}ZSSE °‘B°Å_H«~wa`Ì$ErÖå8œè)Fʬ•+õ!Zí±q¸i‘å¼ö¼ûçž6Ž€-fk1º“g¸4]¸è·ÇÛö3>v¿¢»pIpµû€tò³°ù‘£>Øú´¦¦Š`#…`‹9Àþ$N6I“uµµnºi3üú aà*hL^EA5f´Ê.Äayþy´é2¶-„Ó´(.c`‹ÙpŠ^Y6 —^ïÿÀ‡œÆÉb'Û é0äg`ó5"G} °õiMM)ÀF Às~ÂetÂï6y‘àVP§¿iÝqxŠà†3ÛøÓ´±ªæ¨–‰ÀZ”ÕÚ¤º­[³MÚ¤r-ù³Ç8È!ÀZ·d§±Íö˜òÉSÌNÑ ½°x`#†üll¾Fä¨O¶>­©©"ØH!Øx'G¡º ûë®*š*@ó£—ŠÀ¸¦­kå"Ó¢žayiÑδõeý(«þ!XíDªmþñiãpâEÆÁÚ:¯~[µMå(éÿ´6ád³?ôB/l z`#†üll¾Fä¨O¶>­©©"ØH!Øê¡³( Æ”Y6¢®á²=Y‘ã²up\oì ÝÑÈ·6ÒaÈÏÀækDŽú`ëÓšš*R€€Íwnºá ýHn7êˆ-ÓºöZ·gë-¨µ¨©¢¹Y©ÝxÝØ6¯7vˆîèŽ ¼Ôšá9ydÊoàÓ™“Óå<ù @#`qhDØHµØúØ&v¥µ.À‚TµºF˱`ë·€ͱzm€tȆý¥@À®ËÛ«þºªÞZ6ò°õ:J8¦è `Ø@³l€tȆý¥@ÀN£Û@]Õo-yØf9R8¶\lÀêµ6Òa  ô—l]/Z›(0}$!FŽl½ŽŽ)zcØ6Ð,`q•P` ˜šœÕ¡ygF6O!ö£@`›åHáØr=°l i6 õ©óÚ¤Éàš21]^[Ãýlš„5)ÀÖ$4Õ @• °8ËE=òc3ØÀðØ€&tKž9K¬+Ú€fVמî;`«ô*( úK¶¿®­E–ìð8¢ýâLÒNlh– (›·\‹S(Ð l¥7t¤Y9'=1Ùÿ"ågª„>ýx×4 Íl³Ew®6PÞ-4ÐtµëúÚnÿí ïou™Õß°Œ´mþ5StŒ•QôzZ}VŽ-i¥rl[V™íÎÉöåé’VvÖ¹øç À6àÞû&ŒOšðp¾úœœS üö¡OïïZ€(P‡lyg¹¨cJ~´ÆºkÛMÝÞ}䈙NTJúßj¿m·<ç_téèOuƒÕ18c´ å¬iœ§_nx=í«_ý<ª[e´P•¯±¤Vþª}ÖnÛîC¥öûçä×qæYçÙ'¸( ›a~+ÓÚbçMâ:žÚÔ(€(€C®Û]‡`A_l >0p´É„!|ù°ªk#øòAS&ÈÓvíü¨ "µÍÊõ'-ÒqÊgSg l6`Ü`U·ÝvŒþªL+ׇTw+×ÚÛ>}·óˆµÍPC}7}LCµ€r‡‚ÓG@@:`ës®cEòqM°r6 £žiÛ|} (PH¶\dîX€¡[N!åb[uÛ€àÍŸWõ‡ÛoþxQ}÷#§‚Î0’ªnÆiåú³«.¿lAŸÊñ—»É[FGyÕ¶p‰œ°=ª×oEdÃ:ý1¯Õ6˲AÍ»Nªß?WµÏŸYßÕžP¼r›²€ío¥m<0iÅÜ”–l”l#¤Áe•½7Õ“!…0…þjD°2bÒ©I’Q)ÉPí#COëú«ãtŒ},šV†ò„X+W7Œ}ü6¨åñ?Ú¦Kç¶¿¿®­­LÈhŠI;°El ZÈêj\f"§A¾6le.Å $¿[=-äOûùéÚî3€í¯Òç`ݺt~!À Ê`·N’ÆÇÊ€mœì)°hÛÌø <÷i³þ† ì¬ÀTuø€jeÐZ;üÌàØºøðÛ%¹(¶`«uÙÉãܰl ¿l@Ý{å3øÝšý.ÆŠœZd6í¯ äkÀöƒ·Ò˜6ÊŸöƒPj˜ñ@´Ê d•=¶¬rC~\Àj›ÿÑw¿{þ÷¿ .}×ÿáÄPaXArVt5Œ¸†m ¡xÈ/%§o °ýå²ɹa‹Ø@õ6 ˆUw_·ªn̶̎`Ö¶§ý »Mêõ`ñ‰ (  Uèû›/¯¿ TÙ'-¯Hâ ,©, bÙqY½&C€ ™CœàÃtØÝY|¡mÖáy¬ý¤@,ÀúaFb­}öæF†¦·£úëÂíÖ5! 
B-¯Êõ“òú†Ë[›~²´Û ÀVï0ª“Çya+Ø60ˆ6ÀÖètôU¡_?mFufaP*ô½­¦ÀVÿ+éÿ|³zMúå™ÏoŠZÙÆú®Äl¼nüêÿ«Ã¤*P`}ƒ6ã²î»EÖnŠ°ï½•!#Ôÿ~òß°uª,é :¤œv `±6Àâ TÀï )`´€‘ßë2ôùU… ù « »%gõš4_>-²ë7´ò}¸Ö6ËÃ0‚¼ß³—X34…îýèlÚYi”Õ…XûìMÍòž˜iÝŠÓn.£‹“ëä‘[Á°A´Ǩ òåmÂ&ýoC}ÿ=mük5µjÃíYA'‹Ì†[•c]‡Ã™~OÏ4È-xêdïGʬ½ »Hë¯<úXÞ´YŠ}CÇÙªm~ÄUå„ Àö£Õu¹Í,é :¤œv `±6ÀvÙѼâ ågË··ˆ¦¦ÔR~z8+q+>»Àj{ØÓ¦Àž-F‘-›c™Ã š HÓÖˆ²·&>dúåøoUÂÙÍBƒ“1†ogüáÚ-]ˆ£.ñðe`qòb<òa+Ø60ˆ6ÀŸïÓáÛr:aIkÝÓzGZ/ʰúÐGo¤q@V÷d¿.¶Ã Ïáé „“9•Ñ©Š2ÊÔË1}ª‹C:ˆ)ç„]û60,ËÁÔe÷Zr§®ºê¨€íS¦·Í¶ÀS´2HÕþð3-Ù .½~ *k§´^“>؆KfZÙþÊ& šù3°½µjG¨JG¿‘:°³^Ù€–‚é÷å`´ Ž’ixò)³Ýù]Ú3ˆT[´¶l¯®iÕõ°UyCUŽg¸¦@R ™ÖSÙØX`ëöë‹—µ¼aD7,Û¢Á–€*ådQ`€`‹ªAÊæšdë­¿›{Õõ£°¥h¬°ImÌk‹­ÑjùôÝÚ¼ãËîÏÒJkËJײå6í8v€œæžšº!§ +¬¢Å*—™†«P’2P©Àø &>·pÑ#ãˆ4Í1¢=À6Ð;H-Eû ¾B€­Ë¦Úi¾¨«MݨçâK¯tWYe~#ŸÒ4 PPP`¬K½êU{ê¹Øß÷ÎÉî†CF™\Olà¥V”ÒTªÛ­¾ ¾gÖ VÙ´q²~7Y‹Ú*Ÿ{ßË—øíô÷û‘ß"×Cåúu„«óð»ëÕeuûõª,µ]å èÓÚ¡üVŸé¥•î÷®Ùv.×Þ0ÏM˜8ñaü@@@>P`¹åVx,°SĹ&/öÒ/6` jípi[òÓì x!è*¿ ÏÔêÿ1³µMÇꯒ±ñU>ËBžŽi×ýWàhuøõèÃïvn~÷bM¶¤²´O€šÉU;¥‡i ü‚Ø,­¬ :ÎoC¿ØDZ;ç\x‰[i¥•nêƒG6MD@@Xyå•ï-%èg‡…¶aØÀàÛ€`,Œ:¦u‹5ðôaTpæÃ§AåO¿ÐIý58ÔwÖ§ßVÁ¡Ý( þ²ì/¬× 4`UOø[–em¶h²µÙ¬¶Ú9¶ëBÜ]±³4?ñ¤SÝrË-÷5<@@@>P`ÂÊ+_{Ö9Ð…˜.ÄØ60P6`p.ù’^‚Ïì|ð £¹‚!®¥A°êóSDg”µ9 Dó6Œ†ú@í·Ç‡Ú0ʶkXö#GÎü[ò¸>®Ù4PPP Q`ÆÞÓÿñOD£?Å5æ“ >•ŒXFÓÀ- `ý|úßïšlÝuõ7vɵ5­Íi³·Z]g+Ëo‡ýoà¼(ê°ìFoòb¢ûT<@´ŽS¸ÆS¿žÛÖ^Ã5ŶևêÖÞýªÑ0¶{õå—_á/ÃäØr®€606Û…Xö ¨§iMƒÕ¼l^43Ææ² » Àf•å·€}ÉÝÿÈ“nÙe—ýã0>ü9g@ÁT@k( ä TõfW‹ÛGp®Eù ¹š}Q³0Æ8XäÇŸëÌuHƒÎv i,…“3I kº„c` ³fúµã”/mÆcÛ﫵HjXwÐf•^Ó´6«ë±µ¯Vƒ2‰Ó—N;í¸âŠç¹Àé£ §&çrë"°Ë{çsà Ð)r*e?~üg;ü£/ ‚ÃÊ9^Ø6`6àÏÈë¤^èªË°’?q“ß 8´#›uXeÚŒ¾!èÚ̽‚;åIƒ¼FÓÆZûüY‹ŽU¹‚i+Ë–ÉQ9~·j[VÈŸ9Ù Ü7M«p‚¨~½ï¶Ýnªº¿­Ì³“c«€4 BñéÒZoª¦V) è«±ôU ú]róSÔŽ)z¬•mÇ¥uûõoTåK»qÓ¶é1²¼«7øûÇ/¿ü„ß©+U¿:!´hÁ°Ð²"ˆ“jKËøÇØüÉœü}ÍÜÙLÂiëÀj¿« 1œImÊÖ¡2ü™‚õ»ï~»mW§Ú•!VYvNa{³´*29USïÍç-p'NüÅà?â‡ê Îôþr+(ˆåÓlŒh©ú.èSdVIÿëfi÷Ñ…´côWoDõ7€÷Éï—¡2}°Õ>ÕÕ_«#ìœäí ½²Se¯H¢°ÇÌøà!nª£A»€l(jY³úf•c3 §íOëŽ\´=ƒ’¿NýtŽ“×YïÉ3w«^=w©·+ 0<®:Y-Š–8HC0‹ž{Ïó .šaÔRÓ‡GƒÆ¬Éì"¬O+p†Êë—ouú«¶ ¨-Ÿòh›_O¬ˆhYTñ+¯òÚ§n[°ˆ(,Ë©`ØÀÀØ€u«´.ÇþxRETcÊô<ŠÖúkäöãùιð·ÒJ+Ý4 Ïóa>-zVwõ; r•…ßêZ?¤% ü$¾ÿ±mþ8Ríow³XÄÖ/§ƒP9×Åáw«+¬€Rc.pÚS7ÝlóžzîEœ4ÀÆÂ¥t²€K€š5ÁR»}ýp´9VÏNêèæ±.3yòº¿Jžëx>’µù 
¤ùîæ7[¯IùÂ1Ý[5„Ðz5ú‹¨ žv¬ê÷ƒbê]¶ÉßïÀ,Ÿ•áž‹XÚf]©­÷¦ßë3M¬>ýµÿ­Çh¨™_vØ3}Ô†AšG¨Èuïi^3\¿Úæ_Lß ³–Ú±ˆ«u3–Ñ¥EvóNVå[´TUFÀ*ŸoØlžÊì÷Ú×¾öcûî·ÿ‹Ýt(›® Ø6€ `½°½ Õ‹ÚäqϺ¯ƒçó¤ùîæ‡[ïI}ýãP 4ó³m(`ìÐ?•gð*¿_uªmJú ;Ñœ’ï®üúÙCÈ i ¼Ó¼ã|˜LØ´à˜Á©öYO«SõQi|‘v-ÏâxFi·ÌŰLJݼ1³¡2Lßu|,ÀúÝØZ›´êª“ÎýÜçOúÏ^8Ô‰S‹ `Ø6Ð-Ð Z½¨mâ³—6u¬€üô0ê—æ»teU–“ÕÓ±]ƒåsgùûi-ˆõ‡þYŠªËÎÅÔ†+¦X¿·¨Î7äµÏÿ¤c7„s줭ÚR†™:6 X²›®4±‹+Cˆý¤ÝL±ÇZ¾Ð€ÒÞ’È0ý(°³¿-Ö‹¶üƒ§Àøbçù±cþÔ-'‚rqP±lÀê²E^ߺÛÛ~¿úšk~yðٜшaS›ýî·Y=CùfŠÎlœ¼iþ¸ÚàoO+#«w¨íYàèuL6lÏ$*Ë×Ò‚t¾ŽloË´·'vÁ±2ýõßDX¿që&`ßý‹®O¥ïá›ÿ´­NÕgýÞÓ"°fø~7¿œ´FÛ²º?÷Pzªn‚É[êÛqçi¿{tñó3®.g‰zp̱lh† ,\ôH²<Ò†¿]a…j³•6tM´H ïCË?öSVC ðÌwoMM+cÐV:‡Z†“Þ°]3óü‚eôa˜\GY—^3lƒUí³¾õò÷ûŠÛŶ~ñÖ‚¼õ’d¹¶8°Áv£è ‘Ÿ/4&íóaÕº+°Øs¾- mŽ &ì»ÖZ¯ûflÄk†3Æuà:`Ø6oŠºÎúâÉ[uÒ¤g’‡øVCû žOƒ&¿ZDùË:Nþ±?þ3¶Œpþ›ð8ñCÈæ“«>}:Àæu!çã±q¹V¿ÚÐIÏM6ÖZº/ìBP¦ A°?Õ7ʪÀ1ïMOV»ÓÞT•9GŽ|ÖÕrm¼É‹á"÷8QùN¡6€ `õÚÀWÎ<Û­¶Új/Nœ8ñìä=qðÓœa¢À´èòÅж´avþØÒ0dÐ+ˆ)ùãY-XÕ®£ÍÂë/oiÐjpgeZy~WàNÖ_ZS ›ÖµÚ‡hÓÍïÝ) ý@ž‡µ]ÇØG<ã×vf!îñ-© ^”"M²h«ŒÔºË b¦ðŽ­§,ÀÒ}8Vaò™SW^yå{×XcÍ?~äÈ™»qÞ¢²,µ‚ `Ø6ÐPO¡ý8è/Ë/¿Â_p½4yp­Ëã{¨°Èg跇˾ÈÿõƒKáP>Ëoðj@kÇØðÀvk3 ÛøPùþ~»¬LÛrucö#¢i5µÑ?Ö3\ÂÇ/GF‘¶ÌŽÚâçóóøu„m×qþ •Ÿ½*cìõÉÚ4Õ´#\§ê1§v“ØÛ‘˜¶êŒßõ9æò €)°IòÏq‰ƒð 9 ›oõ¦wßs¯?sìñn‡vù«¾“ÐÀ°l 6`Ï=svÚùþ :Æ_ö¯ê)”<›MÒê<®‡V¬áD~rLïÇ´1¯e»ÄªÎvþy–^¸°yç›·ßtKk—ŽM;7†(vz9P ë ÈQ˜–¤ý“4k$½md›¶“ÐÀ°l JðŸ7ÓGž3»þ´£‚~P èŒÁiçŽQµ.¾ióâ4M“² ]åy0D±J5) PPPPP` ° OËž¤­øaÝ{Ó&]*[v·kÀJÿ°Kq·Ï›òQPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP†M[”>f};_›qrØô”ó-{Ýåü9@@@@@ŠX>)G€¡Eæý¤õâ´n_••ï’Ttø2kФ¿E?lÓB:ùh=B›W¯Ö/´5ùìü츲ºë8+£ÝK[Š¢®åt~e®{ÝÉ‹(€(€(€(€(0 &YIðQ¨BùêX«KÃÏáÉ¥¬ÖëóõÐ÷˜àÕ?. 
Ô¥§AòûkÚñyœÖ;g•‘vÞvŒ_ìyÅœ{»<l§ r<          @KXA†þ·4m‚ °1ìôS'Àª½´´vçEt ð‘vþ1çîGR³"Ž‚WƒTo)XðµƒÐ´ö˜¾v|¤°1W“<(€(€(€(€(€SÀذqê^l0TE´®N€m't,Àêœíüôí>‚E©u!NXÁtŒžº&1ùÂö˜¾êšÜ¼ØÆÝ†4PPPPP Fv«ãÛuÅÕ~Eo‚"¥vÝC€Æ§ê°ãTFÚ'-kÑe+Ï8ûeø€çGcÛéiçfãgÓÖÀ2&²š¡imñõµÿÓ^<ĬÎÃ4λ®ÖiîÛ‚E¿óº۵ʻ¦1öL@@@@@V `ÃÉŒ|P ÇÏZwäP6ƒ*A§uÏõÕþv]~ÕVœ§ß7¬+ Ò ¤ÒÆûúàíkQS›ÕZç£2mÜjV¤9¯k¯Ò×Ú;k³_¯uUNkwÀ¦]‹èf½ ÇõZ~¿KtÚ˜`Ù•éâ_KìyðíÉ©¡         ø ´X¿+« ÍÿX÷ZˆÉ´‰ŒÒ¢ŒþMÁŠÊб>ü¤M^äŽÊP~g &X #ifÑA‹ÀúFu—¶Ox¬]Z4SÇØ~ƒ»<€M¹4«´óµÏ²à°^û®k^»4͔Ǹ›Îj¯?AUØ}÷_&(¿¶…/ÂóömÅÊ´¶³¡¬sg;          À(ନ`ÂŒ…$Y|à e2ð #h>À†0ãÃrxœ•—Å6öTyüO»(£lÖå õ£™á1~„Öö¥l»Yí°óÎçð¸°^]G‹„úšficZ¦é¬º ,× ðiv⃯Í-¬sL³%;@@@@@h)``•Ö­VpQf­Ð¬¤mÏ*Óö‡ÀfÀ™Íz> U °Ò)+jõø]¬«Ø,=²L7­^ƒK:³´±ã³º ûÐé·!ë……åI»>6Æ7kÝ4]¹eQPPPP†\?2h]j53èÈŒP6‹ÞêoÀ†ÑW+Ë &ŒðåEL­>Ù˜ª6¯mþøØª¶Ó¬tõg>ž6"t–6i ^ç0OLd9­\ÓH×ÚìÎÿkà òÛ–ÓG@@@@áT @;•5ð R$U€a€FqCPÍ[+×À*„–X€õ#·UlÚdNÖµ8î´óô'eÊøÛq¾-©¾pMà!»=9]@@@@@_‚ ¡]´OТ(€(€(€(€(€}ª€œú¸ëÖéÕ°±mÏƒÌØrÒòå•]G6¯ œ_™ce{U¿10N+W/jô†          @* pM‹8ÊÉ×v%ßá#xú®ýÖ Ùò«Û®ÿ±®ÇV¦_nÀj›€.­ ¡ÌYP¢c÷ÉFä Ú­µßÎYÝ™­^¬ïaç°LƒC;6ŒjçÁ£¬Æjf•¡ÓÑ9…Úøã—õ¿¿_åéÓîüBMUF– Ä´!Ï.T¾ÎÑ×Ú·3ßž”ÏÎAuÛ>ÿœÍòÊmZ:Ôï(€(€(€(€(€mHƒ*sòmR%¿Ëe˜ß¾ ˜¦ÀU½‚ í·ò*þ¤Q!ÀÚ˜Uƒ}×ñY‘3šÃq½a¤ÏQíW»>Jj“¬YëGùtžvŒÊÌܬˡsÐY9ºúîw±¼úÛ Vý±³~—pibßîüÂ6L›.þ¹Å´!Æ.¤•Á©þ·ë§òý—¦¥¯µo¶ß€6«\£ÙZ¯&,˺ölG@@@@ˆP@PNdÀ™vxÀú3‡yT¾áþ`Ó Ú %ë”BX Çõ¦lڸ߬(i À¦A ݱX¿œÜÓ@^0&¨•Fú„K¿¼¼6(¯iFÑ­œ˜6X=íìÂÚêÛ†¶©üðÚèÅ‚o§i܇ö•U®¶§Eû#n²          4A‹´ùm18оp‚,€mI!L†€B…uµ¶Ùßv3‡e âböIØœ”·S€UäWeøHkC<¦]‹p[xiçgÑgëÚÝîÚ¤Ù`^;cÚVFÚ¶´²´MÉ¿þ¶Ío¯ßÕ:<¬6°MøÕ¡ (€(€(€(€(PR0²•;‚‹Æ•Xæ Nýî¹i«|Ú¦¬Óœ–ìà ¾Ó–ª`U‡ ±×+t½lܱ¹ÌƒS›—§€•]†×>ìö À–¼é9 PPPPúU!ÀœøÀèOF”%›‘ %`MµÉjp)pµI¥ü2 ` " ¬ŠD?cÖô·ëam ë1¬]v.YçjBª´ _bäµ!Í®Âm>¬†Q~ßFt=ýïþqj‡?Xç’V®¶3 qÖÝÃv@@@@èlÆ_|´¨¦J€µñ©¯únÑ3•kìG- ¨ú-…Q¶P^ìnY´Tû²"t?Vg8³­mWû¬»°Õ.;£ó,2‰“ÍÎì·=m[x}ÂÙ™ývø/¬\{¡ }Y‘Hp;_•é}^b#°þuöÏ#,_í°Ù”­l‚(ëC|V¹í&(ëƒÛ•&¢         duÙLë’ZT-ƒÊpF[Õ™µ,Ž_‡ŽëEwϬxóڢ㲎-ª]^þ¼ëÓ®­±mÌ;ß¼6äƒío§wl[Óê ‡“Ŷ‘|(€(€(€(€(€ P mÒ£ªšeã3}h˜–ž¬ªNÊA4ÌîPPPPPP ÏP4T~7>ÖåÓïìwçíF”‰¡ê–œ× ÕPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP 
KÿäE­mžWòƒIEND®B`‚manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/arch.svg0000664000175000017500000004404712301410454023255 0ustar chuckchuck00000000000000 Page-1 Box.8 Compute Compute Box.2 Volume Storage VolumeStorage Box Auth Manager Auth Manager Box.4 Cloud Controller CloudController Box.3 API Server API Server Box.6 Object Store ObjectStore Box.7 Node Controller NodeController Dynamic connector Dynamic connector.11 Dynamic connector.12 http http Circle Manila-Manage Manila-Manage Circle.15 Euca2ools Euca2ools Dynamic connector.16 Dynamic connector.17 Sheet.15 Project User Role Network VPN ProjectUserRoleNetworkVPN Sheet.16 VM instance Security group Volume Snapshot VM image IP addres... VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone Box.20 Network Controller Network Controller Box.5 Storage Controller Storage Controller Dot & arrow Dot & arrow.14 Dynamic connector.13 Sheet.22 AMQP AMQP Sheet.23 AMQP AMQP Sheet.24 AMQP AMQP Sheet.25 REST REST Sheet.26 local method local method Sheet.27 local method local method Sheet.28 local method local method manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/flow2.svg0000664000175000017500000005775012301410454023376 0ustar chuckchuck00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Sheet.36 Worker (e.g. compute) Worker(e.g. compute) Rectangle.57 Sheet.57 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.63 rpc.cast(topic) rpc.cast(topic) Sheet.64 Sheet.65 manila-2013.2.dev175.gbf1a399/doc/source/images/rpc/flow2.png0000664000175000017500000007367212301410454023364 0ustar chuckchuck00000000000000‰PNG  IHDR°W›c¢ÑsRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?w#IDATx^í½ ¸$E™¶Íõ!CÓô¡±›foA•Æ@<Ã("È¢¢ö0(=à‚,Úþ.lƒàö3ŠŠð1Š Ò #8 à†¢ ² (´è àÖàŠÒ"*îùÅSœ÷dVFUeUETÝu]yÕ9™‘‘‘O¼™ñÞõƲÚj|PPPPPPPPPPPPPPPPPPPPPPPP`ì˜p ,vÛ©ÚfÏž}ÇZk­u¾ýÏ÷cº°¡6€ ä`³fͺqbbâÒ©²ï¾Ž}+‡(€(€(€Ù+ h=rbýõož1c­ßpЋyÓq'Ú.ºô3­o64À°l ?8ç¼ Š3Ï>·õ?êØ%Úló-~å~”|xÝu×ý˜{ïOfßzq(€(€(€c¥À ÷y“‹²þ|Ñ!/ÿ­`õ‡eClÀFØî¼ûÞâÝï;«X°ð+Õˆ¨ìXµûÜ,     d«À®³fMÜ¿øˆWÿFÎ à ¸cØ60~6pÙg¯*•}âŸx™kÍfdÛ¢Qp@@@ÑUÀ9*oxÚv ¼mùÝ€ëGY€‘ñƒêœ:ïÖÞwæüqΜ¹ßq-ßüÑmý¸3@@@ì˜3gÞÅ/yÙ¢•÷Ý¿x^±lÀ¦màšën)6Þx“ŸÓ¥8»¦£    Àh*à~]ÿÈ›?ùÑn¡ç<¢;Ø6€ Œ¶ ܳâbó-¶|Àµ‚ÛŽfKÈ]¡     d¡ÀÌ™k¿jÿ_ø+œÏÑv>©_êÀzµ /™3wî]ã6‘E—V!µTÑ2o[êþ®[žãŸ_÷7ù¯ªw¨WÎú\Î3˜ÖÃMiP«À®óJ·aÛ^[Îdž°ñ°Mî45Cñ`[«ü¯¶ÂÝÂ"·MNm»FÜ’¢Ý–¾Óoòo¯]Îúh†ð˜òG˜IPP 3Ü"ö?ÐØ&Ïñp<©gêÀš°ÍR¯¥Ö2kò†]\ìüa‚ë„óFâ.¸ @èTºãÈ6áÈ’v„ ŒŸ h‰µµ×žõK×îÔuí´iåôì(×.÷†(€(ÐwfÈù`×ñs< êÀš°¢°·ÓlÇ’q     üME{?¿‡›pbÈgÀ°ñ³ýºÖZk=Lí- QPP P`ÖìÙWœsÞŒ}eGlÀ°®m`ëm¶]éš&“‰ó2Ø8H…(€(€S`ÆZ3gþZkú5¿¨ uNcØ@S6°äÇýi5Ö|'íl”l”L$B@@Ç+°«~5oÊ!œalÀÆÓ4‹½f³§¡R€’‰D(€(€(ðxpЋÁáO‡“z§Þ±l )`lG.Û‘\$F@@¿)püQÇ.ùSS ùà cØ60¾6àš–‚6J6J&E(°Ø¥™ˆHG@ fΜùÁ·¿ëtÆ¿2q 6€ `Ø@Ï6°ÞìÙ¸Öqþh´}½ ¶¯òŽUæËÜÝNŽÕs³(€ã­À¬‰‰»®¸úºž".ãq¡î©{l0`£} ?Z*Ö(Àb"(€ã¥ÀÌ™ëƒU2üáé²Ï^UÌž=ûކ¬€mHH²AÈDǵ)Ç•|°%l`Úø°«ޓì@Ÿ.†(0j °8œ@6€ `MÙ;P/€`epD`úØq1@¡+Àâ¸6帒¶„ `ì@›u€`úÈq1@$`q8l`6pßý+[Ýæb·K.ÿB_Òþ÷§>ŸïeWD§½´ƒòþwùæ6K<;Ц€`Ø>r\ P XàeðÂ5°³“ÞúŽbóÍç{>ûÙQÛ¬Y³ŠwÞ¹6í;ìX¬±ÆµétÝ6Ú¨Xýõ£Ò*½Òn·`ATz7lTº7Þ¸£2l°Á¼bé…—ô<;ð žAv M; À°}ä¸  @ °€Å [®3Þ¶ö¦ãN*N9å”"ö³îº³Š+VÔ&¿þúë‹9sæÔ¦S‚#Ž8¢X´hQTZ%Úd“MŠeË–E¥w/ô¨t–á°Ãgž}.›D‹Ùh!¹Üfô˜# À°=>DœŽ(¡ìxCPIýÊØÇø€Í°¡ìO‘W¸lç÷˜5 À°=>DœŽ(¡,3(€á:ãmk,›aÙÏ"°™ÂgmËèôóÑ"o@‘W€o¨h¢!&l(ÆXväÔÎn€`ïèÌd*S³ŒNCB’  @& °ÀG |;éÕX6“fqPÅ`XvPO×A-XÀ¤W0á|l(ÆXv´ZÏžï€`›Ø+5îÚ³E’   ä¢ |ÄÀi°“^m€`siTΑØ£ŽYRì²ën¥Û!‡.ndVí}_p@¡­×wÓ0Îox ìÄ€l–Ë   @ 
°€É0o®9~vÀ°i´zÉ”b¤ö›w­(®þÊM­Í`Öþ¿ñ¶;NAàÇ>~i#y ºMj`“1j ‚(€Q€?tCÍõ°1Ù À¤QËç"# °þ{ÿ„{[+[Öø ÷AWð‚ï÷ô`¡óýó,¿¦ ¹_í›ÏƒÚï’*|~jÛš’{r×§ßö™lþ,pѯš|±-ßX6Ù†p8{€=ûœó‹-¶|òt7cýíGT½ŠÞZ}ûÝC0ÓêÿTÛ!v8ݰ®*HªúäP”¸€<,›úuX #Õžr–m°ìм´ 0Ö«Hi¬ öîÀ*ES}® §ÿ÷VitLiìü”»°i=Œý,Í<—ù]ý¼y£À8*ÀŽ$}Ôgª6À°ãØÆ¶¹ç±XÁ§à4ìþ«}ê.¬ýX¥óÓèøiï9£µÏØvÝ”S|'°ãó6˜ïnU;@`žwÊ4zv À° 6]£ÕØlÙ¸Ø:€õ¡€…Ç`ôï€ý:懠;z üQ§)Ú À¡‰Kù’l0±“u+¶.ÃUX%›²iS6_{hR3šÌ,ç¼X`'EØ¡L£g—,›s[Ù‡²5ÀÚxVÌjØ ØÖ‘ÕlÃj4¦UZ›yØO¯|üè­¥Oµ-i¸ ññÎ>5Ô’O‚ ° VJÆEÒ`>Nvô@!Õ›r·­°ì°]×à/þ‹›G寫­¶ÄýŠ£¯y]äÛöò™\°ð+S·VOÕŒÂá,Ä6þÕÆÀÚDNöíÏ,æëçÎXœšF ì2gD“½çöO¶ÚŽcÎìT­°ã ©5ê”gtí€`‡ål`ÝVØæ`öÊ©}3†U¦.#f*ï‘^™³XEP«ÖeÕ~MÊäGbýYˆ©Š¸ê»lØpŸþ/K›ZÛÀñÉð¥Ø >â—`ØdׇK­¡¥<£ •ƒ¬[€–_¬²:˜]ê"³û «l]\w©;gñÔyYl·ï™²1°Ýæ•Úyl–Ÿé)l¦—h±X€ucŠRkÔ)ÏèÖ À謯 *{¿ƒÙ3ܾm‡UÎÈë* «.£»ºm¤vßð¸etF¥`#­}’°#P‰ Ý Ào,60@`Øaù1ÀljãeCé4Ž÷f·-Êa ì¨@g“÷Àëm0øë°ƒ×|”¯À°ÀËá¥É†Ÿ¼òŒÒ°yìÏî_Yüî__Uüa÷=£¶×XãÏ8¸rÑÌe©lv8ù€ÚÉ߉Œ—-óǽãi ¶ÿïÄüÞ‰ì(#ƪ÷ÀŽO]âNX€`‡bÏßo½Ær;ì°Ã£¸ˆÏºëÎ*V¬XQ›òúë¯/æÌ™S›N Ž8âˆbÑ¢EQi•h“M6)–-[•^uóé´ ÏzÖîCµ•ÅÞäG€ß¨¥}ÔéàÖ*)\ÿÝ¡ÖGøîXkæÌ?°¬º“Âyæ+Àv®gT+À°Cü¦ëlÓÍ6/n[~÷ØÙØ<#°?¿ûÞâO[m3=ƒï¨iÝý¸ìSËï¤66v¡"°O}ÚvD`3ü1–ìø ;>u=ˆ;`Ø„ ¥l©€¦‰üÒ†d6&NYD`Óé°Ãgž}îÀßWݾGÖ›=Ûõ îy}ÓFý€.ÆÀ^é õHw^*kƆz^5‘Ó ›öû¾ê9`}ēΠ€Mºz²+ ÀÌ!¼Ï!{Ý’73fÌhu=ÛjëmÙ 1ïÖ¡Ï`ØNº1°½ûu뢬ZNçb­‹\Ú‰Þ¯•ƒë‘¼Z7ëмªœ…øê¯ÜThkê]8Îù°QÏÈH$j `ÝOVû¸íT¶l5ÐĽ~Xv ðÒ /)æn°A \×\sÍbÓM7?õüýË®¤ãì´èÞX€íµ ïìü2€uÀªes–ºcu–[c©W¸œäÛvò´*òj=€=ûœó‹-¶|ò*Û ÿö¶´¡£Ú^°˜}Þiõ+Õ‘MÜ‚{ò­s&Ù²Õà®ì€`ûÚøÞpËòb¯çì= «ÿøÿXÜtÓMÅøÃâ”SN)fÍšÕ:¦¨¬¢³÷¬x ¯åU' Çû`X¶V¼ƒ, `°®˜Zëu²ƒÓû•´€ ËÒw€ýØÇ/mµU§½çŒé6JðuÈ¡‹i³zèIÀöë±á|®_]wÝâëÇÖ™S?:è¥ßë€`ûÒøª»°&«±îÂsçÎ-þó?ÿ³®þöío»xå+_9 ¸ŠÒæ4Î-GpL¥Ì, ÀöÚ„wv¾{Ñn«­³³úž: €Ýe×Ýjaõ›w­(‘Õæƒ®Þ¹ßÿуÓûtLin¼íÎUÚ_ýoçûóD„Q^óó×ßÊ_­´ŠÛ5-?•-|÷[9²*ÛÊÊÙdÀöýù½ ‚®ßxãâî»ïfËL¶ùçqæÌuÇQ›lˆ,¯‹.ýL±™›aV¿Vk;ú裋ï}ï{Å~ô£ÊMKt(:kçìô;W\}]_àº÷LžO À°lómY†9&°‚?un7ù àSi‘ô xõ¿ÀRíƒÆÌZ÷㣎YÒJ§ÿ ,í|ÓùÙÕyjý1·–¿µ;º–m:_éu¾öé;æ·SäÊkßÐJcÇmŸÊ§ý!à6ÙÞ°>±Ã.2›/¸7°MDq‡mÊ\€í@ÂFìN·Ì„ß]xÇw,®½öÚâÇ?þqôvÞyç9øÝldzÑK åÛdƒI^½×u°,ÛHó•{&ÉlD†ï@A`ØX(€4€Xú¬ã] ¡ÔòX]ßÎÑ5•·A¯ Úÿ_@ê«ÎÓq‹Üª,~ú&Þ÷Uy4 °—;æþ@PþX–‡äo °ÝCº ŸôÖw¬Ò]øŒ3Î(~ò“Ÿtµýà?(N8á„UÆÇª;²®Óφ”¼»·n´`X–VØ)0[¡µè§ £ 
°9‹œúÝ}cÖïflê_ËRÁ«€×º[ú:˜îæ=_wNÃË5 °ì8Øyì=°ÝÁ‹º k9ëú{Øa‡µ†$üô§?íy[¾|¹[ûñ°é¼Õ-Y³×5ˆï®.­ À°±-ÔH§K`Õ½7Œž†ï˲ˆ¥Uµ.ÄU«ý‚9eYä¶® q¯«ó-:\ îGÀŽô³ÝŸ›`ØþXVž¹°Aºõjù×í¶Û®øüç?_ÜÿýoÊw§vš¾Ö³vß³¸æº[Ùf~ì‡#Òiž, ÀæÙ^6\êäVﶺIœü.¸ö.ô»ǬßmXùÙ$O!û‘]+['[ÖÝÙ° [x³;Úå½vóï$ë;IÜKZ€íÅ~Fí\6`Õ]xÖ¬‰Pjœ·¿ýíŸåoú¼sÎ9…f3žŽöþë«›1İ,;j-iW÷“ÀÚ2:‚;ëâ«1£6–Ô&M²cŠ,úÝŠëVùÛ¹–Öþ÷#²Jg“+tª ¬­gëÇÕß6áÛ•ü¤IwÅïôùª[»ü¯w›¾ë>—¹êvŸùu™Ä`Ø;—4l=ÀjÖ-¶Ør _øÂßúÖ·ŠŸýìgÛ´Ï›ßüæéñ¶é·¿ët¢±‚, À°ãÒ¶½Ï,V°hàg³ ”~ÄÕŽ…Ù2€µq¨:߯¡ú3[¾þuuMýïOÚäç£s4I“ò©«ý6Ñ“/Ì6 q§½jºIOâúw€àQp~Îs;N÷vV¥«¿Bu Ewuí #29t v«’ ^™9€`#ìql’°õ{Îy´àuÍ5×,N?ýôâç?ÿùÐ6­»ï¾ûNôÆájô­ÒÊq²nÂ*‡_~¥ñË%}tܺ+?âªc!ø›6óÝDÞ®ue¾~ã[K^°å¥êÎmMØÁR3¬qÿ`;Ø›o¾¹¸æškнèEÅ.»ìÒúû—¿üåÐ6­+¨žz; W¼û}g±‰:G, ÀŽ{ËÛº6Ñwt¿ ÕÏ·a€=ÕÙ“8©í§ ²bÚï|àˆ¶ËÜ"£1cFý.¿vÞdIæe]ƒËÊ[–Î@ÝÏV÷b‘Óª¼}8ö6Œ‡Å•F¡Ž–FÓ¸¬&J`¯¿þúbÇw¬Ü¾øÅ/ö ÜŸúÔ§ŠÍ7ß¼ÐwŽð®ºs[v gŸS€í`o¹å–âÖ[o-–.]ÚêÊûŠW¼¢¸çž{Їzhh›ÊôœçÀ¶㪼Û>;5fWÝŒ·öýÜç>w:¢*Õþƒ>¸•Æ6t}0¶nË~~Ú×T´·é|Øæ›h¶9€ýæ7¿YÜyçÅç>÷¹bçw.<ðÀâ»ßýnñðÃmûÎw¾Ó‚ê©6‹¹n|¬–ê¶Ñå¼z{©ÓhœV½N9唨muÖií®Køá‡ÿ÷_›Nùì°ÃÅvÛm•VéçÎ[,^¼8*½ž±º²vS†§»ÙÏÕ ¤Î®R9¾ÞìÙLšo°È1T€`e=u!¨€ à| 50Ü)BªãÖ¥Ø&J2£´®ÀÚo똀UNˆö[”Ó®¡ ˜,O;ÇÀ×®­ýþ„P:ן@©,]Ó/·ÑW›¹Øò÷£¿¼¸–ŸÒZwa~U¦ÉŠ÷ëV`ê«`SðiÝ‚°ª5èJïŸãlxì‚ .(n¿ývvŒI¶Hêº_}õÕÅ¿ÿû¿°Z«õÿ÷‹³Ï>»ØØM§nÅZ;ö׿þõÐ6E‡÷ØciÝév)®¸úºlœâTœó&Ê1®«nìê»ýó¿ÖxÚc—¼±xÅâWFç{è+/–¼ñ¸¨ô‡º8*]§e^9Í,ÀÔ`Øh€<†e0¦ýIZ8“þ×~[†Ç,\ç„KdžùØXÛï«Uví°|:Oê_§l‚¦²tÊËÊ–Ó"°a寭L?Ú¬ãeiÃ¥}Â7ëÖÆÅú‘MA­¢®6†ViüãoxÃZçÙ>`õ· µéHi¿ò»–IœoØîVÝ„}á ‹¹®{î&ë­÷8€½ë®»ZÙ×¼æ5­nÅŸùÌgŠGyd¨ÛÅ_Üzè‡.m‹yyqçÝ÷²t‚Æ`›€ò¨_ [#¶ñfº]†ìßÝM>[ÃèB\fHí&¨%wp1‹wpÊã’ÖM:ÕIÞuå`§6«*@m°6ópÀæ0öÕÊÀvòHÅ¥`ëÂ0ûùϾ8ä ƒŠM]·Å÷¯¾zq—ÁM&&JV3«ñµ×^[<ûÙÏ.ž÷¼çµ¢³¿ùÍo†¶iíÚO<±5.V«ewM9/ìà´nªÎÈ'¾Îظ¶·¡T,+SŠêB<*k]†c–ì©zΚØv݇u}v `ȩ߭8„UØRóe)YØzÇÌØ£Ýr9®:øs†T¸Ùåjö{ßû^¡õZ?úÑ[n¹eñ–·¼¥µnìoûÛ¡l¿ûÝïŠÿøÇÅ„oA,³×ÛASÀNë¦êŒ|âë €mMã²`ØžV]dýñ¤qf7üT«”Ùy1]©Ý¬ÂdW—ë`4³ªñª~·b«åw,ji“:•E`•ŸØŽi²'ÆÀvb¶ù§m`¶â‘ìŠêìQn|ù[ÖZ«®¶u°+V¬(4±ÒñÇ_lºé¦Å%—\2p€ýãÿXüõ¯mõFmÀØ`ÛÃÐ7ïZ1’ï‘q`v ~;à÷wSÏq*]ˆj­ýº˜óbNýËj«Ýå¾w›€1¥ëV€i3 Û,Ä>€ZVÇì¸Òû3 ûc`-??}ÙÒ<ýÓÚi¾×2¶ñg²I€ýóf›¿ßoÿâWn–ÛŸÐr-aâ§»gêf`ïw¯ëºã~ùË_nyµIœ4Öº[V{ß}÷?úÑ 
-½³Ï>û´º/_¾¼PT´ŸÛþð‡¸>ôÐCÅ5×\Óš-€,5帰՚_ý•›Š-¶|òHì÷ô`q¿½­ÐwS¶“C>lt3}ªKÙMàÈ¿ÀäŒký~ÁÂg¬dËKƒ'm±å#n™»£­¥}®»7týáf#€õ£ f¯tÿ/v[‹-÷zsc°íƦª±faÓïB¬(¬‰5X,Ë·*m§€Ùïôl¯ÑãÏo`íòW·þèïþõUÅ/™éV3î^tégºr$C€ýÄ'>Ql·îº…u!Ö=_î¶M×^»xÑGßøÆ7Zã\ëV]xúÓŸ—]vY±ÕV[¯{ÝëŠx xôÑGÝ®ùË_Šßÿþ÷Åm·ÝV|éK_j½;ØÁë€M` 6›Äo»³ãún2ßÔó`£Ûi×y§‘`Ѥˇ-O æE[ [­@°æˆ:}ÔmKÿºÚjû4$t7Ù¢usbxŽ èz·´E¿akù—F9úuM¶‰'`Õ<ú°þaŠÌþÆ-ñ [FcXΗ&)Úm÷=»Z:¦lc^ùÊé®ÄÙ׬±F¡H¬º?mà ‹sÏ=7`­?ûÙÏŠw¾ó-ýÈG>Ò‚Í^7ëŸÿüç¼Þ{ï½Å7ÞØšL €¸ší°éì(E~‡õ޵ë°ÑítS}AެD`}§³ìo²+Üvš;¶m®fÀÞ-¼°Í?uƒXÿ]ò‘Ÿ}n1Œñ²š¨hÇvîxÙ˜ªu`ΟßêJ¬1°êB¼•‹:»V¤5+ñs×Y§8dß}Ûv!¶¬ìƒ>ØŠÜrÈ!Å®»îÚêú/íf¸ª»ðÊ•+[á¯}íkl"c¥Øx€=ûœó‹}_pÀ*?|i½Õ]vÝ­µ}ìã—¶Ž)Ê©ÿÃñ³:×" §½çŒÇ×¹Ú¯sÝÛµõí_OùÛµ´_ãÖ Ô”¯Ê¢oS:uöRûý2é~,¿0í°Á³©ë°Ñí4- k`ë6èb|‡‹ÊéöMädZ£ °ýŠ|¦’/Ûü“6h€îb¼æŒâѽ¤x¨Ën½Ý:[r@÷ú§½;Z6¦ `5“ºÛ2:»Ë6ÛLGf·wûéOºµŒNÙØ2€Õ7šøª«®*vÚi§Ö²?ÿùÏ M¼³ýéOj«º!ëºßüæ7ØDÀ•l}äÛ„ öÔWûL;ÁŸ Qûì¸A¥Ò F-­öûãiõw4 @:fI+­Ž[‚WËS׳4ÄVV¥QY¬<þ5üò+_K«² ný{ëö–Úylt; ÀFKE¾¬&=r‘ɋݶ,ãmE'ÀìÅféâÛo3`‰À:»£ßvÖiþîy:hïŽV_ý¿vQIEF{Ýþê¢Ý¼Cþ2wƒâ·¯=¦øÅ-ËÒÅøÝï;«8ÌÏuüªöÖ[o-^÷ªW/]sÍU–Ñùÿ^ûÚVVãb÷Þe—®V«í½ï}o±Ùf›ïÿû ÁiÕfWu¾ÿþû[3+š›2ÀÞãf­¾óî{£ë!¶¾ROG¶>kp((¬RíTZÄT p-½@W›ý¯¼ªf8.ëB¬¼”§oOÐÚgçøy†eð¶  S·×NËÀF·þl´T$¬Qàbw¼»ž±UãG»qær>Ç9à+•MÙÔØ|öxêæ5Š›º¡æüì7Uv÷ËBáÞ©ã“O}W<µدýë…ºoâÖS,Ú,Ä;l±E«{ñf3g¶ÆÆF`Õ8Üt®"± .lu¬ú›"®ú<òÈ#­ÙµŠMu'žß7‘Ê€Í`ìBÜ55—Í8ÿÖš±ø—+/¼d Ëïš^æ® Bu{ÑEOÊSVØSN9¥xµ‹H¿ÿ O(ÙË:Ø÷žvZ«û±³Ë–-ku+^´hQñ“Ÿü¤ÕUX‘U:E^SX?~“›ÈKðæÿÀÖw«µÓQHgPhÝmý|°‚G¥ñ7?Žëܰûp6lsvÀFûl´T$D6 tâ@OÍL|¹;GËìLä$, Àæd¯ý.ë°ÆÀjfb-³ó7)J“ÙÄñNáU׬XM’¤5]ýì-·ÜRÌuÑוŠÂºåun¾ùæVdÔ_6«™ˆmûçý÷/6[o½â‡?üáô>;öḘ;wn¡ñ²ZÓUß)¬f~>Ç­ ¬1ÇU½&ØæÀ¡‰çcØyø i“5ù]€ëÖˆµ ™tnØý·›l8fÖïVleõ×y­ëBìѶÖýº>Ýj°ÑR‘zXuÖr:«ç¶Ö†íª>sX­Õ¸ãŽ;VÎ×ÿáÚ°e0)Ϙt©LÞdåålWÆÜÀIƒØ?mµMk9_\wKtô³I'«xí`5þô{îY¸)ÓQØ*€ýðÿý¿Å¾ÏzV«û±f$Ö¶è/(¶]}õâÍn<­íó¿7ÝtÓbùòå«DhS‰ÀÞàÆ2kŒ±[¤½¶+8 ÀúÏx õÇœ*u-¶¬Mždy¨û°Ò”­¿Nòä_WQ\ýÈâçg]–µO׳ÿ­‹²öé•Q_ë>ìÛõ»ÛùšJçÚdTM¾ãRÈ €n˜Øh©HˆìÔ²9roéÉÄ[ØD °'Ÿ|r±ù曯²½á oˆ^Ʀn]×ðøsŸûܶÀk ¨2éÜÔµ®<lOÀªyô`ÿè&ˆúõ»Nê°rêä`vÒmØw»‰À `µìÞn–b­»©ƒ¹»îºëqد~õ«Åönü¬@ws7ñÓIo|cqß}÷µöCnŸŽÝpà ƒXìí·ß¾Jt6€¸VE[ÙÿØØn½&°J6R+ƒ-IcåèùËãØqƒT™;GûÊ"µÚoËî”Ý·Í2ì/£cÝ–•_x-¢)[¶ŒNØ ÚÊÎ2:Í·g™åÀfVa7Q¬ ±ƒÖ»¦ÖzmºÝù.=ì= 
°‚UÁ¢™€VÌ?øÁ(xì`u­Y‚lÏf42ô`¿ßþÅÃn¦ßŸÈ ³Ý¬€uS× Xt½~­µŠ“O8a€ä.˜7¯u\ó<ê¶ã\·ão\l·õÖÅR÷ÿ•nÛËõ˜øéOºÊ&€U×eA«¿¥0Vp&Í4QS]v#°©A㨕§löà~ÜcÙ¸Ù~\'·<‰ÀF»l´T$D6 8j¡ÛŠ)~F`–ê¾kQXl³:¦ÈŠÒÀêÁ¯Ž]pÁÓ€®Žùù©›°Î Ï3€}Ï{ÞÓ:VÔÊ«ì\•Ëʯ2i«榎ëÇ·5òCFŠF>Œ25 °ZÓõgnÜcnW]y{Øc]àS\W`Ea7sÑT= а 4÷r³ a57û´Ö•=ðïÿ¾5‹±Àö™=ÿüó°š9Û6ÔS³+2Nà¤óXº×=·®À~Ô³“ó;I À–Û.Ý’°ÑR‘òU`$V0Žqõ££T9yÚwðÁ·6?‚¬ÀÒò¼ê¼Ã?¼œ:×ïB¬tÚ§ãÊS0kÇõ·ÎÕy:®¿}ÈÕÿÖ]YßMj]>lóp“Û‰ó—SÚ^ö+_ùJk)è+”ž~úé-€=ö¯(Þåþ×~m¯wº½ën|yÉrJ¿C«Ù‡µ)«uh5”¿¥°~]_ãÆ?k6âílßzï°l“ï ÔDIš¼iT»÷R'lt; ÀFKEBÈW‘XëBlQÔX€õáQÐiÚ`}˜ ÁÐàÔöûyj_¬Z´ ¨ë€³éã ¬ã>R€­ˆ^öž{î)žé–ØYæ`í·mçÀó?]Ï‹\4Öàõƒn¹Wp@kbµçï¶[±½ƒÙdµÏ«9¤¹Õ¶É&›šéX³‡[êëÀšs¬¥uî‘næ½8üœ[ÿ ¢Q>°Ñ¾- k8Ã'ñIPlÖ"¨6™“éŒX ­€Ð&†ò»(úÐ*HÖµ§á¸Øp ¬žA±uö£°ÃžªA€•6|بîνì»ßýîVôÕºoíUã]õ¿Àv¯í·o­kp*ÝnþüU VéwvãiuÌök_ûZ+šn¹,P’”PWÔU¬ °Ñ®…%²Ú¼èÔ$Djœ+‘Äd»ÔQ‰Ù¬ÁbÙäJݬÀÔ ¶]V°©´êâk]‰«´` fØÑ{6‰ÀÖ;¦1«Ù„ýu`5A“&qRöÛßþv1×ìJ¡Ç»ñ°tÑTÁ«ûù½5‰“އQÔ—í·_k'‹ÒìîöÔ§¶Ò*«µeµ®l¸°õu댓-±Îl€=?;J^6á*Ê`«ºÒ–uó ÇÀ†Ø2Ø,‹Àú×4еµ_ÛE`mìl»ÙŒ‡5‹1ØæŸP¶Þ9k°þð‡‹]¶Ù¦Øbà +ö»ßýnñ//|akYRÁì?<ñ‰…ÆÈ–EQ_ºï¾«¬ÆÁj&ã­6Ø øÖ·¾ÕØ›nº©¬–mßùÎw A´ Z³+Z{ã7×^{mñ¥/}©ÉýÜç>Wl´ÑF­žêÎ;HG]×{·›©Zcy]®Uoïh„F½ØÛ|;MŽ(P£›°‰Œ$ÀZw`Á¢f¶IšlV›ÄÉ&`R:üj»¬ÆÍ„Z:û¿ÀÚLÉþ¤OÚNâ4ŒudØæŸP¶ÞY-Ø}èCÅÎn©›=\w`EJ7qcZ«"°Ø×ºI›þÿ5לؽ]úw^qï½÷–n°ŠÒªû±f0~ë‰'¶"º:G«õaÕõ¸lK`à œÎLâTo½Àç¢ï m€m¾&G`óµ‘XƒE«èªºüú«ÿm&`›ý×¢«ek³Û´–·òh×…ØŸMXùª{³[×¹éÉšªò`›€ØzרsÏ=·ø‡­¶*vwàªñ«Ö¸À^zé¥Ånüª?îU]˺ÿÚ¾—<ÿùÅÓÝ;ZGV3ë™ðÓÀ ŽË¶Tvé…—T.¡ÀÖÛÞ áƒëQ'½ÚÛ|;MŽ(ÀækYl,ØY×Þvé=m×­·êÜ^"¥º^LÙbï³×tló0[ï°úû;íTœŒMU”´ `ï¼óÎb+×UXi¬û°ºoàºkáª.Àg½ï}­¥­ªŽ `5îVÙªm]ˆïYñ@+ªzЋ^R¬¹æŒV÷ävÛ1¯c«ñ8mW\}ݦªîz)ÎŽÆlóí49¢›¯ Œ4Àö vãr>Ûü ÀÖ;y>À~ò“Ÿ,žìÍ"\=Øõ 8jÒ&¥}µ[ëuûµ×.Nq“9½þ裋ïÿû]mXgÕDQUÛ ö¾ûWo×éÅ^ÿ´w-¸Ô>y«mŠÝvßs¬6ý ÇsõÏ95£Û|;MŽ(Àæk¬ëB8. 
JâÁ=¨l½ÓŽ}µ[õdAµˆªºïïÀtÑ>û·Ýv[kVaÁãÙgœQì?kÖtº÷»õ^wËZ=ÍÁçåîu®ÃZ·_»lÙ²âþçÚnÚÄI‘Øsλ ‰å4¨ŠÂŽãØM7Û€%;²Qxvpí7WB)˜Ä)aS`Ø‚lóO(Û9À^wÝuŖ믿J·`Áì8@Ýa³ÍZ³ûª{ïv®ë°º ëØen<ëóvÞ¹5^õ¤ãŽkÍ(¼·ƒÞ|ä#¥cX«Æ¶Úþ7Þ¸5ƒ±`¹Ý6,€ £Y]ú™Ò±°l½ýD£œl€m¾&G ›¯ °,ۇ瀭wŽËf!ÖøÔý]W`ëBüz×%XKãÜá¶Ü>mÓM§'yÒ¾7ß¼µü&Wúú׿^Ì9³5{ñþÏ~vk_§›öË_þr¡1¶í¶TÖwÀ5ñIo}G±Ý‚íÇrb"°õÏ\NÀFYW­O6º¡>Ã¥œˆNMB¨V€lÂÖÀ°lP¶Þ™®ZöŸÜLÝê ¬ šÖtã\7uccßï¾²Wz3ïäÖˆÕ¬þdKW¬@VÝŽÛMÆTvL{Í5×´–î©ÛR\vœ~¶þ™gûÈýÞØè†Ú5«É·åƒ½*Àöª`ŸÏŸl"uC½Þ9ã>ž4ÇûWݹM/ý^?“ÇÇ)ÀÖ;ÓUû™Ï|¦ØÊAë]S³k]ÖW¼øÅÅÖnÝV¬@VÑØ/|á {ß\pÁÅ® ñë\WâßüæŽßGØ«¯¾º¸ãŽ;j7¶¾Ž lZõ1Ⱥ‡k°Ñ®- k`ÇÁDØ|'ƒ`›BØzgº `o½õÖâ𗽬xßê/££1°Ï~úÓ‹Í\ãÿüÿ¨œ%X“8)‚«IÚÍ&\vL{ÕUW·ß~{íÀÖ×ñ Á€M«>Y÷ãp-6º`£¥"!‹ ¬À°î1 ;õ.`ëiØÍÜMZFçæ›on­á*€½é¦›Š9.⮫ÈèÅ_Üš¸j;ú5¯i-§³½‹â^vÙemÓ†y`µF³ºÇl_ûÚ×ZݘµôΗ¾ô¥Ö¹í6Ú¨5C0˺ÔÛASðÀNë¦êŒ|âë €v´Øh©HÀblÆciŒÀª»ºG-e¡%aæÎÝ z«;à\¼xqqýõ×·V2½ï}ï+6ž3§5U*iVàºåmt\³Ïu3ÈåûϸÊ9ýèG AgU>Ø+¯¼²U†˜ €wÀû +l:uÑïºÇüØh×¢ €pW;•-[ &£­¥}BçF¬¶mCy‘Mª %›ªm£\D`ãœéûî_Yuì’bu7I“@v]7~õôÓOo¬ºè~âŸX`ë–·±ã{º®Æk2'ÅU>¶ÿE{ï]ìºí¶•Kä`¯¸âŠäÆnD`ãêºßÐÀ¦Qý®çqÍ€nÉ›ØÉ9sæ>ú¦ãN*ØòÒàe‡¼¼˜={¶[¤€ D*À°‘¦2ÉØÎœénY^<óY{´ VÛ;ìP|êSŸjM¤dX-—»yæ™Åþ® ñKÀ¾óïœ>O»Áÿù?Å[O8¡4/uýýüç?ßêλ°Õu¿€M£úU¿ãž/í:4° >cå¸Û\Ž÷Ùg¯`£¶À~õ‰O,ît³€²å¥êÎmMÌBÌÓ0¥Û3½ôÂK ÷Ë÷4ÈvØa…f!®[—5<®ñ«›ÎšÕšÉx²G»|”æ…Ï}n«kñwL³‡ç `?ûÙ϶Ƶv²1¶»únÒA`‡_MÖ'y±l—ûÐø¾ Ø.Ÿšq>m ‚lMQ¾ƒÂœ6¶Á€í¾U·â%o<®xÂT·âu€¾ûÝï®]—5\·UKï,qck '»uaŸñ¤'{ì´Sk؛ݦÿÃs `Íll÷õݨ°Ã¯ƒ¦ê’|_—D`£h€¥ qôã’wÂÉ&Šïfï9ÕÛ2¶<5põwdv@)ÀöîLkß½ž³÷t4vë­·.þû¿ÿ»X¾|yÔ¦_î v[vGÀªmîšk¶ÆÆ jßâÖŠ]âf,öóÀ*2ûÕ¯~µ£ €í½¾{…vøuÐkr~u°ÑÞ À°ÑK¾ 绢yË·þ(y¢ °Í9Ó]ú™b£7™Ù—¼ä%-¸ÔøØ˜MA)Úúr²êR,xÕöèTWâË/¿|:¬þvº±ŒNsuÞ ÈŒ+ÀªËÜî{ì½=m»Ånn¼yÝ9»=k÷â©OÝ®6òÙqÇŠíl•Vé•v—gî•þ)O}ZTºNËð,W½ïÆÖ†qÝØ°,ý¸ä›€Í·î(y °ÍÂŒºŸôÖwLw+žá–É9ñÄ‹Ûo¿=z{ûÉ'Ož˜Xb•ÝeË-[3+/¬ÖŽ]¶lYÇÛlw ã °š%UKPÅÚ¬fúÖZÊuéÏ:ë¬bÂ=/uét|¿ýö+öÚk¯¨´J¿á†gœqFTz÷šJ×iž÷¼} ­EÝ© +=Ýà°,ý¸ä›€Í·î(y °ý™;ï¾·Øg¿ý§£±O~ò“ ­íª¥rb¶ÿéŸZc`- Û뺿öå/o/€ýä'?Y|ùË_îx`ûSç±À0Î{Ê)§±Ÿu×U¬X±¢6¹ÖdžãÖaŽùqÄÅ¢E‹b’¶Òl²É&-(ù`c>–á°Ã° ·¡= €`Ø \N`s©©<ÊÉKcªžØþÂŒºMn²éfÓ »Ï>ûW]uUkýØv[À^©õgÿîïZç `/½ôÒB0ÚÍöÅ/~±øÜç>×ÊGŽ·ÆñÆéz³6ó ·Ö2+¥Ø<œŠ.J À°Mù¢nʌնíÂ9e °yŒ.!§S€í Fbaîíï:½˜1c­,ª[ñÑG]h 
ªÍX7qYk‰ÝäPçœsNëumÀ^}õÕ]mŠÞxàÓ`­ˆqì½®7›`ØN¢ÀìÈ6Õî7ÉÕæõxw“¬ÛÛûxXíYÃËè87aµÉm‰Óû¤Û'aÇ4[–ìÀMxà ž†FAè‡>ô¡âÖ[o}Ü&€uÓmOƒk˜Nçj¦cER;ÝŽ=öØBKþLklâ°ñq¼. À°cêy4Ûl¦Q\¶ù‡!ÕØTk&Ïr°ìР튫¯+¶ØòÉÓ ûÌg>³øÂ¾Ph9ÛqËê(â*põ÷ÛßXÍZ|å•WFoZ£v«­¶š¾®–þÉivÓQ]€`ót,5 ÀÊ,‰À&øpZ‘Ø„+'â°ìÐÖ@ìÝï;«XËM̤Hèžð„⨣Žj-»£™†ë6ìE]T\qŵÛùçŸ_ì¶ÛnÓàºÕÖÛZògT€0·û`X6C¯!Í"' °ß¼kEqõWn*Ýn¼íÎFÚŸÓÞsF¡-·6@å%›æÕR°ýPu|ó`Ø$½{Vk÷= Á1ŽC:3U°,;¾ÎGÃwž=À*Šj‘Y®ßV阺+Úêw?V÷aEzíu[¶ü´©µlÃOBÂÙ° WN†E`Ød¡N‘ÒÃõ4„jÆàÓN;íq{Á—_~yk[ºti±çž{NŸ³Ùf›K/¼$Ù{LÍ™dyX€ÍÐkH³ÈY¬ÀU ©±²ûø¥­nÆ~÷bë~¬}J£o¥±÷uÙUzK«c)w/`Ó| úQ*¶ªŽož,›<Üi†àgì°Ó4”n·Ýv­nÃ×_}1oÞ¼â£ýhk&âC=t•eqN:åÅ}÷¯Lþþ )] €`Øñu>¾ólöìsÎ_Fmb#ª"©ú߀Ôë¸àOû|€-Ë/¥÷~X¶á'!áìØ„+'â°l6€wÎy´&`R·bm/vKìÌ;·Ðz®YÛÿ¿´ÐZ³)7Ú”íÑ€`Ø ½†æ‹|¹Ër¢Çl³تq±TE[ `ÃnÅ:nûüêå`X6»¦²î`UŽm§ V0 ÀfÚ“¨a€í‡}’'   ¤« ÀŽ:0¦v, À¦Û&°dݬŠxÛ´ ÏH¬º§Ü ¸—¶€à“Æ¥PFO€í¥æÜÎí€`ØÑkK»¸£^V—;Ímç.XøŒ•¼‡;[3¶‹'†SPPÀ`ókø†ÝðrýÞlFûê×S¼é¸“Fb;ü•GFÝÇnnù§ãŽ;.Ž^]ªu×U¬X±¢6ýõ×__Ì™3§6qÄE'ð¸É&›Ë–-‹ÊÛ½S£ÒuZ†ý÷? Øgßý£4NÁ¦f¬µÖœpÛ©lm5XÙ€N÷Í7ïw¼“{{'C?v|üpõõ_4>·Ë¢À``ókø¾y׊ÒÙkm¾QíÚÕo—^xI60RDKÞx\qðK‰ºìë_ÿú(ÈS"ö1©ž÷¼}ØÑ„á^öÎK¸oÎ°Žƒ`ãã¦p•ù®ênÁP AØüVëá… À÷ºÂüGyvÈAk9.× ŸrÊ)l‡QàÃ[\œyö¹ÙÌ¿ÞìÙ¸&J>Ÿö ôÒ…XA} ]ˆókÃõÎ`ÇçõÀŽO]s§T€\ã'è|ö ,ìàê¬×ºâü¿Õû»wÚ…€`ƒ8ØKu °¯û¸âŽô$N£üþ`û° ójì0ÕçÚ#«@¿V ‹[#$ðòÿ·ýê«cepgût^x¼*?åkù)ïNA»VÙ¹–oÙ}ؾª² `Ï>çüUîÿ¿0Ϫku°U‡š~ÿG>®ŽÊêÆÀVÕƒéPV¯í´òíÅ®­ï°>ªl¦Óú&ý`~`Ø‘mH»»±nÖ‡W]u «÷õ¾/8 ÕóG›þfIoïM¶»‡&dzØk-Ý2Ó}ªnú °—T£§¿Ýe‹þímÓP鳯Ñí;äÐÅÓ §þÿØÇ/måU–Ÿ`ÄŽéÛ Ñ EÓòk´X#mçêZ–ÖöÙ·Òú°¥ýÚç§Óq5þ*‡îÝîÑ.¼¿ØkuÒ…XÐlúÛ·éëÿoõ£zïÙêÆ¿×²zuô5xViUvÿ¦›lÅ~œ°û°ò•ý ˜öæ`5­ À¦ë ¥dݬ ºÐ+mßÖÚ/µö£¡Ú¿ýkú]1ù5 °êNîÛÅP š‹–+ÀbM* ˜àãÀ 2 |ÔðùðþŠŽ­4À³ÍТzR??5²>)ÿx;€µ¼…ÔõTf+ŸòôlS¾:Ç J eÿ+ÿ¸Ò”u!6³k*]̵bÖÊéGAu©æœè>Cm”οg¥±rÚUõꃿiaeÐw;­ ¸­ÎCgI×6•g*-P­r>X–Fw–ºÿ&zÔ¤ï¶ö|ûm–ßë©ÓÞIaÿýQ×CÈ®e=Œü¿ÛõбÞAaYuOv_M ÷iâ ì2gG“=Ú§÷I¶OÂŽi¶ìTÅ`ýˆk]÷×2€õ'¿ÑóaΠÑïz;öÔ¢ÅeNÉ5дÈbÙ}ÕÝK;¨ÄùåÔ¶»VUCiPêë¡H¦µ(¸E·ý ªº‰…÷VW¯~Ôi‚´Ê`åõÛî)Þqøõ>Ç{`Ø1õ7úyÛ}ز¶/|ÿtÛ;É~(ö{MÙ¦öC§ßþË—ðÛ.k³ýFjüžPa¤ØztY™ý¼•¿ßãË÷]š~ç°ý|,ÒÊ€M«>r/ ›ÀZwRW£o¿Ñá3`ýÆÐþŽébB™ß`•A°ß˜ÖAY;X éíæZU«E¼CMüFÛ¢´¾cPU^»NÀʉ°kÊPÄÕÀ†d???ºmðêwV^áýÐ-ý(, Àæî8$Xþ¾lD†mML¡v=nÂ,ý¨ßnXo›`ý^Ÿö°µËÖþ‡?vZï û¡ÖÚËðä¦áUù° >M}*Û'aÇ4[6€ 
»ÙÖE-c¶ÛÉ%ê"°a¾~ÃÝ4Àvz­v‚i•þÒ®ÿ»‰ÀZ”ÔóN"°ö ºEýîÂu‘Þ~8äÙ °ì˜úý¼í¡l¯½“ *ChŒÀú?x‡C”ÂbõC§?–×~@·<Âo?ßýl?‹´ò`ÓªÜKÀ&°þxÉ^#°Ö@ù ?.Fp呟.lüôë¬\8È~¹ éõó+ƒñË¢­Ý\«ª¡-ƒI¥µ_¹mŒ°4²û·c¡–ÊË[Õ5Ü Óÿ%½€5'ÂïöFÃÃ:ìf¶é~:'äýxè`ØÜ‡ËŸÀ†½ˆ:íä÷2 ëwuö{ñX{À&há#P$v*1¡[`XëòcѶ^#°l6&Æ,в_hCèôÇÓXÃæ«4~7ܘ¬?3¯5øeÛ͵Ú“?æÇô°û õÖÿv_VkèUVëŠU×…ØÎ±zí`ý.ÍV^ÿÚþ¬Ê~7e ±™Hi¿t`Ø„|€Q)J_Ö"˜6„£ìÝPÖS§ÓÞI–¯M¤h?H†ícÙØN"°ízY•uQî×».Ä£òøÅݧ©â`°~Ô΃ð×Z›W–?  Ò‡i•¦l=а¡QžjØêf1,k üò”å«F¶lLmx­²{WÞ~¶ÝL‡VŽ˜kÕ5´yüùù•éîÓ9a÷®˜zõÏ+[ãÖ/³ŸŸ@?t˜B`– (ÕE; ëtáøà €`ãšgRu @ßÖ~ôõÛ-½í‡Înz …sØ{Øz6ùËÌYÛc˺…c`;Øp¥ðýO¶Ë#i´l´T$ŒP€Àƒ„QÑ:ü•Ü"²ƒ˜XcT4Lñ>X6¢m&Ig ô`õ.ñgæµ^1ö#c7=†|€ gö'ä ׉gÐج«{ »û½{ØÎŒÔq °q:‘*N€]e™šþ˜2©ñ­Úr™×föï«]¶H3üRX6®y&U `õþ´µVÕã¥lÎNz …=x”gY)ëj«È©]ÓïÍSÖ¨®·õìÒõTfÿ^ŽY± ‰ö„Iœ:°òÌ“°™W`bÅ`Ø‘Ø&Rò>ÜC°lb~À°‹s¥+À¼ 10€‡wÔ ï€íÑò3:€Í¨²2(* À°îWõA6Ø\k¼õ`Ø |ƒAq…»˜|Û^>l¦íÛ‹Ùçwn¯z~wL‰û¥ Ào™6ü€pž À°ýjÐ3Í€ã6€Íô©¥Ø(0dX€cç<°ìÛýÔ.ÀŽqÀ¦ö8RÈC€]`5qCnà„.ƒ½Ül$¶¼,›‡{0°R°ì YÛ2—ÏdCy‘  @ œ‘pÙZ´™3×yð¶åwu4RÓøÛ,·šÕQ[|H[ï¯.-ÇÝv6À°mÜÒ¿ À6°Ç;sïuB°ôŸJˆ(€¦ûhk*}M¯¯éôS²~”GP³ÌŽ­ñ—Š”#_HÀ.^¼¸X¶lYÔ¶îºë_|qmÚ³Î:«˜˜˜¨M§ëî·ß~Å^{í•Vé7ÜpÃâŒ3ΈJïÞ©Qé:-Ãóž·OqæÙç&ó>ª{×›=û§Å|ZÙZX¶)€­56   ÀH)0î{ö9ç·ÖwÊô¿Ö¥«sÔu|ØåÑõ¥Ó î—ëä ©íêné…—»ï±gô¶å–[ÏÜm÷Úô»>s7÷ oY›N×~úÂ…ÅSžú´¨´J¿å“·*vÞå™Qé7Û|ó¨t–áY®W\}]6Ïí"°,ý¸P<Æ`‰´.²Š6î²ënÝú֦Ȭ"•ûø¥«8:Ç Në\EJuŽÒ‡cG혎ëoßÑWÞšÐ!tþ«Êcét]+g˜g]™Ô}Øï6­û´üÂòëº&TòcõÚ´ °Ñ. 
À°Ñ QP€†Å(‚Š8 î paw[¥1HUú_§È­¾ý¨®òR¨†×4-s$ËÊ£tÊÊSl×·<êÊd@í§·ò똿aÚ¦^ò¢°Ñ±6ÚÅ`X6úqÉ7á Wô#ó->%G4÷lY÷ذˮ ÓÒ°Û±`1ŒP*½EU}Ø•£æ§ÿË"°æÔ‡å)ëölyÚXÞº2ùPZ–ŸŠÖ†Ý¬ŽÑê’ºlÒØè¶€`ØèÇ%ß„ó]Ñõ°óAhPöñã]ËÆœú «è¤ßý¶¬[°Ò !õ»$[´µ ËSõÓµ+“œU?º«î¡“ò6é “p… äelt À°lôã’oB6ߺ£ä +À>~‚¢2€õgí #ªukù ý-Ö16ÀÍ bíŠtÔk?l€nðØ'm±å#Ö‹‡ïÇz3å°½ý]§³gÏ`£—|°ùÖ]Š%W—t>NqØ*ø g!¶ådÊ¢•ÊÃÈj ƒNué ·S§1Ø2  »%·+S­Ôºm§÷CzÀ ]`£]‹&vÞ¬Y³n±å§Áœ9sŽŠ¶–ö ñi²Ù°ýPu|ó¼|o}Õ;w€-›a×&qúëÁÚÄH>ß·ýÖeXyjŸ¥±ÿ•ŸMääoÙ$Q~þeåñ'²‰¡ÂnÍ~7æ°L!”úùiL¬?‰“?S3à1ºàÑTÝÊöÃY¸›Ê{ÐùèyífMh=?á{bÐeÖõØhï¢ €¾ GZ+ÝÝ-é;ÌøæØŒ+/Á¢k\""°-'3ß)€ó—Ñ1GÐf¶‰’l¿E;möá²et옮¥ôþ²<:æÃgèx–•G€ ëX~á2:ueÒ}ûOùù)_R©²‰®†åsÝôº¬ þ ê­k7Û@–¿¬gG§yT¥ïÇ}6U6åÀF»g¸”Ñ©IˆÕ ,s‡&(MØ4ë%×R°S57îXëâBi™CgËá„Çúé¬vëX6U&Ó§Ûrp^úÀÙt `û5ÑXÌ{¡L¿¦ž¿²¼‡¡o'6ÀæêQîŒ`®<6áÊɰh,»Jß0‚Yå8–u ì§³Ú‰ãvk޹§ºüíáAuçp|ü Õ¯ó²IÇün¸êQ ÛÔf€¨ãeöª}vnLª·€ìÔ¢¥á3ªÿÃëù½*ô·•Ißþ½èܰ;´å¦-{þT6]»ì½¡û±r•W/ ¿Ü¦‘ )hâùîdz ÀfèQäÜ`®A6áÊɰh,;í¨ÊA­¯Vå`ËÔ¹íÖq퇓X—gSe*sàë®ÍqÖº¹†ëëÇûÁǺÕ˜†]Õmâ4³'çͶý=s¯´Ö·ŸÆ†öìza÷xƒn\µD–MΦô6†¼ê½aãâí^mŒ¹•ÙîMe²¼”Æ`݆6è˜Êc°ïß§þî6BÜÏg€ÍÐ#¢È¹+À&\ƒl•“aÑXv•HK?:òo ·ú·l8q˜A›õ'ó—ª’f7F-Y¦gYb³î§7€¶}Í´ÿUõÇÄû½+ʆTMð‚t˜o»ÉãT–pñvîÔì €ÍÐ#¢È¹+À&\ƒl•“aÑX€ãEäSsúG©<6±X89𥍾}È —‚êdò°2€-ƒÀ²kø“" ²Éõ#ÈÖU·“¡aÚ°|º·°×†ȾV¡m06Cƒ"£@`û«oO¹°=ÉÇÉ, À°Ø@lÀ¢šecamœª±~7\ƒÖ°ûpàWl8N´ $ÃY}ýrû Ú4À†× —¶²ÙÇC`ñgPØ„M€M¸r2, À/}€—:Ðàøèw§6À »ë–-YÚƒuîtò°*€ ' ËÂ`˜O°±šÅD`ÃI ÊºÛ{?š ÀfèqPdè¯lõí)w¶'ù8™l¹ Œû2:Õèu<Ø:#˜}¶$“qÕ>¿+­E^«ºØV­—là©ülb#wjùk–ÖÝÙα ü‰£,ŠkùYäTù„ª\U“?)¯Œ îíºÖµÙÊêíôR¹ýq»íf@¶½3 ®;pÉã/ÀÆkEÊzˆÀ%Kèƒ øk@jÀe¨4¶iaºlßÄB¸ Ûy~´Òf¶k…ìww6˜õ—ͱ‡íZ~~ºžgX¶ð¼²±Òø:øPó5òË.ñ3lxÕõØzd*ÅÍî{^tj¢@µlÂÖ¡‡ü®„ËGÑòR€`—>ÀK 4elĵ½^UKÀTu¥¹NUžáxS?šiÇ:ÁNÒÖ•»]^M^§®M`£¡.¥‚3|P WØ^ìóù}ΟìÇG€`Xl 1°nƃZW9õñ¤MAå ó`£)6Z*Ö(Àb"(0& °,ð’¼ ÒÉæZiFiÕUÖ_ûµßõ¤kååì·.½äÀF{Ql´T$`±@)À°,‹ `Ø@Ã6ÀF;Yl´T$`±@)p*2<¦³§‰ê%¹Ô)6€ ËØhï€–Š„5 éŽ3!f‚(0> °8ºÃrt¹.¶‡ Œž °Ñþ- QPPÀS€=( N±l`X6ÀF»l´T$D@@–qo {–³Ìu5l -`£] 6Z*¢     À°Ø6€ ôÁØh€–Š„(¯3\ÑÏ·ø”ÒT€.ÄiE/ˆ&QØ6³ °Ñm=- Q _滢ëaçƒ(Р,ÎrÎÎ2eÇ~±´l€n Øh©Hˆù*Àæ[w”Õ¥œˆNMB¨V@;‰@i*À¦Y/¹– €ª96-çgœúÀ°œm€ÍÕ-¢Ü+À&\yl•“aÑX–5 û°dÎŽ7e±Þm€ÍÐ#¢È¹+À&\ƒl•“aÑX€`±lhØØ ="Šœ»lÂ5À&\9 €`q\v\‰^õ½BC4ÌÝØ ="Šœ»lÂ5À&\9 
€`XÀ°†m€ÍÐ#¢È¹+À&\ƒl•“aÑXǵaÇ5÷Èå'ú‰ ônl†EÎ]6á`®œ ‹À°,‹ `Ø@Ã6ÀfèQäÜ`®A6áÊɰh,‹ãÚ°ãJôª÷è¢aî6ÀF{DÑ)Iˆí`¶6áÊɰh, À°Ø6€ 4ll´Gt—K9/:5 Q Z6aëÐC~GÂå£hy)À°8® ;®¹GŽ(?ÑOl w`£¡.¥‚3|P WØ^ìóù3úœ?Ù, À°Ø6€ 4ll´#ÀFKEÂXLÆD€ÅqmØq%zÕ{ô Ñ0w`£½(6Z*°Ø  €`X€Å°l a`£,6Z*°Ø  €XŒ )0sæ:Þ¶ünœ¸†¸Ü£(”ŸH 6€ tcl´wÀFKEÂrÇ'P PÆF'µ'•s°l(³6Ú}`£¥"!     x °8¡€6€ `MÙíb°ÑR‘PPXºLÓeÀ°>Øíb°ÑR‘PPX×>8®MEpȇh 6¯ °Ñ.- Q _&\ÑÏÈ·ø”ÒT€.Äù:Š8ùÔ6€ ¤flt[ÀFKEBÈWù®èzØù  4¨‹œšLy°Il _`£h6Z*¢@¾ °ùÖ%OX6_G'ŸºÃ°Ôl€nðØh©Hˆù*Àæ[w)–\öÄÇ)Àâ§æSlÈרh×€–Š„5 àÓ&l"l•“aÑŠ ËÜ—"°ù:Š8ùÔ6€ ¤fltS ÀFKE–¹ã“¨”¦lšõ’k©Ø©š`q€Ss€)6‰ äkl´[t¤K9#:5 Q Z6aë`®œ ‹À°,¥ÃR:Ø6€ 4ll†EÎ]6á`®œ ‹À°8® ;®DÍòšQwÔ]S6ÀfèQäÜ`®A6áÊɰh, À°Ø6€ 4ll†EÎ]6á`®œ ‹À°8® ;®MEpȇh 6¯ °zD9wØ„k€M¸r2, À°,6€ ` Û›¡GD‘sW€M¸Ø„+'â°,ŽkÃŽ+Q³|£fÔu×” °zD9wØ„k€M¸r2, À°,6€ ` Û›¡GD‘sW€M¸Ø„+'â°,ŽkÃŽkSò!ˆ äkl´G4/:% Q ½lÂÀ&\9 €`XÀ°†m€öˆîr)åÛòA^`{U°çO¸¼UA|P  XǵaÇ•¨Y¾Q3ꎺkÊØhe­ ‰Àb(€N€`XlÀ¶6ÚÇ`£¥"aD`1Xǵaǵ©ù Äòµ6Ú‹`£¥"!‹   H€`XlÀ¶6ÚÉ`£¥"!‹   Hƒá1fÎ\çÁÛ–ß×°G)ßuGÝaÝÛí]°ÑR‘°F}Üñ¨„(€c£Û½£†“‹vØ6€ ¬jl´ûÀFKEB@@ð`qÀqÀ±lhÊØh€–Š„(€(€(ÀÒeš.ÓØ6€ ôÁØh€–Š„(€(€(Àâ¸öÁqm*‚C>D±|m€v1Øh©Hˆù*0ኾ4ßâSrHSºçë(âäSwØ6š °Ñm=- Q _滢ëaçƒ(Р,pj0åÁ&±|m€n Øh©Hˆù*Àæ[w”D±|m€v²Øh©HÀb(€¬g3g®óàmËïæ€9lÀ°žm€v²Øh©HÀb(€R`S€Í7ÒA”ŠºÃ°Ôl€ö.Øh©HX£À®îø TB@±Q€ÅNͦ<Ø$6¯ °Ñî- QPPÀS€Í×QÄɧî°l 5`£] 6Z*¢    Ûó8¯ÔœFÊÈ`Ø@ 6ÀF»l´T$D@@€e²lÀú`l´‹ÀFKEBÈW WôËó->%G4  1Q›¢6”;ÄFÃØè¶€–Š„(¯ó]Ñõ°óAhPv4œFœêÀR°6º`£¥"! 
ä«›oÝQò„`qzSpz)vˆ Œ† °Ñ >- Q _Ø|ë.Å’”b¡†Q&v4œFœêÀR°6º%`£¥"aû¸ã¬›¨™°‰VL¦Å*2-wãÅ`qzSpz)vˆ Œ† °ÑÍ4- kXæŽO¢Rš °iÖK®¥`§j€ §çŸzİl€v‹ˆšEKEB6_`ó­»KÀ°,¡Ñ‡%4Rp ) ‡ ÏØ]Ê4â M¸‚Ø„+'â°, ÀbØ6а °zD9wØ„k€M¸r2, Àâ¸6ì¸õ^Ô íÑ>`3ôˆ(rî ° × ›pådX4€`XlÀ¶6Cˆ"ç®›p ° WN†E`X׆×T"@”ƒh$60<`3ôˆ(rî ° × ›pådX4€`XlÀ¶6Cˆ"ç®›p ° WN†E`X׆W¢^Ëz¡=Ú§bl†EÎ]6á`®œ ‹À°,‹ `Ø@Ã6ÀF{D“Ñ)Iˆí`¶6áÊɰh,‹ãÚ°ãšJˆrĆgl´G´Â¥”oËzU€íUÁ>ž?Ïå½Òmª¤n¶ãkÊ6Ã_ÚeÞ*ù·¯—Ôô`X€Å°l a`£=aìÍ]úòWå·¶ûÈïêÆ_Ö9äß^»ÔôMF[ ®À®S¤JêtÛ6¢´äß^¤QÒga„=ŒE’™3×yð¶åwãÄ5ìÄ^íÑž °Ñ®ƒüÒN}YK/¬îCþí5}êìã(€(0: °Ãsôp²ÑÀFÍØÑñ¸@@HRzÔhî›Æ†gl’M=…B@ìØzª›Jv§ÀýW€ž£‡“öØ60j6Àö¿Ýæ (€(0,NwÖ6ˆÏ‰î"×âB\#?XèQs ¹lž °ùù”PbP *ØØZÃtìð=œl´Ç°Q³v ‰ÁÞòyîrkö’\ PÀ`±…$`q GÍæ~°il`x6À&Ñ´ª¢ö·m\P= Ë‚4—¹ý½ö>ÔõvÔ r@UVä¡S¼þÖq{!è׿ôð÷éogçà%#°ÊÛèoÿ\ÿÅ |ýréeÅg„`‡çèád£=6€ Œš °#ä ÄÝŠù¯~jí+ÜúŒeiã®ò·Tl§Š‘T  `µO»ºFèou“ÐG©6|9h¿>¸Ö¥Â€ÔØX?_kÇìÜ£§®m×Rz][ûùÅ«AH%+zÔhî›Æ†gl*­ûÀÊ!ÔüU]T¾§Sä;ÚÇ¢µaÁä[j+ Ø>7Ÿ6Ø2ßÔò,»–ö)_23.4J ”ì¤wƒaÔÔÿÕÊ èïª_¹ìÅayévé¨úåÌ^a´v”ê€{q °Ãsôp²ÑÀFÍرs-äkúAëµö,›Å‚$öí÷,”j¾²ò·c>À†~­Î1x¶<}PÕ1c½Ç®²¸aèU2€õE t=pöðú/{XÃò(塾­;‡Em-½=Äá·°–O¯÷Ìù *Àâ@šÍý`ÓØÀðl€M°¡ïo‘,âj (U~êd¶a@Ä÷kUBËÇ‚/òC ñ£»Jgk½ ý t ?ê+ÿÕ‡bƒ[&ê¯Mû+Ð)ÀÚ‹@5XZë¶×ÿÊ»êãƒðWÇøÞ;eׂؕ2©µ\ýnÅíVÇtp̬Ïe%%;–æÉM7©€úè‡KÝø¿$é¡öûñëÚê¡—@ÙÌiŠÀêÁ׎s ó²|ì~tÜÖìRþþ Ee_0Mê@^CV€Å5šûÁ¦±áÙ;äF}x—†ë¼Z·^Áf蛆KDÆ‘3µÞ†þ<-~WfS"6Gvx6•QP Øá9z8Ùh `£fl3ms†¹RÕ£/ z*ÃùWt{·6±’¾ý†uXåa½íš6æÕïFLâ ‰"£  @[XèQs ¹lž °cët(ÒYݨ¶‹z >ÕÛ¯¬waÙyá>ç÷`T9²Ê3LKôulÍ“G)fMLÜuÅÕ×8|ÃsøÐí±l`Tl€)›A@@ô˜9sæßþ®Ó؇p GÅæ>°el`x6À¦×ÎS"@@5Ž?êØ%ÂážÃ‡öh `£b®Ô8H>(€(€(€(Ð7pЋç‰û°lŽ Üy÷½ÅZk­õpßZ+2F@@@§À®[o³íJ¾á8|èŽîØ60*6pÍu·? 
eE@&Ý­ø <çrgšµÍfxÓÌnþ”ä¹Üål£Àß­¹æïîYñã`‹ `Ø6е ,yãqZc5ßIƒ‹(€(0: üü—s¹3­Í.í/ Ë}PÎ Ü/æ—žyö¹];-£=à>ˆ„aØ6н l¶ù¿rÍÌB[@@ÑQ@ X¶ÎUêwª@üôÔ Mù:R`ÑÞÏßïa·î7´C;lg¸mùÝ…Öï¨å!ñ P0â¼A_t ®'?Y>>9&+Œ[]rõBÑVÕT–Ö¾cØ¿Žòñ?]µ4úö?áÓJŸcWè‘3ªohƬY÷ËgŒ{@°lèÎñêß̘1ãM ¶KdÕ¼‚¬Ðlþ*ùç(ßÚ†ÎÅÞüâÐŽ=—t(¬eЧ_Ádðö‹˜^,U«ަ¦WäS›þÖù1«ˆ©u_Ö9:×麖Fyë?Âje4q5&–_š’5µ® vÐ{þã¯pÞºsÞРݰl`\m@³¯½ö¬_ºÖgF×-'ö[ù}ønq*‡~oÌY ö܉QŠ4Y) £ö'?xêEâÿÂ#¸¬êÚvÛUºØñ´á¯Hძ~Aò_re+Žç¬*iÜ «Ù#5‹ä¸:aÜ7‚ `Ø@ç6ðœçîóÐ:ë¬óòqoC¿&Ê&áTO@h¬÷ `Úÿ´ë%(0”{ÀÔùåÔ¾²ŒÚ¯sôÑ·Ò„× ýg·stÌ;úÛ÷u•®êî$þP¼î!пK°^,U¿ÞXý¨h'k%ÖÃ¥­ `µß>zX¨¶¯ì—(=Üþ9Ý©ÂY©)°p‹-·úÅ}÷¯b™‰À°l Ö>rÁEœ3gîu©5f”çq ”ùrƒê (ð“ßh¾¬þ60Ô>ý/Øzú€©rÛæ÷@Ô>ýoÇ|_Wùi¿†äÓ €õ‘ïêû¸Úç÷’”/®ó-;Ï‚;~9í˜]ߘoä°Ín¬ `µÏ~ °G«=ì±P;׺Û‹¢Ýç?„áKÏ«°#g¦«­6kÖ¬—ì¾çä/‰Bt…@34ðq²õØqð*®Ãéûª'ßÇdO@ƒÉP¥É)Pô‡Ï•õô#ǹæƒ*âêû¬¨>\¨Ç¬ÒTnüùùé:þCW »A”u!î¤ìu÷ÆñÄØ`ƒ Þõ²EÿòëqrĸWÀÀ°x¸á–åÅ“¶Ør…k¾æ'Ö„QœrÂèà {VE&ýÈgè·Võ´rW¥/ëÊëûÜuØ2€µ<ý^“Ò~9ÊÀ{D¬R‹bú¿†ißäÔ]êoA«ªu/¶‡Gçùk“;•‰äÿ"ä?„–ÖºIø×òí H•€ÍÚë ïÆÃž°ãN;ÿòžÔv!Ãé‹wúÐ ­°l w¸ð—ÿaÞ¼ ¼Ö·¥ ¥ßf>¦Š5Èž€ÃXßom `ÍAZûù ÀÈ(Pö‹u“°¾ö~¤ÔÒ[7a›ÝÌë£_¾pv'ŽŸÕuí—$ƒY¿{²u=¶<«&™ÊâFS`bbý6ßbË.ûìU@,cá°lsÐüo>þäGŸ8gÎ-j"h+³R >6螀6ù’/ZY4Õº6›Qº‰À†A¤¨m,­•-¼fX¦ªŠ$ÈÊ@(, tjØz0꺗ýzUõ‹VÕõ«ÆÌúùÄŒ«Õƒ]ö"¢æGSmgÏž}‡–ØQ—±Ü#”Ÿè6€ `ÛÀ™gŸ[Ìš˜xx½õÖ?k4›º‘¿«p.–¦{*ØNjd¢Ú0:[Cðl¾¯ßKPé•¶]/À€µ¥&•ŸÝ§µÀŽ«Ì¼ñVåÐÇÊ\6Ž·l§?zä-l¦nîÊ6Efµ•=ÀÝä«s:aûªÛkq^¾ ´îºëþD +GFëþávaØ@.6pÅÕ×G»äOëÍžýˆVr©k¾æåÛ„}ÉÃ(¤i²'`¢¡àþì¾úÛÀ0,C¸ÔOØ °,rìû±ÖƒÑ&<Õ±0Oÿš”ñÓÔú÷¤|•Öï5é¯29ulì FO×ÚÍéÁ°q°ö+PS¿ò(ߨ¼ôÐûl7÷Â9y+p™µÖZëá­·Ùv¥€öMÇTh{Æ;=²`á3V²¡6€ `yÙÀâ#^ó{½Çõ­º[{íu~ë"®w¹æêx·Íϻ٢ôS~žà«ì3¨ž€òƒ«zÆú¡u•éÙkw=åSwͲ²j_Ùy½©êî‰ã(€(€ *°«Ëë ·:µ-vß“lh€ `Ø@v6`ïñ%SuG´µÁÆ2‘¬zÖÏž€MÊS6çL“ùWåÅø×A¨Ì5PPPPPÆFuµ±Þ´~œîWOÀNËÒ.½¢£a—á&ó/Ëk×ì÷=‘?                                                                               
ôG­3=ÙAÖJ«sø¤£€fãÔ2|PPPPPP`è N­©Bi=m±­iXtLZBBË+LÆ^`*AYÝ¢÷~¶±ç(ÝÑn;qjoØaù”Ü®×îÜAÀ¥ê¤“:ìâV9PPPPPêd ÃMÀÒĺ{ýX^ÝGøi5T¹ìžËÎ ó([ú*˜³u«ô<½¾:VIaeÔwÄv ü¡•€íF5ÎA@@@@h\XÁ•Ea=40Òw/Ÿ~¬ÀR÷FRí¾ª Öؘè¢Aœà´,½®oyZD¸W=ý2Vý˜Àöbœ‹(€(€(€(€(•zeQH¨Éî¨ß[U´€UÙÎs› ´Ý= N•Fi«îÇ@Riª>í¢ÅeçèZ:ÇÎ++#Ûƒqr*          @^ ´ØvÀ¥q:W‘A}WM¼äŸÌÎQ”·ì㙺Íú×(ëF+¸ô#°k{ç:YÖqm:f+—ʤtíºKä*¢Z°–G]´Úº!ÇD|UNXi«2–å_°ÒYX=©¬í>º–îW›Õi».Ä~é:LÀU#0‡QPPPPºW `}èÑß™5À²ÿË¢|ö¿uÅ »þ †,­½Âòûi×°|üîÄ>ˆ U3UZÜ2€µ{SYê>v~̤N¦¯òÔõu?áýW¬éj`‘ä²rZ^~éÞªV÷kiý.Öu _§ÇQPPPPP T*€Uä®,ê§ý‚<c?!úðéG] úÂȧåcÑS+´•'Œ^V•ßö·kyYÞe“,Y>Že[õ…ï$­°“õQ–ŸêÃÊé¯_O!l[}ø?Bè\¿þüû°h²®ïG¶MË&&ã‘E@@@@@U0@3Xò#i‚Øefª@²,úi08ò¯a@VÖÅØ¢þ±&Vå)Ó²ýÃX•¥,Ò[°qýPöáÖ7„ªû¯ê6Ý.jÝ®Ë1           t­@ÀZ·ÓªèeÙÅê¶ìœ²n±í"”Ýó£„M¬Á¡±´±§>`}ø´.ÈezÕu‰ÏQ=·\¤~t8¬ÛN¢Ì].'¢         ŒŸehàv×5u1õÇFÔ•Eýª"{Ê«ìÚíàÇ@ËcÙÀN–@œ[å÷Ç«`}Í âËôj§¹òϱÊ"¶–Þ·«‡p|­o:ÖÉ ã÷äqÇ(€(€(€(€(€+P€Ui죊Í:+P©ê¶š ÀJ8+«€Õ–Î Çr–ÝE’«Ð¯”:¸ Ó–Mˆä—s˜«k«î«¶Ž‘PPPPPP U[5VÒº“†ÑµnºWu­ŠÞ•]£©¬4²¼ãð“xeÚn™ÿtK;ÁQU7]¢¤vV_öXå+‹lûå®êB\¡ç‰C@@@@@¾(P€ºXÙÕªb»lÙò/í¹j©ƒ5*ë¶j]Ò²ûðËTU÷¯ý*wÕú¶6’Ò„P\U±UePzÓ®ëÿ ÐnY]»lvi»/Fa]§ª;yY]ôÅ@ÉPPPPPLv[Yl7pÕXí·cÖÝÔ(\ºÆöÄZzƒ²°KmUùýe€”‡À͇³*µû«‚ѪóL++·®ge·5mÛM”Tf‘íÖ@Ô´õÖ@Ü4·1¼þ²8!DûúZ¹M[ëê—Ñ–ÑQý õµ¥–xÊPPPPPP QÚ¬.d>뎪oÛg0§ïvXA§®cçtù³ ÛMYwU¶,½Ž…Ýbە߇á°[rˆVu­:OåÔ…×´²Ûyáú¶í*³Àê<ƒN¥ó'šÒ1?âë_[«œeŸ°ì§e]ˆu~¸N¬oeuۨᒠ         Àø) ¨d8žÕWAÀXv\ð"xô£oJvAÕ>ƒ+}+:§óªÆgj¿Ì®3YQ5uå¯:_åª*C»cUzøÅÓùvŸú6ýt®ßõW:´û´+‡W—Fºé:Úª4ôË`éUn¿ÞÚÙHx¦kn“Ã(€(€(€(€(€(€©*àG£Ûaªå§\(€(€(€(€(€(€c¦@UWÞ1“ÛE@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@1Pàÿ·ßŽ×Å‘>7IEND®B`‚manila-2013.2.dev175.gbf1a399/doc/source/_static/0000775000175000017500000000000012301410516021202 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_static/.gitignore0000664000175000017500000000000012301410454023161 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_static/default.css0000664000175000017500000000707712301410454023354 0ustar chuckchuck00000000000000/** * Sphinx stylesheet -- default theme * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @import 
url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: sans-serif; font-size: 100%; background-color: #11303d; color: #000; margin: 0; padding: 0; } div.document { background-color: #1c4e63; } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { margin: 0 0 0 230px; } div.body { background-color: #ffffff; color: #000000; padding: 0 20px 30px 20px; } div.footer { color: #ffffff; width: 100%; padding: 9px 0 9px 0; text-align: center; font-size: 75%; } div.footer a { color: #ffffff; text-decoration: underline; } div.related { background-color: #133f52; line-height: 30px; color: #ffffff; } div.related a { color: #ffffff; } div.sphinxsidebar { } div.sphinxsidebar h3 { font-family: 'Trebuchet MS', sans-serif; color: #ffffff; font-size: 1.4em; font-weight: normal; margin: 0; padding: 0; } div.sphinxsidebar h3 a { color: #ffffff; } div.sphinxsidebar h4 { font-family: 'Trebuchet MS', sans-serif; color: #ffffff; font-size: 1.3em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { color: #ffffff; } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 10px; padding: 0; color: #ffffff; } div.sphinxsidebar a { color: #98dbcc; } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } /* -- body styles ----------------------------------------------------------- */ a { color: #355f7c; text-decoration: none; } a:hover { text-decoration: underline; } div.body p, div.body dd, div.body li { text-align: left; line-height: 130%; } div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-family: 'Trebuchet MS', sans-serif; background-color: #f2f2f2; font-weight: normal; color: #20435c; border-bottom: 1px solid #ccc; margin: 20px -20px 10px -20px; padding: 3px 0 3px 10px; } div.body h1 { margin-top: 0; font-size: 200%; } div.body h2 { font-size: 160%; } div.body h3 { font-size: 140%; 
} div.body h4 { font-size: 120%; } div.body h5 { font-size: 110%; } div.body h6 { font-size: 100%; } a.headerlink { color: #c60f0f; font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { background-color: #c60f0f; color: white; } div.body p, div.body dd, div.body li { text-align: left; line-height: 130%; } div.admonition p.admonition-title + p { display: inline; } div.admonition p { margin-bottom: 5px; } div.admonition pre { margin-bottom: 5px; } div.admonition ul, div.admonition ol { margin-bottom: 5px; } div.note { background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } div.warning { background-color: #ffe4e4; border: 1px solid #f66; } p.admonition-title { display: inline; } p.admonition-title:after { content: ":"; } pre { padding: 5px; background-color: #eeffcc; color: #333333; line-height: 120%; border: 1px solid #ac9; border-left: none; border-right: none; } tt { background-color: #ecf0f3; padding: 0 1px 0 1px; font-size: 0.95em; } .warning tt { background: #efc2c2; } .note tt { background: #d6d6d6; } manila-2013.2.dev175.gbf1a399/doc/source/_static/.placeholder0000664000175000017500000000000012301410454023454 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_static/basic.css0000664000175000017500000001462512301410454023006 0ustar chuckchuck00000000000000/** * Sphinx stylesheet -- basic theme * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* -- main layout ----------------------------------------------------------- */ div.clearer { clear: both; } /* -- relbar ---------------------------------------------------------------- */ div.related { width: 100%; font-size: 90%; } div.related h3 { display: none; } div.related ul { margin: 0; padding: 0 0 0 10px; list-style: none; } div.related li { display: inline; } div.related li.right { float: right; margin-right: 5px; } /* -- sidebar 
--------------------------------------------------------------- */ div.sphinxsidebarwrapper { padding: 10px 5px 0 10px; } div.sphinxsidebar { float: left; width: 230px; margin-left: -100%; font-size: 90%; } div.sphinxsidebar ul { list-style: none; } div.sphinxsidebar ul ul, div.sphinxsidebar ul.want-points { margin-left: 20px; list-style: square; } div.sphinxsidebar ul ul { margin-top: 0; margin-bottom: 0; } div.sphinxsidebar form { margin-top: 10px; } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } img { border: 0; } /* -- search page ----------------------------------------------------------- */ ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } /* -- index page ------------------------------------------------------------ */ table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } /* -- general index --------------------------------------------------------- */ table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } /* -- general body styles --------------------------------------------------- */ a.headerlink { visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: 
visible; } div.body p.caption { text-align: inherit; } div.body td { text-align: left; } .field-list ul { padding-left: 1em; } .first { } p.rubric { margin-top: 30px; font-weight: bold; } /* -- sidebars -------------------------------------------------------------- */ div.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; padding: 7px 7px 0 7px; background-color: #ffe; width: 40%; float: right; } p.sidebar-title { font-weight: bold; } /* -- topics ---------------------------------------------------------------- */ div.topic { border: 1px solid #ccc; padding: 7px 7px 0 7px; margin: 10px 0 10px 0; } p.topic-title { font-size: 1.1em; font-weight: bold; margin-top: 10px; } /* -- admonitions ----------------------------------------------------------- */ div.admonition { margin-top: 10px; margin-bottom: 10px; padding: 7px; } div.admonition dt { font-weight: bold; } div.admonition dl { margin-bottom: 0; } p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; } div.body p.centered { text-align: center; margin-top: 25px; } /* -- tables ---------------------------------------------------------------- */ table.docutils { border: 0; border-collapse: collapse; } table.docutils td, table.docutils th { padding: 1px 8px 1px 0; border-top: 0; border-left: 0; border-right: 0; border-bottom: 1px solid #aaa; } table.field-list td, table.field-list th { border: 0 !important; } table.footnote td, table.footnote th { border: 0 !important; } th { text-align: left; padding-right: 5px; } /* -- other body styles ----------------------------------------------------- */ dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } dt:target, .highlight { background-color: #fbe54e; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } .field-list ul { margin: 0; padding-left: 1em; } .field-list p { margin: 0; } .refcount { color: #060; } .optional { font-size: 1.3em; } 
.versionmodified { font-style: italic; } .system-message { background-color: #fda; padding: 5px; border: 3px solid red; } .footnote:target { background-color: #ffa } .line-block { display: block; margin-top: 1em; margin-bottom: 1em; } .line-block .line-block { margin-top: 0; margin-bottom: 0; margin-left: 1.5em; } /* -- code displays --------------------------------------------------------- */ pre { overflow: auto; } td.linenos pre { padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; } tt.descclassname { background-color: transparent; } tt.xref, a tt { background-color: transparent; font-weight: bold; } h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { background-color: transparent; } /* -- math display ---------------------------------------------------------- */ img.math { vertical-align: middle; } div.body div.math p { text-align: center; } span.eqno { float: right; } /* -- printout stylesheet --------------------------------------------------- */ @media print { div.document, div.documentwrapper, div.bodywrapper { margin: 0 !important; width: 100%; } div.sphinxsidebar, div.related, div.footer, #top-link { display: none; } } manila-2013.2.dev175.gbf1a399/doc/source/_static/jquery.tweet.js0000664000175000017500000001635312301410454024217 0ustar chuckchuck00000000000000(function($) { $.fn.tweet = function(o){ var s = { username: ["seaofclouds"], // [string] required, unless you want to display our tweets. :) it can be an array, just do ["username1","username2","etc"] list: null, //[string] optional name of list belonging to username avatar_size: null, // [integer] height and width of avatar if displayed (48px max) count: 3, // [integer] how many tweets to display? intro_text: null, // [string] do you want text BEFORE your your tweets? 
outro_text: null, // [string] do you want text AFTER your tweets? join_text: null, // [string] optional text in between date and tweet, try setting to "auto" auto_join_text_default: "i said,", // [string] auto text for non verb: "i said" bullocks auto_join_text_ed: "i", // [string] auto text for past tense: "i" surfed auto_join_text_ing: "i am", // [string] auto tense for present tense: "i was" surfing auto_join_text_reply: "i replied to", // [string] auto tense for replies: "i replied to" @someone "with" auto_join_text_url: "i was looking at", // [string] auto tense for urls: "i was looking at" http:... loading_text: null, // [string] optional loading text, displayed while tweets load query: null // [string] optional search query }; if(o) $.extend(s, o); $.fn.extend({ linkUrl: function() { var returning = []; var regexp = /((ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?)/gi; this.each(function() { returning.push(this.replace(regexp,"$1")); }); return $(returning); }, linkUser: function() { var returning = []; var regexp = /[\@]+([A-Za-z0-9-_]+)/gi; this.each(function() { returning.push(this.replace(regexp,"@$1")); }); return $(returning); }, linkHash: function() { var returning = []; var regexp = / [\#]+([A-Za-z0-9-_]+)/gi; this.each(function() { returning.push(this.replace(regexp, ' #$1')); }); return $(returning); }, capAwesome: function() { var returning = []; this.each(function() { returning.push(this.replace(/\b(awesome)\b/gi, '$1')); }); return $(returning); }, capEpic: function() { var returning = []; this.each(function() { returning.push(this.replace(/\b(epic)\b/gi, '$1')); }); return $(returning); }, makeHeart: function() { var returning = []; this.each(function() { returning.push(this.replace(/(<)+[3]/gi, "")); }); return $(returning); } }); function relative_time(time_value) { var parsed_date = Date.parse(time_value); var relative_to = (arguments.length > 1) ? 
arguments[1] : new Date(); var delta = parseInt((relative_to.getTime() - parsed_date) / 1000); var pluralize = function (singular, n) { return '' + n + ' ' + singular + (n == 1 ? '' : 's'); }; if(delta < 60) { return 'less than a minute ago'; } else if(delta < (45*60)) { return 'about ' + pluralize("minute", parseInt(delta / 60)) + ' ago'; } else if(delta < (24*60*60)) { return 'about ' + pluralize("hour", parseInt(delta / 3600)) + ' ago'; } else { return 'about ' + pluralize("day", parseInt(delta / 86400)) + ' ago'; } } function build_url() { var proto = ('https:' == document.location.protocol ? 'https:' : 'http:'); if (s.list) { return proto+"//api.twitter.com/1/"+s.username[0]+"/lists/"+s.list+"/statuses.json?per_page="+s.count+"&callback=?"; } else if (s.query == null && s.username.length == 1) { return proto+'//twitter.com/status/user_timeline/'+s.username[0]+'.json?count='+s.count+'&callback=?'; } else { var query = (s.query || 'from:'+s.username.join('%20OR%20from:')); return proto+'//search.twitter.com/search.json?&q='+query+'&rpp='+s.count+'&callback=?'; } } return this.each(function(){ var list = $('
    ').appendTo(this); var intro = '

    '+s.intro_text+'

    '; var outro = '

    '+s.outro_text+'

    '; var loading = $('

    '+s.loading_text+'

    '); if(typeof(s.username) == "string"){ s.username = [s.username]; } if (s.loading_text) $(this).append(loading); $.getJSON(build_url(), function(data){ if (s.loading_text) loading.remove(); if (s.intro_text) list.before(intro); $.each((data.results || data), function(i,item){ // auto join text based on verb tense and content if (s.join_text == "auto") { if (item.text.match(/^(@([A-Za-z0-9-_]+)) .*/i)) { var join_text = s.auto_join_text_reply; } else if (item.text.match(/(^\w+:\/\/[A-Za-z0-9-_]+\.[A-Za-z0-9-_:%&\?\/.=]+) .*/i)) { var join_text = s.auto_join_text_url; } else if (item.text.match(/^((\w+ed)|just) .*/im)) { var join_text = s.auto_join_text_ed; } else if (item.text.match(/^(\w*ing) .*/i)) { var join_text = s.auto_join_text_ing; } else { var join_text = s.auto_join_text_default; } } else { var join_text = s.join_text; }; var from_user = item.from_user || item.user.screen_name; var profile_image_url = item.profile_image_url || item.user.profile_image_url; var join_template = ' '+join_text+' '; var join = ((s.join_text) ? join_template : ' '); var avatar_template = ''+from_user+'\'s avatar'; var avatar = (s.avatar_size ? avatar_template : ''); var date = ''+relative_time(item.created_at)+''; var text = '' +$([item.text]).linkUrl().linkUser().linkHash().makeHeart().capAwesome().capEpic()[0]+ ''; // until we create a template option, arrange the items below to alter a tweet's display. list.append('
  • ' + avatar + date + join + text + '
  • '); list.children('li:first').addClass('tweet_first'); list.children('li:odd').addClass('tweet_even'); list.children('li:even').addClass('tweet_odd'); }); if (s.outro_text) list.after(outro); }); }); }; })(jQuery);manila-2013.2.dev175.gbf1a399/doc/source/_static/tweaks.css0000664000175000017500000001155412301410454023221 0ustar chuckchuck00000000000000ul.todo_list { list-style-type: none; margin: 0; padding: 0; } ul.todo_list li { display: block; margin: 0; padding: 7px 0; border-top: 1px solid #eee; } ul.todo_list li p { display: inline; } ul.todo_list li p.link { font-weight: bold; } ul.todo_list li p.details { font-style: italic; } ul.todo_list li { } div.admonition { border: 1px solid #8F1000; } div.admonition p.admonition-title { background-color: #8F1000; border-bottom: 1px solid #8E8E8E; } a { color: #CF2F19; } div.related ul li a { color: #CF2F19; } div.sphinxsidebar h4 { background-color:#8E8E8E; border:1px solid #255E6E; color:white; font-size:1em; margin:1em 0 0.5em; padding:0.1em 0 0.1em 0.5em; } em { font-style: normal; } table.docutils { font-size: 11px; } .tweet_list li { font-size: 0.9em; border-bottom: 1px solid #eee; padding: 5px 0; } .tweet_list li .tweet_avatar { float: left; } /* ------------------------------------------ PURE CSS SPEECH BUBBLES by Nicolas Gallagher - http://nicolasgallagher.com/pure-css-speech-bubbles/ http://nicolasgallagher.com http://twitter.com/necolas Created: 02 March 2010 Version: 1.1 (21 October 2010) Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher ------------------------------------------ */ /* THE SPEECH BUBBLE ------------------------------------------------------------------------------------------------------------------------------- */ /* THE SPEECH BUBBLE ------------------------------------------------------------------------------------------------------------------------------- */ .triangle-border { position:relative; padding:15px; margin:1em 0 3em; border:5px solid #BC1518; color:#333; 
background:#fff; /* css3 */ -moz-border-radius:10px; -webkit-border-radius:10px; border-radius:10px; } /* Variant : for left positioned triangle ------------------------------------------ */ .triangle-border.left { margin-left:30px; } /* Variant : for right positioned triangle ------------------------------------------ */ .triangle-border.right { margin-right:30px; } /* THE TRIANGLE ------------------------------------------------------------------------------------------------------------------------------- */ .triangle-border:before { content:""; display:block; /* reduce the damage in FF3.0 */ position:absolute; bottom:-40px; /* value = - border-top-width - border-bottom-width */ left:40px; /* controls horizontal position */ width:0; height:0; border:20px solid transparent; border-top-color:#BC1518; } /* creates the smaller triangle */ .triangle-border:after { content:""; display:block; /* reduce the damage in FF3.0 */ position:absolute; bottom:-26px; /* value = - border-top-width - border-bottom-width */ left:47px; /* value = (:before left) + (:before border-left) - (:after border-left) */ width:0; height:0; border:13px solid transparent; border-top-color:#fff; } /* Variant : top ------------------------------------------ */ /* creates the larger triangle */ .triangle-border.top:before { top:-40px; /* value = - border-top-width - border-bottom-width */ right:40px; /* controls horizontal position */ bottom:auto; left:auto; border:20px solid transparent; border-bottom-color:#BC1518; } /* creates the smaller triangle */ .triangle-border.top:after { top:-26px; /* value = - border-top-width - border-bottom-width */ right:47px; /* value = (:before right) + (:before border-right) - (:after border-right) */ bottom:auto; left:auto; border:13px solid transparent; border-bottom-color:#fff; } /* Variant : left ------------------------------------------ */ /* creates the larger triangle */ .triangle-border.left:before { top:10px; /* controls vertical position */ left:-30px; 
/* value = - border-left-width - border-right-width */ bottom:auto; border-width:15px 30px 15px 0; border-style:solid; border-color:transparent #BC1518; } /* creates the smaller triangle */ .triangle-border.left:after { top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */ left:-21px; /* value = - border-left-width - border-right-width */ bottom:auto; border-width:9px 21px 9px 0; border-style:solid; border-color:transparent #fff; } /* Variant : right ------------------------------------------ */ /* creates the larger triangle */ .triangle-border.right:before { top:10px; /* controls vertical position */ right:-30px; /* value = - border-left-width - border-right-width */ bottom:auto; left:auto; border-width:15px 0 15px 30px; border-style:solid; border-color:transparent #BC1518; } /* creates the smaller triangle */ .triangle-border.right:after { top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */ right:-21px; /* value = - border-left-width - border-right-width */ bottom:auto; left:auto; border-width:9px 0 9px 21px; border-style:solid; border-color:transparent #fff; } manila-2013.2.dev175.gbf1a399/doc/source/_templates/0000775000175000017500000000000012301410516021711 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_templates/.gitignore0000664000175000017500000000000012301410454023670 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_templates/.placeholder0000664000175000017500000000000012301410454024163 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/index.rst0000664000175000017500000000405412301410454021421 0ustar chuckchuck00000000000000.. Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to Manila's developer documentation! ========================================== Manila is an OpenStack project to provide "block storage as a service". * **Component based architecture**: Quickly add new behaviors * **Highly available**: Scale to very serious workloads * **Fault-Tolerant**: Isolated processes avoid cascading failures * **Recoverable**: Failures should be easy to diagnose, debug, and rectify * **Open Standards**: Be a reference implementation for a community-driven api * **API Compatibility**: Manila strives to provide API-compatible with popular systems like Amazon EC2 This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional draft and project documentation on Manila and other components of OpenStack can be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_. .. _`OpenStack wiki`: http://wiki.openstack.org .. _`docs.openstack.org`: http://docs.openstack.org Developer Docs ============== .. toctree:: :maxdepth: 1 devref/index API Extensions ============== Go to http://api.openstack.org for information about Manila API extensions. Outstanding Documentation Tasks =============================== .. 
todolist:: Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` r Docs manila-2013.2.dev175.gbf1a399/doc/source/_ga/0000775000175000017500000000000012301410516020302 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/source/_ga/layout.html0000664000175000017500000000105512301410454022507 0ustar chuckchuck00000000000000{% extends "!layout.html" %} {% block footer %} {{ super() }} {% endblock %} manila-2013.2.dev175.gbf1a399/doc/README.rst0000664000175000017500000000164612301410454017753 0ustar chuckchuck00000000000000================= Building the docs ================= Dependencies ============ Sphinx_ You'll need sphinx (the python one) and if you are using the virtualenv you'll need to install it in the virtualenv specifically so that it can load the manila modules. :: pip install Sphinx Graphviz_ Some of the diagrams are generated using the ``dot`` language from Graphviz. :: sudo apt-get install graphviz .. _Sphinx: http://sphinx.pocoo.org .. _Graphviz: http://www.graphviz.org/ Use `make` ========== Just type make:: % make Look in the Makefile for more targets. Manually ======== 1. Generate the code.rst file so that Sphinx will pull in our docstrings:: % ./generate_autodoc_index.sh > source/code.rst 2. Run `sphinx_build`:: % sphinx-build -b html source build/html The docs have been built ======================== Check out the `build` directory to find them. Yay! 
manila-2013.2.dev175.gbf1a399/doc/ext/0000775000175000017500000000000012301410516017054 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/ext/manila_todo.py0000664000175000017500000000643612301410454021726 0ustar chuckchuck00000000000000# -*- coding: utf-8 -*- # This is a hack of the builtin todo extension, to make the todo_list # more user friendly from sphinx.ext.todo import * import re def _(s): return s def process_todo_nodes(app, doctree, fromdocname): if not app.config['todo_include_todos']: for node in doctree.traverse(todo_node): node.parent.remove(node) # Replace all todolist nodes with a list of the collected todos. # Augment each todo with a backlink to the original location. env = app.builder.env if not hasattr(env, 'todo_all_todos'): env.todo_all_todos = [] # remove the item that was added in the constructor, since I'm tired of # reading through docutils for the proper way to construct an empty list lists = [] for i in xrange(5): lists.append(nodes.bullet_list("", nodes.Text('', ''))) lists[i].remove(lists[i][0]) lists[i]['classes'].append('todo_list') for node in doctree.traverse(todolist): if not app.config['todo_include_todos']: node.replace_self([]) continue for todo_info in env.todo_all_todos: para = nodes.paragraph() filename = env.doc2path(todo_info['docname'], base=None) # Create a reference newnode = nodes.reference('', '') line_info = todo_info['lineno'] link = _('%(filename)s, line %(line_info)d') % locals() innernode = nodes.emphasis(link, link) newnode['refdocname'] = todo_info['docname'] try: newnode['refuri'] = app.builder.get_relative_uri( fromdocname, todo_info['docname']) newnode['refuri'] += '#' + todo_info['target']['refid'] except NoUri: # ignore if no URI can be determined, e.g. 
for LaTeX output pass newnode.append(innernode) para += newnode para['classes'].append('todo_link') todo_entry = todo_info['todo'] env.resolve_references(todo_entry, todo_info['docname'], app.builder) item = nodes.list_item('', para) todo_entry[1]['classes'].append('details') comment = todo_entry[1] m = re.match(r"^P(\d)", comment.astext()) priority = 5 if m: priority = int(m.group(1)) if priority < 0: priority = 1 if priority > 5: priority = 5 item['classes'].append('todo_p' + str(priority)) todo_entry['classes'].append('todo_p' + str(priority)) item.append(comment) lists[priority - 1].insert(0, item) node.replace_self(lists) def setup(app): app.add_config_value('todo_include_todos', False, False) app.add_node(todolist) app.add_node(todo_node, html=(visit_todo_node, depart_todo_node), latex=(visit_todo_node, depart_todo_node), text=(visit_todo_node, depart_todo_node)) app.add_directive('todo', Todo) app.add_directive('todolist', TodoList) app.connect('doctree-read', process_todos) app.connect('doctree-resolved', process_todo_nodes) app.connect('env-purge-doc', purge_todos) manila-2013.2.dev175.gbf1a399/doc/ext/__init__.py0000664000175000017500000000000012301410454021154 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/doc/ext/manila_autodoc.py0000664000175000017500000000035112301410454022405 0ustar chuckchuck00000000000000import gettext import os gettext.install('manila') from manila import utils def setup(app): print "**Autodocumenting from %s" % os.path.abspath(os.curdir) rv = utils.execute('./generate_autodoc_index.sh') print rv[0] manila-2013.2.dev175.gbf1a399/doc/find_autodoc_modules.sh0000775000175000017500000000072512301410454023006 0ustar chuckchuck00000000000000#!/bin/bash MANILA_DIR='manila/' # include trailing slash DOCS_DIR='source' modules='' for x in `find ${MANILA_DIR} -name '*.py' | grep -v manila/tests`; do if [ `basename ${x} .py` == "__init__" ] ; then continue fi relative=manila.`echo ${x} | sed -e 's$^'${MANILA_DIR}'$$' -e 
's/.py$//' -e 's$/$.$g'` modules="${modules} ${relative}" done for mod in ${modules} ; do if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; then echo ${mod} fi done manila-2013.2.dev175.gbf1a399/doc/generate_autodoc_index.sh0000775000175000017500000000177112301410454023321 0ustar chuckchuck00000000000000#!/bin/sh SOURCEDIR=doc/source/api if [ ! -d ${SOURCEDIR} ] ; then mkdir -p ${SOURCEDIR} fi for x in `./find_autodoc_modules.sh`; do echo "Generating ${SOURCEDIR}/${x}.rst" echo "${SOURCEDIR}/${x}.rst" >> .autogenerated heading="The :mod:\`${x}\` Module" # Figure out how long the heading is # and make sure to emit that many '=' under # it to avoid heading format errors # in Sphinx. heading_len=$(echo "$heading" | wc -c) underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') ( cat < ${SOURCEDIR}/${x}.rst done if [ ! -f ${SOURCEDIR}/autoindex.rst ] ; then cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst done echo ${SOURCEDIR}/autoindex.rst >> .autogenerated fi manila-2013.2.dev175.gbf1a399/doc/Makefile0000664000175000017500000000637212301410454017725 0ustar chuckchuck00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXSOURCE = source PAPER = BUILDDIR = build # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest .DEFAULT_GOAL = html help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* -rm -rf manila.sqlite if [ -f .autogenerated ] ; then \ cat .autogenerated | xargs rm ; \ rm .autogenerated ; \ fi html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." 
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/manila.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/manila.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." manila-2013.2.dev175.gbf1a399/setup.py0000664000175000017500000000141512301410454017223 0ustar chuckchuck00000000000000#!/usr/bin/env python # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True) manila-2013.2.dev175.gbf1a399/test-requirements.txt0000664000175000017500000000044712301410454021756 0ustar chuckchuck00000000000000# Install bounded pep8/pyflakes first, then let flake8 install pep8==1.4.5 pyflakes>=0.7.2,<0.7.4 flake8==2.0 hacking>=0.5.6,<0.8 coverage>=3.6 hp3parclient>=2.0,<3.0 mock>=1.0 mox>=0.5.3 MySQL-python nose nosehtmloutput>=0.0.3 nosexcover openstack.nose_plugin>=0.7 psycopg2 sphinx>=1.1.2,<1.2 manila-2013.2.dev175.gbf1a399/tools/0000775000175000017500000000000012301410516016647 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/tools/conf/0000775000175000017500000000000012301410516017574 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/tools/conf/generate_sample.sh0000775000175000017500000000246012301410454023271 0ustar chuckchuck00000000000000#!/usr/bin/env bash print_hint() { echo "Try \`${0##*/} --help' for more information." >&2 } PARSED_OPTIONS=$(getopt -n "${0##*/}" -o ho: \ --long help,output-dir: -- "$@") if [ $? != 0 ] ; then print_hint ; exit 1 ; fi eval set -- "$PARSED_OPTIONS" while true; do case "$1" in -h|--help) echo "${0##*/} [options]" echo "" echo "options:" echo "-h, --help show brief help" echo "-o, --output-dir=DIR File output directory" exit 0 ;; -o|--output-dir) shift OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'` shift ;; --) break ;; esac done OUTPUTDIR=${OUTPUTDIR:-etc/manila} if ! [ -d $OUTPUTDIR ] then echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2 exit 1 fi OUTPUTFILE=$OUTPUTDIR/manila.conf.sample FILES=$(find manila -type f -name "*.py" ! 
-path "manila/tests/*" -exec \ grep -l "Opt(" {} \; | sort -u) PYTHONPATH=./:${PYTHONPATH} \ python $(dirname "$0")/extract_opts.py ${FILES} > \ $OUTPUTFILE # When we use openstack.common.config.generate we won't need this any more sed -i 's/^#connection=sqlite.*/#connection=sqlite:\/\/\/\/manila\/openstack\/common\/db\/$sqlite_db/' $OUTPUTFILE manila-2013.2.dev175.gbf1a399/tools/conf/extract_opts.py0000664000175000017500000001404512301410454022672 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 SINA Corporation # All Rights Reserved. # Author: Zhongyue Luo # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Extracts OpenStack config option info from module(s).""" from __future__ import print_function import __builtin__ setattr(__builtin__, '_', lambda x: x) import os import re import socket import sys import textwrap from oslo.config import cfg from manila.openstack.common import importutils STROPT = "StrOpt" BOOLOPT = "BoolOpt" INTOPT = "IntOpt" FLOATOPT = "FloatOpt" LISTOPT = "ListOpt" MULTISTROPT = "MultiStrOpt" OPTION_COUNT = 0 OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, FLOATOPT, LISTOPT, MULTISTROPT])) OPTION_HELP_INDENT = "####" PY_EXT = ".py" BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")) WORDWRAP_WIDTH = 60 def main(srcfiles): print('\n'.join(['#' * 20, '# manila.conf sample #', '#' * 20, '', '[DEFAULT]', ''])) _list_opts(cfg.ConfigOpts, cfg.__name__ + ':' + cfg.ConfigOpts.__name__) mods_by_pkg = dict() for filepath in srcfiles: pkg_name = filepath.split(os.sep)[1] mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), os.path.basename(filepath).split('.')[0]]) mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) # NOTE(lzyeval): place top level modules before packages pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys()) pkg_names.sort() ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys()) ext_names.sort() pkg_names.extend(ext_names) for pkg_name in pkg_names: mods = mods_by_pkg.get(pkg_name) mods.sort() for mod_str in mods: _print_module(mod_str) print("# Total option count: %d" % OPTION_COUNT) def _print_module(mod_str): mod_obj = None if mod_str.endswith('.__init__'): mod_str = mod_str[:mod_str.rfind(".")] try: mod_obj = importutils.import_module(mod_str) except Exception as e: sys.stderr.write("Failed to collect options from module %s: %s\n" % ( mod_str, str(e))) return _list_opts(mod_obj, mod_str) def _list_opts(obj, name): opts = list() for attr_str in dir(obj): attr_obj = getattr(obj, attr_str) if isinstance(attr_obj, cfg.Opt): opts.append(attr_obj) elif 
(isinstance(attr_obj, list) and all(map(lambda x: isinstance(x, cfg.Opt), attr_obj))): opts.extend(attr_obj) if not opts: return global OPTION_COUNT OPTION_COUNT += len(opts) print('#') print('# Options defined in %s' % name) print('#') print() for opt in opts: _print_opt(opt) print() def _get_my_ip(): try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) (addr, port) = csock.getsockname() csock.close() return addr except socket.error: return None MY_IP = _get_my_ip() HOST = socket.gethostname() def _sanitize_default(s): """Set up a reasonably sensible default for pybasedir, my_ip and host.""" if s.startswith(BASEDIR): return s.replace(BASEDIR, '/usr/lib/python/site-packages') elif s == MY_IP: return '10.0.0.1' elif s == HOST: return 'manila' elif s.strip() != s: return '"%s"' % s return s OPT_TYPES = { 'StrOpt': 'string value', 'BoolOpt': 'boolean value', 'IntOpt': 'integer value', 'FloatOpt': 'floating point value', 'ListOpt': 'list value', 'MultiStrOpt': 'multi valued', } def _print_opt(opt): opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help if not opt_help: sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name) opt_type = None try: opt_type = OPTION_REGEX.search(str(type(opt))).group(0) except (ValueError, AttributeError) as err: sys.stderr.write("%s\n" % str(err)) sys.exit(1) opt_help += ' (' + OPT_TYPES[opt_type] + ')' print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))) try: if opt_default is None: print('#%s=' % opt_name) elif opt_type == STROPT: assert(isinstance(opt_default, basestring)) print('#%s=%s' % (opt_name, _sanitize_default(opt_default))) elif opt_type == BOOLOPT: assert(isinstance(opt_default, bool)) print('#%s=%s' % (opt_name, str(opt_default).lower())) elif opt_type == INTOPT: assert(isinstance(opt_default, int) and not isinstance(opt_default, bool)) print('#%s=%s' % (opt_name, opt_default)) elif opt_type == FLOATOPT: assert(isinstance(opt_default, float)) 
print('#%s=%s' % (opt_name, opt_default)) elif opt_type == LISTOPT: assert(isinstance(opt_default, list)) print('#%s=%s' % (opt_name, ','.join(opt_default))) elif opt_type == MULTISTROPT: assert(isinstance(opt_default, list)) for default in opt_default: print('#%s=%s' % (opt_name, default)) print() except Exception: sys.stderr.write('Error in option "%s"\n' % opt_name) sys.exit(1) if __name__ == '__main__': if len(sys.argv) < 2: print("usage: python %s [srcfile]...\n" % sys.argv[0]) sys.exit(0) main(sys.argv[1:]) manila-2013.2.dev175.gbf1a399/tools/with_venv.sh0000775000175000017500000000012412301410454021215 0ustar chuckchuck00000000000000#!/bin/bash TOOLS=`dirname $0` VENV=$TOOLS/../.venv source $VENV/bin/activate && $@ manila-2013.2.dev175.gbf1a399/tools/lintstack.py0000775000175000017500000001453712301410454021233 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """pylint error checking.""" import cStringIO as StringIO import json import re import sys from pylint import lint from pylint.reporters import text # Note(maoy): E1103 is error code related to partial type inference ignore_codes = ["E1103"] # Note(maoy): the error message is the pattern of E0202. 
It should be ignored # for manila.tests modules ignore_messages = ["An attribute affected in manila.tests"] # Note(maoy): we ignore all errors in openstack.common because it should be # checked elsewhere. We also ignore manila.tests for now due to high false # positive rate. ignore_modules = ["manila/openstack/common/", "manila/tests/"] KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" class LintOutput(object): _cached_filename = None _cached_content = None def __init__(self, filename, lineno, line_content, code, message, lintoutput): self.filename = filename self.lineno = lineno self.line_content = line_content self.code = code self.message = message self.lintoutput = lintoutput @classmethod def from_line(cls, line): m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line) matched = m.groups() filename, lineno, code, message = (matched[0], int(matched[1]), matched[2], matched[-1]) if cls._cached_filename != filename: with open(filename) as f: cls._cached_content = list(f.readlines()) cls._cached_filename = filename line_content = cls._cached_content[lineno - 1].rstrip() return cls(filename, lineno, line_content, code, message, line.rstrip()) @classmethod def from_msg_to_dict(cls, msg): """From the output of pylint msg, to a dict, where each key is a unique error identifier, value is a list of LintOutput """ result = {} for line in msg.splitlines(): obj = cls.from_line(line) if obj.is_ignored(): continue key = obj.key() if key not in result: result[key] = [] result[key].append(obj) return result def is_ignored(self): if self.code in ignore_codes: return True if any(self.filename.startswith(name) for name in ignore_modules): return True if any(msg in self.message for msg in ignore_messages): return True return False def key(self): if self.code in ["E1101", "E1103"]: # These two types of errors are like Foo class has no member bar. # We discard the source code so that the error will be ignored # next time another Foo.bar is encountered. 
return self.message, "" return self.message, self.line_content.strip() def json(self): return json.dumps(self.__dict__) def review_str(self): return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" "%(code)s: %(message)s" % self.__dict__) class ErrorKeys(object): @classmethod def print_json(cls, errors, output=sys.stdout): print >>output, "# automatically generated by tools/lintstack.py" for i in sorted(errors.keys()): print >>output, json.dumps(i) @classmethod def from_file(cls, filename): keys = set() for line in open(filename): if line and line[0] != "#": d = json.loads(line) keys.add(tuple(d)) return keys def run_pylint(): buff = StringIO.StringIO() reporter = text.ParseableTextReporter(output=buff) args = ["--include-ids=y", "-E", "manila"] lint.Run(args, reporter=reporter, exit=False) val = buff.getvalue() buff.close() return val def generate_error_keys(msg=None): print "Generating", KNOWN_PYLINT_EXCEPTIONS_FILE if msg is None: msg = run_pylint() errors = LintOutput.from_msg_to_dict(msg) with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: ErrorKeys.print_json(errors, output=f) def validate(newmsg=None): print "Loading", KNOWN_PYLINT_EXCEPTIONS_FILE known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) if newmsg is None: print "Running pylint. Be patient..." newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) print "Unique errors reported by pylint: was %d, now %d." \ % (len(known), len(errors)) passed = True for err_key, err_list in errors.items(): for err in err_list: if err_key not in known: print err.lintoutput print passed = False if passed: print "Congrats! pylint check passed." redundant = known - set(errors.keys()) if redundant: print "Extra credit: some known pylint exceptions disappeared." for i in sorted(redundant): print json.dumps(i) print "Consider regenerating the exception file if you will." else: print ("Please fix the errors above. 
If you believe they are false " "positives, run 'tools/lintstack.py generate' to overwrite.") sys.exit(1) def usage(): print """Usage: tools/lintstack.py [generate|validate] To generate pylint_exceptions file: tools/lintstack.py generate To validate the current commit: tools/lintstack.py """ def main(): option = "validate" if len(sys.argv) > 1: option = sys.argv[1] if option == "generate": generate_error_keys() elif option == "validate": validate() else: usage() if __name__ == "__main__": main() manila-2013.2.dev175.gbf1a399/tools/enable-pre-commit-hook.sh0000775000175000017500000000232012301410454023442 0ustar chuckchuck00000000000000#!/bin/sh # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. PRE_COMMIT_SCRIPT=.git/hooks/pre-commit make_hook() { echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT chmod +x $PRE_COMMIT_SCRIPT if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then echo "pre-commit hook was created successfully" else echo "unable to create pre-commit hook" fi } # NOTE(jk0): Make sure we are in manila's root directory before adding the hook. if [ ! -d ".git" ]; then echo "unable to find .git; moving up a directory" cd .. 
if [ -d ".git" ]; then make_hook else echo "still unable to find .git; hook not created" fi else make_hook fi manila-2013.2.dev175.gbf1a399/tools/install_venv_common.py0000664000175000017500000001460012301410454023277 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Synced in from openstack-common """ import argparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, pip_requires, test_requires, py_version, project): self.root = root self.venv = venv self.pip_requires = pip_requires self.test_requires = test_requires self.py_version = py_version self.project = project def die(self, message, *args): print >> sys.stderr, message % args sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. 
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora(self.root, self.venv, self.pip_requires, self.test_requires, self.py_version, self.project) else: return Distro(self.root, self.venv, self.pip_requires, self.test_requires, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. """ if not os.path.isdir(self.venv): print 'Creating venv...', if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print 'done.' print 'Installing pip in venv...', if not self.run_command(['tools/with_venv.sh', 'easy_install', 'pip>1.0']).strip(): self.die("Failed to install pip.") print 'done.' else: print "venv already exists..." pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print 'Installing dependencies with pip (this can take a while)...' # First things first, make sure our venv has the latest pip and # distribute. # NOTE: we keep pip at version 1.1 since the most recent version causes # the .venv creation to fail. 
See: # https://bugs.launchpad.net/nova/+bug/1047120 self.pip_install('pip==1.1') self.pip_install('distribute') # Install greenlet by hand - just listing it in the requires file does # not # get it installed in the right order self.pip_install('greenlet') self.pip_install('-r', self.pip_requires) self.pip_install('-r', self.test_requires) def parse_args(self, argv): """Parses command-line arguments.""" parser = argparse.ArgumentParser() parser.add_argument('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install") return parser.parse_args(argv[1:]) class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print 'Installing virtualenv via easy_install...', if self.run_command(['easy_install', 'virtualenv']): print 'Succeeded' return else: print 'Failed' self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def yum_install(self, pkg, **kwargs): print "Attempting to install '%s' via yum" % pkg self.run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.yum_install('python-virtualenv', check_exit_code=False) super(Fedora, self).install_virtualenv() manila-2013.2.dev175.gbf1a399/tools/lintstack.sh0000775000175000017500000000420612301410454021205 0ustar chuckchuck00000000000000#!/usr/bin/env bash # Copyright (c) 2012-2013, AT&T Labs, Yun Mao # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Use lintstack.py to compare pylint errors. # We run pylint twice, once on HEAD, once on the code before the latest # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) # Get the current branch name. GITHEAD=`git rev-parse --abbrev-ref HEAD` if [[ "$GITHEAD" == "HEAD" ]]; then # In detached head mode, get revision number instead GITHEAD=`git rev-parse HEAD` echo "Currently we are at commit $GITHEAD" else echo "Currently we are at branch $GITHEAD" fi cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then # The HEAD is a Merge commit. Here, the patch to review is # HEAD^2, the master branch is at HEAD^1, and the patch was # written based on HEAD^2~1. PREV_COMMIT=`git rev-parse HEAD^2~1` git checkout HEAD~1 # The git merge is necessary for reviews with a series of patches. # If not, this is a no-op so won't hurt either. git merge $PREV_COMMIT else # The HEAD is not a merge commit. This won't happen on gerrit. # Most likely you are running against your own patch locally. # We assume the patch to examine is HEAD, and we compare it against # HEAD~1 git checkout HEAD~1 fi # First generate tools/pylint_exceptions from HEAD~1 $TOOLS_DIR/lintstack.head.py generate # Then use that as a reference to compare against HEAD git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. 
FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions manila-2013.2.dev175.gbf1a399/tools/install_venv.py0000664000175000017500000000444212301410454021732 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack, LLC # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Installation script for Manila's development virtualenv.""" import optparse import os import subprocess import sys import install_venv_common as install_venv def print_help(): help = """ Manila development environment setup is complete. Manila development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Manila virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print help def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') project = 'Manila' py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': main(sys.argv) manila-2013.2.dev175.gbf1a399/PKG-INFO0000664000175000017500000000306612301410516016611 0ustar chuckchuck00000000000000Metadata-Version: 1.1 Name: manila Version: 2013.2.dev175.gbf1a399 Summary: Shared Storage for OpenStack Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: The Choose Your Own Adventure README for Manila =============================================== You have come across a storage service for an open cloud computing service. It has identified itself as "Manila." It was abstracted from the Nova project. To monitor it from a distance: follow `@openstack `_ on twitter. 
To tame it for use in your own cloud: read http://docs.openstack.org To dissect it in detail: visit http://github.com/stackforge/manila To taunt it with its weaknesses: use http://bugs.launchpad.net/manila To watch it: http://jenkins.openstack.org To hack at it: read HACKING To cry over its pylint problems: http://jenkins.openstack.org/job/manila-pylint/violations Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 2.6 manila-2013.2.dev175.gbf1a399/babel.cfg0000664000175000017500000000002112301410454017227 0ustar chuckchuck00000000000000[python: **.py] manila-2013.2.dev175.gbf1a399/CONTRIBUTING.md0000664000175000017500000000124612301410454017744 0ustar chuckchuck00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in the "If you're a developer, start here" section of this page: [http://wiki.openstack.org/HowToContribute](http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer.2C_start_here:) Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at [http://wiki.openstack.org/GerritWorkflow](http://wiki.openstack.org/GerritWorkflow). Pull requests submitted through GitHub will be ignored. Bugs should be filed [on Launchpad](https://bugs.launchpad.net/manila), not in GitHub's issue tracker. 
manila-2013.2.dev175.gbf1a399/tox.ini0000664000175000017500000000156612301410454017033 0ustar chuckchuck00000000000000[tox] envlist = py26,py27,pep8 [testenv] setenv = VIRTUAL_ENV={envdir} NOSE_WITH_OPENSTACK=1 NOSE_OPENSTACK_COLOR=1 NOSE_OPENSTACK_RED=0.05 NOSE_OPENSTACK_YELLOW=0.025 NOSE_OPENSTACK_SHOW_ELAPSED=1 NOSE_OPENSTACK_STDOUT=1 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = nosetests {posargs} [testenv:pep8] commands = flake8 flake8 --filename=manila* bin [testenv:venv] commands = {posargs} [testenv:cover] setenv = NOSE_WITH_COVERAGE=1 [testenv:pylint] setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt pylint==0.26.0 commands = bash tools/lintstack.sh [flake8] ignore = E12,E711,E712,H302,H303,H304,H401,H402,H403,H404,F builtins = _ exclude = .venv,.tox,dist,doc,openstack,*egg manila-2013.2.dev175.gbf1a399/setup.cfg0000664000175000017500000000372312301410516017335 0ustar chuckchuck00000000000000[metadata] name = manila version = 2013.2 summary = Shared Storage for OpenStack description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 2.6 [global] setup-hooks = pbr.hooks.setup_hook [files] packages = manila scripts = bin/manila-all bin/manila-api bin/manila-clear-rabbit-queues bin/manila-manage bin/manila-rootwrap bin/manila-rpc-zmq-receiver bin/manila-scheduler bin/manila-share [entry_points] manila.scheduler.filters = AvailabilityZoneFilter = manila.openstack.common.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter 
CapabilitiesFilter = manila.openstack.common.scheduler.filters.capabilities_filter:CapabilitiesFilter CapacityFilter = manila.scheduler.filters.capacity_filter:CapacityFilter JsonFilter = manila.openstack.common.scheduler.filters.json_filter:JsonFilter RetryFilter = manila.scheduler.filters.retry_filter:RetryFilter manila.scheduler.weights = CapacityWeigher = manila.scheduler.weights.capacity:CapacityWeigher [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [compile_catalog] directory = manila/locale domain = manila [update_catalog] domain = manila output_dir = manila/locale input_file = manila/locale/manila.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = manila/locale/manila.pot [nosetests] tests = manila/tests cover-package = manila cover-erase = true cover-inclusive = true verbosity = 2 detailed-errors = 1 [wheel] universal = 1 manila-2013.2.dev175.gbf1a399/contrib/0000775000175000017500000000000012301410516017147 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/0000775000175000017500000000000012301410516020630 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/README.rst0000664000175000017500000000077412301410454022330 0ustar chuckchuck00000000000000==================== Tempest Integration ==================== This directory contains the files necessary for tempest to cover Manila project. To install: $ TEMPEST_DIR=/path/to/tempest $ cp tempest/* ${TEMPEST_DIR} notes: These files based on tempest master branch (pre-icehouse), it is pluggable-like files without requirements to change core tempest files. But the way of its pluggability is work-around for tempest, which hasn't pluggable functionality for exceptions, config and clients modules. 
manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/0000775000175000017500000000000012301410516022311 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/0000775000175000017500000000000012301410516023062 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/0000775000175000017500000000000012301410516024347 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_rules_negative.py0000664000175000017500000001046412301410454031002 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest.api.shares import base from tempest import exceptions from tempest import test class ShareRulesNegativeTestJSON(base.BaseSharesTest): @classmethod def setUpClass(cls): super(ShareRulesNegativeTestJSON, cls).setUpClass() # create share _, cls.share = cls.create_share_wait_for_active() # create snapshot _, cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"]) @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_share_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.create_access_rule, "wrong_share_id") @test.attr(type='negative') def test_delete_access_rule_ip_with_wrong_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.delete_access_rule, self.share["id"], "wrong_rule_id") @test.attr(type='negative') def test_create_try_access_rule_ip_to_snapshot(self): self.assertRaises(exceptions.NotFound, self.shares_client.create_access_rule, self.snap["id"]) @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_type(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "wrong_type", "1.2.3.4") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_1(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.256") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_2(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.1.1.-") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_3(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.4/33") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_4(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.*") @test.attr(type='negative') def 
test_create_access_rule_ip_with_wrong_target_5(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.*/23") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_6(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.1|23") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_7(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.1/-1") @test.attr(type='negative') def test_create_access_rule_ip_with_wrong_target_8(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_access_rule, self.share["id"], "ip", "1.2.3.1/") class ShareRulesNegativeTestXML(ShareRulesNegativeTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_metadata_negative.py0000664000175000017500000000726612301410454031436 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest.api.shares import base from tempest import exceptions from tempest import test class SharesMetadataNegativeTestJSON(base.BaseSharesTest): @classmethod def setUpClass(cls): super(SharesMetadataNegativeTestJSON, cls).setUpClass() _, cls.share = cls.create_share_wait_for_active() @test.attr(type=['negative', ]) def test_try_set_metadata_to_unexisting_share(self): md = {u"key1": u"value1", u"key2": u"value2", } self.assertRaises(exceptions.NotFound, self.shares_client.set_metadata, "wrong_share_id", md) @test.attr(type=['negative', ]) def test_try_update_all_metadata_for_unexisting_share(self): md = {u"key1": u"value1", u"key2": u"value2", } self.assertRaises(exceptions.NotFound, self.shares_client.update_all_metadata, "wrong_share_id", md) @test.attr(type=['negative', ]) def test_try_set_metadata_with_empty_key(self): self.assertRaises(exceptions.BadRequest, self.shares_client.set_metadata, self.share["id"], {"": "value"}) @test.attr(type=['negative', ]) def test_try_upd_metadata_with_empty_key(self): self.assertRaises(exceptions.BadRequest, self.shares_client.update_all_metadata, self.share["id"], {"": "value"}) @test.attr(type=['negative', ]) def test_try_set_metadata_with_too_big_key(self): too_big_key = "x" * 256 md = {too_big_key: "value"} self.assertRaises(exceptions.BadRequest, self.shares_client.set_metadata, self.share["id"], md) @test.attr(type=['negative', ]) def test_try_upd_metadata_with_too_big_key(self): too_big_key = "x" * 256 md = {too_big_key: "value"} self.assertRaises(exceptions.BadRequest, self.shares_client.update_all_metadata, self.share["id"], md) @test.attr(type=['negative', ]) def test_try_set_metadata_with_too_big_value(self): too_big_value = "x" * 1024 md = {"key": too_big_value} self.assertRaises(exceptions.BadRequest, self.shares_client.set_metadata, self.share["id"], md) @test.attr(type=['negative', ]) def test_try_upd_metadata_with_too_big_value(self): too_big_value = "x" * 1024 md = {"key": too_big_value} 
self.assertRaises(exceptions.BadRequest, self.shares_client.update_all_metadata, self.share["id"], md) @test.attr(type=['negative', ]) def test_try_delete_unexisting_metadata(self): self.assertRaises(exceptions.NotFound, self.shares_client.delete_metadata, self.share["id"], "wrong_key") class SharesMetadataNegativeTestXML(SharesMetadataNegativeTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_shares_negative.py0000664000175000017500000001060212301410454031127 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest.api.shares import base from tempest import exceptions from tempest import exceptions_shares from tempest import test class SharesNegativeTestJSON(base.BaseSharesTest): @test.attr(type='negative') def test_create_share_with_invalid_protocol(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_share, share_protocol="nonexistent_protocol") @test.attr(type='negative') def test_get_share_with_wrong_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.get_share, "wrong_share_id") @test.attr(type='negative') def test_get_share_without_passing_share_id(self): # Should not be able to get share when empty ID is passed self.assertRaises(exceptions.NotFound, self.shares_client.get_share, '') @test.attr(type='negative') def test_delete_share_with_wrong_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.delete_share, "wrong_share_id") @test.attr(type='negative') def test_delete_share_without_passing_share_id(self): # Should not be able to delete share when empty ID is passed self.assertRaises(exceptions.NotFound, self.shares_client.delete_share, '') @test.attr(type='negative') def test_create_snapshot_with_wrong_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.create_snapshot, "wrong_share_id") @test.attr(type='negative') def test_delete_snapshot_with_wrong_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.delete_snapshot, "wrong_share_id") @test.attr(type='negative') def test_create_share_with_invalid_size(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_share, size="#$%") @test.attr(type='negative') def test_create_share_with_out_passing_size(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_share, size="") @test.attr(type='negative') def test_create_share_with_zero_size(self): self.assertRaises(exceptions.BadRequest, self.shares_client.create_share, size=0) @test.attr(type='negative') def 
test_try_delete_share_with_existing_snapshot(self): # share can not be deleted while snapshot exists # create share resp, share = self.create_share_wait_for_active() # create snapshot resp, snap = self.create_snapshot_wait_for_active(share["id"]) # try delete share self.assertRaises(exceptions.Unauthorized, self.shares_client.delete_share, share["id"]) @test.attr(type='negative') def test_create_share_from_snap_with_less_size(self): # requires minimum 5Gb available space skip_msg = "Check disc space for this test" try: # create share _, share = self.create_share_wait_for_active(size=2) except exceptions_shares.ShareBuildErrorException: self.skip(skip_msg) try: # create snapshot _, snap = self.create_snapshot_wait_for_active(share["id"]) except exceptions.SnapshotBuildErrorException: self.skip(skip_msg) # try create share from snapshot with less size self.assertRaises(exceptions.BadRequest, self.create_share_wait_for_active, size=1, snapshot_id=snap["id"]) class SharesNegativeTestXML(SharesNegativeTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_metadata.py0000664000175000017500000001336512301410454027551 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest.api.shares import base from tempest import test class SharesMetadataTestJSON(base.BaseSharesTest): @classmethod def setUpClass(cls): super(SharesMetadataTestJSON, cls).setUpClass() _, cls.share = cls.create_share_wait_for_active() @test.attr(type=['positive', ]) def test_set_metadata_in_share_creation(self): md = {u"key1": u"value1", u"key2": u"value2", } # create share with metadata _, share = self.create_share_wait_for_active(metadata=md) # get metadata of share resp, metadata = self.shares_client.get_metadata(share["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify metadata self.assertEqual(md, metadata) @test.attr(type=['positive', ]) def test_set_get_delete_metadata(self): md = {u"key3": u"value3", u"key4": u"value4", } # create share _, share = self.create_share_wait_for_active() # set metadata resp, set_md = self.shares_client.set_metadata(share["id"], md) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # read metadata resp, get_md = self.shares_client.get_metadata(share["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify metadata self.assertEqual(md, get_md) # delete metadata for key in md.keys(): resp, del_md = self.shares_client\ .delete_metadata(share["id"], key) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify deletion of metadata resp, get_metadata = self.shares_client.get_metadata(share["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual({}, get_metadata) @test.attr(type=['positive', ]) def test_set_and_update_metadata_by_key(self): md1 = {u"key5": u"value5", u"key6": u"value6", } md2 = {u"key7": u"value7", u"key8": u"value8", } # create share _, share = self.create_share_wait_for_active() # set metadata resp, set_md = self.shares_client.set_metadata(share["id"], md1) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # update metadata resp, upd_md = self.shares_client\ .update_all_metadata(share["id"], md2) self.assertIn(int(resp["status"]), 
test.HTTP_SUCCESS) # get metadata resp, get_md = self.shares_client.get_metadata(share["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify metadata self.assertEqual(md2, get_md) @test.attr(type=['positive', ]) def test_set_metadata_min_size_key(self): resp, min = self.shares_client.set_metadata(self.share["id"], {"k": "value"}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_set_metadata_max_size_key(self): max_key = "k" * 255 resp, max = self.shares_client.set_metadata(self.share["id"], {max_key: "value"}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_set_metadata_min_size_value(self): resp, min = self.shares_client.set_metadata(self.share["id"], {"key": "v"}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_set_metadata_max_size_value(self): max_value = "v" * 1023 resp, body = self.shares_client.set_metadata(self.share["id"], {"key": max_value}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_upd_metadata_min_size_key(self): resp, body = self.shares_client.update_all_metadata(self.share["id"], {"k": "value"}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_upd_metadata_max_size_key(self): max_key = "k" * 255 resp, body = self.shares_client.update_all_metadata(self.share["id"], {max_key: "value"}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_upd_metadata_min_size_value(self): resp, body = self.shares_client.update_all_metadata(self.share["id"], {"key": "v"}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type=['positive', ]) def test_upd_metadata_max_size_value(self): max_value = "v" * 1023 resp, body = self.shares_client.update_all_metadata(self.share["id"], {"key": max_value}) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) class 
SharesMetadataTestXML(SharesMetadataTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_shares.py0000664000175000017500000002510012301410454027244 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.shares import base from tempest.common.utils.data_utils import rand_name from tempest import exceptions from tempest import test class SharesTestJSON(base.BaseSharesTest): def tearDown(self): super(SharesTestJSON, self).tearDown() self.clear_resources() @test.attr(type=['positive', ]) def test_create_delete_share(self): # create share resp, share = self.create_share_wait_for_active() self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # delete share resp, __ = self.shares_client.delete_share(share['id']) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_resource_deletion(share['id']) self.assertRaises(exceptions.NotFound, self.shares_client.get_share, share['id']) @test.attr(type=['positive', ]) def test_get_share(self): # test data name = rand_name("rand-share-name-") desc = rand_name("rand-share-description-") size = 1 # create share resp, share = self.create_share_wait_for_active(name=name, description=desc, size=size) # get share resp, share = self.shares_client.get_share(share['id']) # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # 
verify keys expected_keys = ["status", "description", "links", "availability_zone", "created_at", "export_location", "share_proto", "name", "snapshot_id", "id", "size"] actual_keys = share.keys() [self.assertIn(key, actual_keys) for key in expected_keys] # verify values msg = "Expected name: '%s', actual name: '%s'" % (name, share["name"]) self.assertEqual(name, str(share["name"]), msg) msg = "Expected description: '%s', "\ "actual description: '%s'" % (desc, share["description"]) self.assertEqual(desc, str(share["description"]), msg) msg = "Expected size: '%s', actual size: '%s'" % (size, share["size"]) self.assertEqual(size, int(share["size"]), msg) @test.attr(type=['positive', ]) def test_list_shares(self): # create share resp, share = self.create_share_wait_for_active() # list shares resp, shares = self.shares_client.list_shares() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify keys keys = ["name", "id", "links"] [self.assertIn(key, sh.keys()) for sh in shares for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in shares if sid["id"] in share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(len(gen), 1, msg) @test.attr(type=['positive', 'gate']) def test_list_shares_with_detail(self): # create share resp, share = self.create_share_wait_for_active() # list shares resp, shares = self.shares_client.list_shares_with_detail() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify keys keys = ["status", "description", "links", "availability_zone", "created_at", "export_location", "share_proto", "name", "snapshot_id", "id", "size"] [self.assertIn(key, sh.keys()) for sh in shares for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in shares if sid["id"] in share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(len(gen), 1, msg) @test.attr(type=['positive', ]) def 
test_create_delete_snapshot(self): # create share resp, share = self.create_share_wait_for_active() # create snapshot resp, snap = self.create_snapshot_wait_for_active(share["id"]) # delete snapshot self.shares_client.delete_snapshot(snap["id"]) self.shares_client.wait_for_resource_deletion(snap["id"]) self.assertRaises(exceptions.NotFound, self.shares_client.get_snapshot, snap['id']) @test.attr(type=['positive', ]) def test_get_snapshot(self): # create share resp, share = self.create_share_wait_for_active() #create snapshot name = rand_name("tempest-snap-") desc = rand_name("tempest-snap-description-") resp, snap = self.create_snapshot_wait_for_active(share["id"], name, desc) # get snapshot resp, get = self.shares_client.get_snapshot(snap["id"]) # verify data self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify keys expected_keys = ["status", "links", "share_id", "name", "export_location", "share_proto", "created_at", "description", "id", "share_size"] actual_keys = get.keys() [self.assertIn(key, actual_keys) for key in expected_keys] # verify data msg = "Expected name: '%s', actual name: '%s'" % (name, get["name"]) self.assertEqual(name, get["name"], msg) msg = "Expected description: '%s', "\ "actual description: '%s'" % (desc, get["description"]) self.assertEqual(desc, get["description"], msg) msg = "Expected share_id: '%s', "\ "actual share_id: '%s'" % (name, get["share_id"]) self.assertEqual(share["id"], get["share_id"], msg) @test.attr(type=['positive', ]) def test_list_snapshots(self): # create share resp, share = self.create_share_wait_for_active() #create snapshot resp, snap = self.create_snapshot_wait_for_active(share["id"]) # list share snapshots resp, snaps = self.shares_client.list_snapshots() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify keys keys = ["id", "name", "links"] [self.assertIn(key, sn.keys()) for sn in snaps for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid 
in snaps if sid["id"] in snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEquals(1, len(gen), msg) @test.attr(type=['positive', 'gate']) def test_list_snapshots_with_detail(self): # create share resp, share = self.create_share_wait_for_active() # create snapshot resp, snap = self.create_snapshot_wait_for_active(share["id"]) # list share snapshots resp, snaps = self.shares_client.list_snapshots_with_detail() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify keys keys = ["status", "links", "share_id", "name", "export_location", "share_proto", "created_at", "description", "id", "share_size"] [self.assertIn(key, sn.keys()) for sn in snaps for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in snaps if sid["id"] in snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(len(gen), 1, msg) @test.attr(type=['positive', 'smoke', 'gate']) def test_create_share_from_snapshot(self): # create share resp, share = self.create_share_wait_for_active() # create snapshot resp, snap = self.create_snapshot_wait_for_active(share["id"]) # crate share from snapshot resp, s2 = self.create_share_wait_for_active(snapshot_id=snap["id"]) # verify share, created from snapshot resp, get = self.shares_client.get_share(s2["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) msg = "Expected snapshot_id %s as "\ "source of share %s" % (snap["id"], get["snapshot_id"]) self.assertEqual(get["snapshot_id"], snap["id"], msg) @test.attr(type=['positive', 'smoke', 'gate']) def test_extensions(self): # get extensions resp, extensions = self.shares_client.list_extensions() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) keys = ['alias', 'updated', 'namespace', 'name', 'description'] [self.assertIn(key, ext.keys()) for ext in extensions for key in keys] @test.attr(type=['positive', ]) def test_rename_share(self): # create share _, share = 
self.create_share_wait_for_active() # rename share new_name = rand_name("new_name_") new_desc = rand_name("new_desc_") resp, renamed = self.shares_client.rename(share["id"], new_name, new_desc) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(new_name, renamed["name"]) self.assertEqual(new_desc, renamed["description"]) @test.attr(type=['positive', ]) def test_rename_snapshot(self): # create share _, share = self.create_share_wait_for_active() # create snapshot _, snap = self.create_snapshot_wait_for_active(share["id"]) # rename snapshot new_name = rand_name("new_name_for_snap_") new_desc = rand_name("new_desc_for_snap_") resp, renamed = self.shares_client.rename_snapshot(snap["id"], new_name, new_desc) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(new_name, renamed["name"]) self.assertEqual(new_desc, renamed["description"]) class SharesTestXML(SharesTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_security_negative.py0000664000175000017500000001411312301410454031512 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest.api.shares import base from tempest import clients_shares as clients from tempest import config_shares as config from tempest import exceptions from tempest import test CONF = config.CONF class SharesSecurityNegativeTestJSON(base.BaseSharesTest): @classmethod def setUpClass(cls): super(SharesSecurityNegativeTestJSON, cls).setUpClass() if not CONF.shares.only_admin_or_owner_for_action: skip_msg = "Disabled from tempest configuration" raise cls.skipException(skip_msg) cls.client = cls.shares_client cls.alt_client = clients.AltManager().shares_client _, cls.share = cls.create_share_wait_for_active() _, cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"]) @test.attr(type='negative') def test_tenant_isolation_for_share_list(self): # list shares __, shares = self.client.list_shares() # our share id is in list and have no duplicates gen = [sid["id"] for sid in shares if sid["id"] in self.share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEquals(len(gen), 1, msg) # list shares from another tenant __, alt_shares = self.alt_client.list_shares() # our share id is not in list gen = [s["id"] for s in alt_shares if s["id"] in self.share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEquals(len(gen), 0, msg) @test.attr(type='negative') def test_tenant_isolation_share_delete(self): # try delete share from another tenant self.assertRaises(exceptions.Unauthorized, self.alt_client.delete_share, self.share["id"]) @test.attr(type='negative') def test_tenant_isolation_share_get(self): # try delete share from another tenant self.assertRaises(exceptions.Unauthorized, self.alt_client.get_share, self.share["id"]) @test.attr(type='negative') def test_tenant_isolation_for_share_snapshot_list(self): # list share snapshots __, snaps = self.client.list_snapshots() # our share id is in list and have no duplicates gen = [sid["id"] for sid in snaps if sid["id"] in self.snap["id"]] msg = "expected id lists %s 
times in share list" % (len(gen)) self.assertEquals(len(gen), 1, msg) # list shares from another tenant __, alt_snaps = self.alt_client.list_snapshots() # our snapshot id is not in list gen = [sid["id"] for sid in alt_snaps if sid["id"] in self.snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEquals(len(gen), 0, msg) @test.attr(type='negative') def test_tenant_isolation_share_snapshot_delete(self): # try delete share from another tenant self.assertRaises(exceptions.NotFound, self.alt_client.delete_snapshot, self.snap["id"]) @test.attr(type='negative') def test_tenant_isolation_share_snapshot_get(self): # try delete share from another tenant self.assertRaises(exceptions.NotFound, self.alt_client.get_snapshot, self.snap["id"]) @test.attr(type='negative') def test_tenant_isolation_share_access_list(self): # try list share rules self.assertRaises(exceptions.Unauthorized, # NotFound or Unauthorized self.alt_client.list_access_rules, self.share["id"]) @test.attr(type='negative') def test_tenant_isolation_share_access_rule_delete(self): # create rule resp, rule = self.client.create_access_rule(self.share["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_access_rule_status(self.share["id"], rule["id"], "active") # try delete rule self.assertRaises(exceptions.Unauthorized, # NotFound or Unauthorized self.alt_client.delete_access_rule, self.share["id"], rule["id"]) @test.attr(type='negative') def test_create_snapshot_from_alien_share(self): # try create snapshot in another tenant self.assertRaises(exceptions.Unauthorized, # NotFound or Unauthorized self.create_snapshot_wait_for_active, share_id=self.share["id"], client=self.alt_client) @test.attr(type='negative') def test_create_share_from_alien_snapshot(self): # try create share in another tenant from snap self.assertRaises(exceptions.NotFound, # NotFound or Unauthorized self.create_share_wait_for_active, snapshot_id=self.snap["id"], 
client=self.alt_client) @test.attr(type='negative') def test_create_access_rule_to_alien_share(self): # try create access rule from another tenant self.assertRaises(exceptions.Unauthorized, self.alt_client.create_access_rule, self.share["id"], access_to="1.1.1.1") # There is no need to perform security tests twice #class SharesSecurityNegativeTestXML(SharesSecurityNegativeTestJSON): # _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/__init__.py0000664000175000017500000000000012301410454026447 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/base.py0000664000175000017500000001327312301410454025642 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import clients_shares as clients from tempest.common import isolated_creds from tempest import config_shares as config from tempest import exceptions from tempest import test CONF = config.CONF class BaseSharesTest(test.BaseTestCase): """Base test case class for all Manila API tests.""" _interface = "json" resources_of_tests = [] @classmethod def setUpClass(cls): if not CONF.service_available.manila: skip_msg = "Manila not available" raise cls.skipException(skip_msg) super(BaseSharesTest, cls).setUpClass() cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__) if CONF.compute.allow_tenant_isolation: creds = cls.isolated_creds.get_primary_creds() username, tenant_name, password = creds cls.os = clients.Manager(username=username, password=password, tenant_name=tenant_name, interface=cls._interface) else: cls.os = clients.Manager(interface=cls._interface) cls.shares_client = cls.os.shares_client cls.build_interval = CONF.shares.build_interval cls.build_timeout = CONF.shares.build_timeout @classmethod def tearDownClass(cls): super(BaseSharesTest, cls).tearDownClass() cls.isolated_creds.clear_isolated_creds() cls.clear_resources() @classmethod def create_share_wait_for_active(cls, share_protocol=None, size=1, name=None, snapshot_id=None, description="tempests share", metadata={}, client=None): if client is None: client = cls.shares_client r, s = client.create_share(share_protocol=share_protocol, size=size, name=name, snapshot_id=snapshot_id, description=description, metadata=metadata) resource = {"type": "share", "body": s, "deleted": False} cls.resources_of_tests.insert(0, resource) # last in first out (LIFO) client.wait_for_share_status(s["id"], "available") return r, s @classmethod def create_snapshot_wait_for_active(cls, share_id, name=None, description="tempests share-ss", force=False, client=None): if client is None: client = cls.shares_client r, s = client.create_snapshot(share_id, name, description, force) resource = {"type": "snapshot", 
"body": s, "deleted": False} cls.resources_of_tests.insert(0, resource) # last in first out (LIFO) client.wait_for_snapshot_status(s["id"], "available") return r, s @classmethod def clear_resources(cls, client=None): if client is None: client = cls.shares_client # Here we expect, that all resources were added as LIFO # due to restriction of deletion resources, that is in the chain for index, res in enumerate(cls.resources_of_tests): if not(res["deleted"]): try: if res["type"] is "share": client.delete_share(res["body"]['id']) elif res["type"] is "snapshot": client.delete_snapshot(res["body"]['id']) cls.resources_of_tests[index]["deleted"] = True except exceptions.NotFound: pass client.wait_for_resource_deletion(res["body"]['id']) class BaseSharesAdminTest(BaseSharesTest): """Base test case class for all Shares Admin API tests.""" @classmethod def setUpClass(cls): super(BaseSharesAdminTest, cls).setUpClass() cls.adm_user = CONF.identity.admin_username cls.adm_pass = CONF.identity.admin_password cls.adm_tenant = CONF.identity.admin_tenant_name if not all((cls.adm_user, cls.adm_pass, cls.adm_tenant)): msg = ("Missing Shares Admin API credentials " "in configuration.") raise cls.skipException(msg) if CONF.compute.allow_tenant_isolation: creds = cls.isolated_creds.get_admin_creds() admin_username, admin_tenant_name, admin_password = creds cls.os_adm = clients.Manager(username=admin_username, password=admin_password, tenant_name=admin_tenant_name, interface=cls._interface) else: cls.os_adm = clients.AdminManager(interface=cls._interface) cls.shares_client = cls.os_adm.shares_client manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/0000775000175000017500000000000012301410516025437 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/test_admin_actions.py0000664000175000017500000000417112301410454031664 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis 
Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.shares import base from tempest import test class AdminActionsTestJSON(base.BaseSharesAdminTest): @classmethod def setUpClass(cls): super(AdminActionsTestJSON, cls).setUpClass() # create share (available or error) cls.share_states = ["error", "available"] __, cls.sh = cls.create_share_wait_for_active() # create snapshot (available or error) cls.snapshot_states = ["error", "available"] __, cls.sn = cls.create_snapshot_wait_for_active(cls.sh["id"]) @test.attr(type=['positive', ]) def test_reset_share_state(self): for status in self.share_states: resp, __ = self.shares_client.reset_state(self.sh["id"], status=status) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_share_status(self.sh["id"], status) @test.attr(type=['positive', ]) def test_reset_snapshot_state_to_error(self): for status in self.snapshot_states: resp, __ = self.shares_client.reset_state(self.sn["id"], s_type="snapshots", status=status) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_snapshot_status(self.sn["id"], status) class AdminActionsTestXML(AdminActionsTestJSON): _interface = 'xml' ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 
00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/test_admin_actions_negative.pymanila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/test_admin_actions_negative.p0000664000175000017500000000641112301410454033354 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.shares import base from tempest import clients_shares as clients from tempest import config_shares as config from tempest import exceptions from tempest import test import testtools CONF = config.CONF class AdminActionsNegativeTestJSON(base.BaseSharesAdminTest): @classmethod def setUpClass(cls): super(AdminActionsNegativeTestJSON, cls).setUpClass() # create share (available or error) __, cls.sh = cls.create_share_wait_for_active() # create snapshot (available or error) __, cls.sn = cls.create_snapshot_wait_for_active(cls.sh["id"]) cls.member_shares_client = clients.Manager().shares_client @test.attr(type=['negative', ]) def test_reset_unexistant_share_state(self): self.assertRaises(exceptions.NotFound, self.shares_client.reset_state, "fake") @test.attr(type=['negative', ]) def test_reset_unexistant_snapshot_state(self): self.assertRaises(exceptions.NotFound, self.shares_client.reset_state, "fake", s_type="snapshots") @test.attr(type=['negative', ]) def test_reset_share_state_to_unacceptable_state(self): 
self.assertRaises(exceptions.BadRequest, self.shares_client.reset_state, self.sh["id"], status="fake") @test.attr(type=['negative', ]) def test_reset_snapshot_state_to_unacceptable_state(self): self.assertRaises(exceptions.BadRequest, self.shares_client.reset_state, self.sn["id"], s_type="snapshots", status="fake") @testtools.skipIf(not CONF.shares.only_admin_or_owner_for_action, "Skipped, because not only admin allowed") @test.attr(type=['negative', ]) def test_try_reset_share_state_with_member(self): # Even if member from another tenant, it should be unauthorized self.assertRaises(exceptions.Unauthorized, self.member_shares_client.reset_state, self.sh["id"]) @testtools.skipIf(not CONF.shares.only_admin_or_owner_for_action, "Skipped, because not only admin allowed") @test.attr(type=['negative', ]) def test_try_reset_snapshot_state_with_member(self): # Even if member from another tenant, it should be unauthorized self.assertRaises(exceptions.Unauthorized, self.member_shares_client.reset_state, self.sn["id"], s_type="snapshots") class AdminActionsNegativeTestXML(AdminActionsNegativeTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/__init__.py0000664000175000017500000000000012301410454027537 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/test_quotas.py0000664000175000017500000002564012301410454030374 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.shares import base from tempest import test class SharesQuotasTestJSON(base.BaseSharesAdminTest): # Tests should be used without unlimited quotas (-1). # It is recommended to delete all entities in Manila before test run. @classmethod def setUpClass(cls): super(SharesQuotasTestJSON, cls).setUpClass() cls.identity_client = cls._get_identity_admin_client() cls.tenant = cls.identity_client\ .get_tenant_by_name(cls.shares_client.tenant_name) cls.user = cls.identity_client\ .get_user_by_username(cls.tenant["id"], cls.shares_client.username) # save quotas before tests __, cls.t_q = cls.shares_client.show_quotas(cls.tenant["id"]) __, cls.u_q = cls.shares_client.show_quotas(cls.tenant["id"], cls.user["id"]) value = 1000 # set quotas before tests cls.shares_client.update_quotas(cls.tenant["id"], shares=value, snapshots=value, gigabytes=value) cls.shares_client.update_quotas(cls.tenant["id"], cls.user["id"], shares=value, snapshots=value, gigabytes=value) @classmethod def tearDownClass(cls): super(SharesQuotasTestJSON, cls).tearDownClass() # back up quota values cls.shares_client.update_quotas(cls.tenant["id"], shares=cls.t_q["shares"], snapshots=cls.t_q["snapshots"], gigabytes=cls.t_q["gigabytes"]) cls.shares_client.update_quotas(cls.tenant["id"], cls.user["id"], shares=cls.u_q["shares"], snapshots=cls.u_q["snapshots"], gigabytes=cls.u_q["gigabytes"]) @test.attr(type=['positive', 'smoke']) def test_limits_keys(self): # list limits resp, limits = self.shares_client.get_limits() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) keys = ["rate", "absolute"] [self.assertIn(key, limits.keys()) for key in keys] abs_keys = ["maxTotalShareGigabytes", "maxTotalShares", "maxTotalSnapshots"] [self.assertIn(key, limits["absolute"].keys()) for key in abs_keys] @test.attr(type=['positive', 'smoke']) def test_limits_values(self): # list limits resp, 
limits = self.shares_client.get_limits() # verify response self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify integer values for absolute limits self.assertGreater(int(limits["absolute"]["maxTotalShareGigabytes"]), -2) self.assertGreater(int(limits["absolute"]["maxTotalShares"]), -2) self.assertGreater(int(limits["absolute"]["maxTotalSnapshots"]), -2) @test.attr(type='positive') def test_default_quotas(self): resp, quotas = self.shares_client.default_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertGreater(int(quotas["gigabytes"]), -2) self.assertGreater(int(quotas["shares"]), -2) self.assertGreater(int(quotas["snapshots"]), -2) @test.attr(type=['positive', 'smoke']) def test_show_quotas(self): resp, quotas = self.shares_client.show_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertGreater(int(quotas["gigabytes"]), -2) self.assertGreater(int(quotas["shares"]), -2) self.assertGreater(int(quotas["snapshots"]), -2) @test.attr(type=['positive', 'smoke']) def test_show_quotas_for_user(self): resp, quotas = self.shares_client.show_quotas(self.tenant["id"], self.user["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertGreater(int(quotas["gigabytes"]), -2) self.assertGreater(int(quotas["shares"]), -2) self.assertGreater(int(quotas["snapshots"]), -2) @test.attr(type='positive') def test_default_quotas_with_empty_tenant_id(self): # it should return default quotas without any tenant-id resp, body = self.shares_client.default_quotas("") self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertTrue(len(body) > 0) @test.attr(type='positive') def test_update_tenant_quota_shares(self): # get current quotas resp, quotas = self.shares_client.show_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) new_quota = int(quotas["shares"]) + 2 # set new quota for shares resp, updated = self.shares_client.update_quotas(self.tenant["id"], 
shares=new_quota) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["shares"]), new_quota) @test.attr(type='positive') def test_update_user_quota_shares(self): # get current quotas resp, quotas = self.shares_client.show_quotas(self.tenant["id"], self.user["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) new_quota = int(quotas["shares"]) - 1 # set new quota for shares resp, updated = self.shares_client.update_quotas(self.tenant["id"], shares=new_quota) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["shares"]), new_quota) @test.attr(type='positive') def test_update_tenant_quota_snapshots(self): # get current quotas resp, quotas = self.shares_client.show_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) new_quota = int(quotas["snapshots"]) + 2 # set new quota for snapshots resp, updated = self.shares_client.update_quotas(self.tenant["id"], snapshots=new_quota) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["snapshots"]), new_quota) @test.attr(type='positive') def test_update_user_quota_snapshots(self): # get current quotas resp, quotas = self.shares_client.show_quotas(self.tenant["id"], self.user["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) new_quota = int(quotas["snapshots"]) - 1 # set new quota for snapshots resp, updated = self.shares_client.update_quotas(self.tenant["id"], snapshots=new_quota) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["snapshots"]), new_quota) @test.attr(type='positive') def test_update_tenant_quota_gigabytes(self): # get current quotas resp, custom = self.shares_client.show_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # make quotas for update gigabytes = int(custom["gigabytes"]) + 2 # set new quota for shares resp, updated = self.shares_client.update_quotas(self.tenant["id"], gigabytes=gigabytes) 
self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["gigabytes"]), gigabytes) @test.attr(type='positive') def test_update_user_quota_gigabytes(self): # get current quotas resp, custom = self.shares_client.show_quotas(self.tenant["id"], self.user["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # make quotas for update gigabytes = int(custom["gigabytes"]) - 1 # set new quota for shares resp, updated = self.shares_client.update_quotas(self.tenant["id"], gigabytes=gigabytes) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["gigabytes"]), gigabytes) @test.attr(type='positive') def test_reset_tenant_quotas(self): # get default_quotas resp, default = self.shares_client.default_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # get current quotas resp, custom = self.shares_client.show_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # make quotas for update shares = int(custom["shares"]) + 2 snapshots = int(custom["snapshots"]) + 2 gigabytes = int(custom["gigabytes"]) + 2 # set new quota resp, updated = self.shares_client.update_quotas(self.tenant["id"], shares=shares, snapshots=snapshots, gigabytes=gigabytes) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(updated["shares"]), shares) self.assertEqual(int(updated["snapshots"]), snapshots) self.assertEqual(int(updated["gigabytes"]), gigabytes) # reset customized quotas resp, reseted = self.shares_client.reset_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # verify quotas resp, after_delete = self.shares_client.show_quotas(self.tenant["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.assertEqual(int(after_delete["shares"]), int(default["shares"])) self.assertEqual(int(after_delete["snapshots"]), int(default["snapshots"])) self.assertEqual(int(after_delete["gigabytes"]), int(default["gigabytes"])) class 
SharesQuotasTestXML(SharesQuotasTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/admin/test_quotas_negative.py0000664000175000017500000002121112301410454032244 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.shares import base from tempest import exceptions from tempest import exceptions_shares from tempest import test import unittest class SharesQuotasNegativeTestJSON(base.BaseSharesAdminTest): # Tests should be used without unlimited quotas (-1). # It is recommended to delete all entities in Manila before test run. 
@classmethod def setUpClass(cls): super(SharesQuotasNegativeTestJSON, cls).setUpClass() cls.identity_client = cls._get_identity_admin_client() cls.tenant = cls.identity_client\ .get_tenant_by_name(cls.shares_client.tenant_name) cls.user = cls.identity_client\ .get_user_by_username(cls.tenant["id"], cls.shares_client.username) # save quotas before tests __, cls.t_q = cls.shares_client.show_quotas(cls.tenant["id"]) __, cls.u_q = cls.shares_client.show_quotas(cls.tenant["id"], cls.user["id"]) value = 1000 # set quotas before tests cls.shares_client.update_quotas(cls.tenant["id"], shares=value, snapshots=value, gigabytes=value) cls.shares_client.update_quotas(cls.tenant["id"], cls.user["id"], shares=value, snapshots=value, gigabytes=value) @classmethod def tearDownClass(cls): super(SharesQuotasNegativeTestJSON, cls).tearDownClass() # back up quota values cls.shares_client.update_quotas(cls.tenant["id"], shares=cls.t_q["shares"], snapshots=cls.t_q["snapshots"], gigabytes=cls.t_q["gigabytes"]) cls.shares_client.update_quotas(cls.tenant["id"], cls.user["id"], shares=cls.u_q["shares"], snapshots=cls.u_q["snapshots"], gigabytes=cls.u_q["gigabytes"]) @test.attr(type='negative') @unittest.skip("Skip until Bug #1234244 is fixed") def test_quotas_with_wrong_tenant_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.get_quotas, "wrong_tenant_id") @test.attr(type='negative') @unittest.skip("Skip until Bug #1234244 is fixed") def test_quotas_with_wrong_user_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.get_quotas, self.tenant["id"], "wrong_user_id") @test.attr(type='negative') def test_quotas_with_empty_tenant_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.show_quotas, "") @test.attr(type='negative') @unittest.skip("Skip until Bug #1233170 is fixed") def test_default_quotas_with_wrong_tenant_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.default_quotas, "wrong_tenant_id") @test.attr(type='negative') 
def test_reset_quotas_with_empty_tenant_id(self): self.assertRaises(exceptions.NotFound, self.shares_client.reset_quotas, "") @test.attr(type='negative') def test_update_shares_quota_with_wrong_data(self): # -1 is acceptable value as unlimited self.assertRaises(exceptions.BadRequest, self.shares_client.update_quotas, self.tenant["id"], shares=-2) @test.attr(type='negative') def test_update_snapshots_quota_with_wrong_data(self): # -1 is acceptable value as unlimited self.assertRaises(exceptions.BadRequest, self.shares_client.update_quotas, self.tenant["id"], snapshots=-2) @test.attr(type='negative') def test_update_gigabytes_quota_with_wrong_data(self): # -1 is acceptable value as unlimited self.assertRaises(exceptions.BadRequest, self.shares_client.update_quotas, self.tenant["id"], gigabytes=-2) @test.attr(type='negative') def test_create_share_with_size_bigger_than_quota(self): new_quota = 25 overquota = new_quota + 2 # set quota for gigabytes resp, updated = self.shares_client.update_quotas(self.tenant["id"], gigabytes=new_quota) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # try schedule share with size, bigger than gigabytes quota self.assertRaises(exceptions.OverLimit, self.create_share_wait_for_active, size=overquota) @test.attr(type='negative') def test_unlimited_quota_for_gigabytes(self): # get current quota _, quotas = self.shares_client.show_quotas(self.tenant["id"]) # set unlimited quota for gigabytes resp, __ = self.shares_client.update_quotas(self.tenant["id"], gigabytes=-1) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) resp, __ = self.shares_client.update_quotas(self.tenant["id"], self.user["id"], gigabytes=-1) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # share should be scheduled self.assertRaises(exceptions_shares.ShareBuildErrorException, self.create_share_wait_for_active, size=987654) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) # return quotas as it was self.shares_client.update_quotas(self.tenant["id"], 
gigabytes=quotas["gigabytes"]) self.shares_client.update_quotas(self.tenant["id"], self.user["id"], gigabytes=quotas["gigabytes"]) @test.attr(type='negative') def test_try_set_user_quota_gigabytes_bigger_than_tenant_quota(self): # get current quotas for tenant _, tenant_quotas = self.shares_client.show_quotas(self.tenant["id"]) # try set user quota for gigabytes bigger than tenant quota bigger_value = int(tenant_quotas["gigabytes"]) + 2 self.assertRaises(exceptions.BadRequest, self.shares_client.update_quotas, self.tenant["id"], self.user["id"], gigabytes=bigger_value) @test.attr(type='negative') def test_try_set_user_quota_shares_bigger_than_tenant_quota(self): # get current quotas for tenant _, tenant_quotas = self.shares_client.show_quotas(self.tenant["id"]) # try set user quota for shares bigger than tenant quota bigger_value = int(tenant_quotas["shares"]) + 2 self.assertRaises(exceptions.BadRequest, self.shares_client.update_quotas, self.tenant["id"], self.user["id"], shares=bigger_value) @test.attr(type='negative') def test_try_set_user_quota_snaps_bigger_than_tenant_quota(self): # get current quotas for tenant _, tenant_quotas = self.shares_client.show_quotas(self.tenant["id"]) # try set user quota for snapshots bigger than tenant quota bigger_value = int(tenant_quotas["snapshots"]) + 2 self.assertRaises(exceptions.BadRequest, self.shares_client.update_quotas, self.tenant["id"], self.user["id"], snapshots=bigger_value) class SharesQuotasNegativeTestXML(SharesQuotasNegativeTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/api/shares/test_rules.py0000664000175000017500000001253212301410454027116 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.shares import base from tempest import exceptions from tempest import test class ShareRulesTestJSON(base.BaseSharesTest): @classmethod def setUpClass(cls): super(ShareRulesTestJSON, cls).setUpClass() _, cls.share = cls.create_share_wait_for_active() @test.attr(type='positive') def test_create_delete_access_rules_with_one_ip(self): # test data access_type = "ip" access_to = "1.2.3.4" # create rule resp, rule = self.shares_client.create_access_rule(self.share["id"], access_type, access_to) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_access_rule_status(self.share["id"], rule["id"], "active") # delete rule resp, _ = self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.assertIn(int(resp["status"]), [200, 202]) @test.attr(type='positive') def test_create_delete_access_rule_with_cidr(self): # test data access_type = "ip" access_to = "1.2.3.4/32" # create rule resp, rule = self.shares_client.create_access_rule(self.share["id"], access_type, access_to) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_access_rule_status(self.share["id"], rule["id"], "active") # delete rule resp, _ = self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) @test.attr(type='positive') def test_list_access_rules(self): # test data access_type = "ip" access_to = "1.2.3.4" # create rule resp, rule = self.shares_client.create_access_rule(self.share["id"], access_type, access_to) self.assertIn(int(resp["status"]), 
test.HTTP_SUCCESS) self.shares_client.wait_for_access_rule_status(self.share["id"], rule["id"], "active") # list rules resp, rules = self.shares_client.list_access_rules(self.share["id"]) # verify response msg = "We expected status 200, but got %s" % (str(resp["status"])) self.assertEqual(200, int(resp["status"]), msg) # verify keys keys = ["state", "id", "access_type", "access_to"] [self.assertIn(key, r.keys()) for r in rules for key in keys] # verify values self.assertEqual("active", rules[0]["state"]) self.assertEqual(access_type, rules[0]["access_type"]) self.assertEqual(access_to, rules[0]["access_to"]) # our share id in list and have no duplicates gen = [r["id"] for r in rules if r["id"] in rule["id"]] msg = "expected id lists %s times in rule list" % (len(gen)) self.assertEquals(len(gen), 1, msg) @test.attr(type='positive') def test_access_rules_deleted_if_share_deleted(self): # test data access_type = "ip" access_to = "1.2.3.0/24" # create share resp, share = self.create_share_wait_for_active() # create rule resp, rule = self.shares_client.create_access_rule(share["id"], access_type, access_to) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_access_rule_status(share["id"], rule["id"], "active") # delete share resp, _ = self.shares_client.delete_share(share['id']) self.assertIn(int(resp["status"]), test.HTTP_SUCCESS) self.shares_client.wait_for_resource_deletion(share['id']) # verify absence of rules for nonexistent share id self.assertRaises(exceptions.NotFound, self.shares_client.list_access_rules, share['id']) class ShareRulesTestXML(ShareRulesTestJSON): _interface = 'xml' manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/cli/0000775000175000017500000000000012301410516023060 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/cli/manilaclient.py0000664000175000017500000000172112301410454026074 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 
Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import cli class ClientTestBase(cli.ClientTestBase): def manila(self, action, flags='', params='', admin=True, fail_ok=False): """Executes manila command for the given action.""" return self.cmd_with_auth( 'manila', action, flags, params, admin, fail_ok) manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/cli/simple_read_only/0000775000175000017500000000000012301410516026405 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/cli/simple_read_only/test_manila.py0000664000175000017500000001222212301410454031257 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import subprocess from tempest.cli import manilaclient from tempest import config_shares as config CONF = config.CONF class SimpleReadOnlyManilaClientTest(manilaclient.ClientTestBase): """Basic, read-only tests for Manila CLI client. Checks return values and output of read-only commands. These tests do not presume any content, nor do they create their own. They only verify the structure of output if present. """ @classmethod def setUpClass(cls): super(SimpleReadOnlyManilaClientTest, cls).setUpClass() if not CONF.service_available.manila: raise cls.skipException("Manila not available") def test_manila_fake_action(self): self.assertRaises(subprocess.CalledProcessError, self.manila, 'this-does-not-exist') def test_manila_absolute_limit_list(self): roles = self.parser.listing(self.manila('absolute-limits')) self.assertTableStruct(roles, ['Name', 'Value']) def test_manila_shares_list(self): self.manila('list') def test_manila_shares_list_all_tenants(self): self.manila('list', params='--all-tenants') def test_manila_shares_list_filter_by_name(self): self.manila('list', params='--name name') def test_manila_shares_list_filter_by_status(self): self.manila('list', params='--status status') def test_manila_endpoints(self): self.manila('endpoints') def test_manila_quota_class_show(self): """This CLI can accept and string as param.""" roles = self.parser.listing(self.manila('quota-class-show', params='abc')) self.assertTableStruct(roles, ['Property', 'Value']) def test_manila_quota_defaults(self): """This CLI can accept and string as param.""" roles = self.parser.listing(self.manila('quota-defaults', params=self.identity. admin_tenant_name)) self.assertTableStruct(roles, ['Property', 'Value']) def test_manila_quota_show(self): """This CLI can accept and string as param.""" roles = self.parser.listing(self.manila('quota-show', params=self.identity. 
admin_tenant_name)) self.assertTableStruct(roles, ['Property', 'Value']) def test_manila_rate_limits(self): self.manila('rate-limits') def test_manila_snapshot_list(self): self.manila('snapshot-list') def test_manila_snapshot_list_all_tenants(self): self.manila('snapshot-list', params='--all-tenants') def test_manila_snapshot_list_filter_by_name(self): self.manila('snapshot-list', params='--name name') def test_manila_snapshot_list_filter_by_status(self): self.manila('snapshot-list', params='--status status') def test_manila_snapshot_list_filter_by_share_id(self): self.manila('snapshot-list', params='--share-id share_id') def test_manila_credentials(self): self.manila('credentials') def test_manila_list_extensions(self): roles = self.parser.listing(self.manila('list-extensions')) self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated']) def test_manila_help(self): help_text = self.manila('help') lines = help_text.split('\n') self.assertFirstLineStartsWith(lines, 'usage: manila') commands = [] cmds_start = lines.index('Positional arguments:') cmds_end = lines.index('Optional arguments:') command_pattern = re.compile('^ {4}([a-z0-9\-\_]+)') for line in lines[cmds_start:cmds_end]: match = command_pattern.match(line) if match: commands.append(match.group(1)) commands = set(commands) wanted_commands = set(('absolute-limits', 'list', 'help', 'quota-show', 'access-list', 'snapshot-list', 'allow-access', 'deny-access')) self.assertFalse(wanted_commands - commands) # Optional arguments: def test_manila_version(self): self.manila('', flags='--version') def test_manila_debug_list(self): self.manila('list', flags='--debug') def test_manila_retries_list(self): self.manila('list', flags='--retries 3') def test_manila_region_list(self): self.manila('list', flags='--os-region-name ' + self.identity.region) manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/config_shares.py0000664000175000017500000000513312301410454025500 0ustar chuckchuck00000000000000# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function from oslo.config import cfg from tempest import config service_available_group = cfg.OptGroup(name="service_available", title="Available OpenStack Services") ServiceAvailableGroup = [ cfg.BoolOpt('manila', default=True, help="Whether or not manila is expected to be available"), ] shares_group = cfg.OptGroup(name="shares", title="Shares Service Options") SharesGroup = [ cfg.StrOpt('share_protocol', default="nfs", help="File share type by default"), cfg.IntOpt('build_interval', default=10, help='Time in seconds between volume availability checks.'), cfg.IntOpt('build_timeout', default=300, help='Timeout in seconds to wait for a volume to become' 'available.'), cfg.StrOpt('catalog_type', default="share", help='Catalog type of the Shares service.'), cfg.BoolOpt('only_admin_or_owner_for_action', default=True, help='This flag use tests that verify policy.json rules'), ] # this should never be called outside of this class class TempestConfigPrivateManila(config.TempestConfigPrivate): # manila's config wrap over standard config def __init__(self, parse_conf=True): super(TempestConfigPrivateManila, self).__init__() config.register_opt_group(cfg.CONF, service_available_group, ServiceAvailableGroup) config.register_opt_group(cfg.CONF, shares_group, SharesGroup) self.shares = cfg.CONF.shares class 
TempestConfigProxyManila(object): _config = None def __getattr__(self, attr): if not self._config: self._config = TempestConfigPrivateManila() return getattr(self._config, attr) CONF = TempestConfigProxyManila() manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/clients_shares.py0000664000175000017500000000514412301410454025676 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import clients from tempest import config_shares as config from tempest import exceptions from tempest.services.shares.json import shares_client as j_shares_client from tempest.services.shares.xml import shares_client as x_shares_client CONF = config.CONF class Manager(clients.Manager): """ Top level manager for OpenStack Compute clients """ def __init__(self, username=None, password=None, tenant_name=None, interface='json'): super(Manager, self).__init__(username, password, tenant_name, interface) client_args = (CONF, self.username, self.password, self.auth_url, self.tenant_name) if interface == 'xml': self.shares_client = x_shares_client.SharesClientXML(*client_args) elif interface == 'json': self.shares_client = j_shares_client.SharesClientJSON(*client_args) else: msg = "Unsupported interface type `%s'" % interface raise exceptions.InvalidConfiguration(msg) class AltManager(Manager): """ Manager object that uses the alt_XXX credentials for its managed client objects """ def __init__(self, interface='json'): super(AltManager, self).__init__(CONF.identity.alt_username, CONF.identity.alt_password, CONF.identity.alt_tenant_name, interface=interface) class AdminManager(Manager): """ Manager object that uses the admin credentials for its managed client objects """ def __init__(self, interface='json'): super(AdminManager, self).__init__(CONF.identity.admin_username, CONF.identity.admin_password, CONF.identity.admin_tenant_name, interface=interface) manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/exceptions_shares.py0000664000175000017500000000172112301410454026413 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import exceptions class ShareBuildErrorException(exceptions.TempestException): message = "Share %(share_id)s failed to build and is in ERROR status" class AccessRuleBuildErrorException(exceptions.TempestException): message = "Share's rule with id %(rule_id) is in ERROR status" manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/0000775000175000017500000000000012301410516024134 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/0000775000175000017500000000000012301410516025421 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/json/0000775000175000017500000000000012301410516026372 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/json/shares_client.py0000664000175000017500000003161512301410454031576 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from tempest.common import rest_client from tempest.common.utils.data_utils import rand_name from tempest import exceptions from tempest import exceptions_shares import time import urllib class SharesClientJSON(rest_client.RestClient): """ Tempest REST client for Manila. It handles shares and access to it in openstack. """ def __init__(self, config, username, password, auth_url, tenant_name=None): super(SharesClientJSON, self).__init__(config, username, password, auth_url, tenant_name) self.service = self.config.shares.catalog_type # share self.share_protocol = self.config.shares.share_protocol self.build_interval = self.config.shares.build_interval self.build_timeout = self.config.shares.build_timeout self.tenant_name = tenant_name self.username = username def _parse_resp(self, body): if len(body) > 0: body = json.loads(body) if len(body) is 1 and isinstance(body.items()[0][1], (dict, list)): return body[body.items()[0][0]] return body def create_share(self, share_protocol=None, size=1, name=None, snapshot_id=None, description="tempest created share", metadata={}): if name is None: name = rand_name("tempest-created-share-") if share_protocol is None: share_protocol = self.share_protocol post_body = { "share": { "share_proto": share_protocol, "description": description, "snapshot_id": snapshot_id, "name": name, "size": size, "metadata": metadata } } body = json.dumps(post_body) resp, body = self.post("shares", body, self.headers) return resp, self._parse_resp(body) def delete_share(self, share_id): resp, body = self.delete("shares/%s" % share_id, self.headers) return resp, self._parse_resp(body) def list_shares(self): resp, body = self.get("shares", self.headers) return resp, self._parse_resp(body) def list_shares_with_detail(self, params=None): """List the details of all shares.""" url = 'shares/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url, self.headers) return resp, self._parse_resp(body) def get_share(self, 
share_id): uri = "shares/%s" % share_id resp, body = self.get(uri, self.headers) return resp, self._parse_resp(body) def create_access_rule(self, share_id, access_type="ip", access_to="0.0.0.0"): post_body = { "os-allow_access": { "access_type": access_type, "access_to": access_to } } body = json.dumps(post_body) uri = "shares/%s/action" % share_id resp, body = self.post(uri, body, self.headers) return resp, self._parse_resp(body) def list_access_rules(self, share_id): uri = "shares/%s/action" % share_id body = {"os-access_list": None} resp, body = self.post(uri, json.dumps(body), self.headers) return resp, self._parse_resp(body) def delete_access_rule(self, share_id, rule_id): post_body = { "os-deny_access": { "access_id": rule_id } } body = json.dumps(post_body) uri = "shares/%s/action" % share_id return self.post(uri, body, self.headers) def create_snapshot(self, share_id, name=None, description="tempest created share-ss", force=False): if name is None: name = rand_name("tempest-created-share-snap-") post_body = { "snapshot": { "name": name, "force": force, "description": description, "share_id": share_id } } body = json.dumps(post_body) resp, body = self.post("snapshots", body, self.headers) return resp, self._parse_resp(body) def get_snapshot(self, snapshot_id): uri = "snapshots/%s" % snapshot_id resp, body = self.get(uri, self.headers) return resp, self._parse_resp(body) def list_snapshots(self): resp, body = self.get("snapshots", self.headers) return resp, self._parse_resp(body) def list_snapshots_with_detail(self, params=None): """List the details of all shares.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url, self.headers) return resp, self._parse_resp(body) def delete_snapshot(self, snap_id): uri = "snapshots/%s" % snap_id resp, body = self.delete(uri, self.headers) return resp, self._parse_resp(body) def wait_for_share_status(self, share_id, status): """Waits for a Share to reach a given status.""" 
resp, body = self.get_share(share_id) share_name = body['name'] share_status = body['status'] start = int(time.time()) while share_status != status: time.sleep(self.build_interval) resp, body = self.get_share(share_id) share_status = body['status'] if 'error' in share_status: raise exceptions_shares.\ ShareBuildErrorException(share_id=share_id) if int(time.time()) - start >= self.build_timeout: message = ('Share %s failed to reach %s status within ' 'the required time (%s s).' % (share_name, status, self.build_timeout)) raise exceptions.TimeoutException(message) def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Share to reach a given status.""" resp, body = self.get_snapshot(snapshot_id) snapshot_name = body['name'] snapshot_status = body['status'] start = int(time.time()) while snapshot_status != status: time.sleep(self.build_interval) resp, body = self.get_snapshot(snapshot_id) snapshot_status = body['status'] if 'error' in snapshot_status: raise exceptions.\ SnapshotBuildErrorException(snapshot_id=snapshot_id) if int(time.time()) - start >= self.build_timeout: message = ('Share Snapshot %s failed to reach %s status ' 'within the required time (%s s).' % (snapshot_name, status, self.build_timeout)) raise exceptions.TimeoutException(message) def wait_for_access_rule_status(self, share_id, rule_id, status): """Waits for a Share to reach a given status.""" rule_status = "new" start = int(time.time()) while rule_status != status: time.sleep(self.build_interval) resp, rules = self.list_access_rules(share_id) for rule in rules: if rule["id"] in rule_id: rule_status = rule['state'] break if 'error' in rule_status: raise exceptions_shares.\ AccessRuleBuildErrorException(rule_id=rule_id) if int(time.time()) - start >= self.build_timeout: message = ('Share Access Rule %s failed to reach %s status ' 'within the required time (%s s).' 
% (rule_id, status, self.build_timeout)) raise exceptions.TimeoutException(message) def default_quotas(self, tenant_id): uri = "os-quota-sets/%s/defaults" % tenant_id resp, body = self.get(uri, self.headers) return resp, self._parse_resp(body) def show_quotas(self, tenant_id, user_id=None): uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % (user_id) resp, body = self.get(uri, self.headers) return resp, self._parse_resp(body) def reset_quotas(self, tenant_id, user_id=None): uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % user_id resp, body = self.delete(uri, self.headers) return resp, self._parse_resp(body) def update_quotas(self, tenant_id, user_id=None, shares=None, snapshots=None, gigabytes=None, force=True): put_body = {"quota_set": {}} put_body["quota_set"]["tenant_id"] = tenant_id if force: put_body["quota_set"]["force"] = "true" if shares is not None: put_body["quota_set"]["shares"] = shares if snapshots is not None: put_body["quota_set"]["snapshots"] = snapshots if gigabytes is not None: put_body["quota_set"]["gigabytes"] = gigabytes put_body = json.dumps(put_body) uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % user_id resp, body = self.put(uri, put_body, self.headers) return resp, self._parse_resp(body) def get_limits(self): resp, body = self.get("limits", self.headers) return resp, self._parse_resp(body) def is_resource_deleted(self, s_id, rule_id=None): if rule_id is None: try: self.get_snapshot(s_id) except exceptions.NotFound: try: self.get_share(s_id) except exceptions.NotFound: return True return False else: _, rules = self.list_share_access_rules(s_id) for rule in rules: if rule["id"] in rule_id: return False return True def list_extensions(self): resp, extensions = self.get("extensions", self.headers) return resp, self._parse_resp(extensions) def rename(self, share_id, name, desc=None): uri = "shares/%s" % share_id body = {"share": 
{"display_name": name}} if desc is not None: body["share"].update({"display_description": desc}) body = json.dumps(body) resp, body = self.put(uri, body, self.headers) return resp, self._parse_resp(body) def rename_snapshot(self, snapshot_id, name, desc=None): uri = "snapshots/%s" % snapshot_id body = {"snapshot": {"display_name": name}} if desc is not None: body["snapshot"].update({"display_description": desc}) body = json.dumps(body) resp, body = self.put(uri, body, self.headers) return resp, self._parse_resp(body) def reset_state(self, s_id, status="error", s_type="shares"): """ Resets the state of a share or a snapshot status: available, error, creating, deleting, error_deleting s_type: shares, snapshots """ uri = "%s/%s/action" % (s_type, s_id) body = {"os-reset_status": {"status": status}} body = json.dumps(body) resp, body = self.post(uri, body, self.headers) return resp, self._parse_resp(body) ############### def _update_metadata(self, share_id, metadata={}, method="post"): uri = "shares/%s/metadata" % share_id post_body = {"metadata": metadata} body = json.dumps(post_body) if method is "post": resp, metadata = self.post(uri, body, self.headers) if method is "put": resp, metadata = self.put(uri, body, self.headers) return resp, self._parse_resp(metadata) def set_metadata(self, share_id, metadata={}): return self._update_metadata(share_id, metadata) def update_all_metadata(self, share_id, metadata={}): return self._update_metadata(share_id, metadata, method="put") def delete_metadata(self, share_id, key): uri = "shares/%s/metadata/%s" % (share_id, key) resp, body = self.delete(uri, self.headers) return resp, self._parse_resp(body) def get_metadata(self, share_id): uri = "shares/%s/metadata" % share_id resp, body = self.get(uri, self.headers) return resp, self._parse_resp(body) manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/json/__init__.py0000664000175000017500000000000012301410454030472 0ustar 
chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/__init__.py0000664000175000017500000000000012301410454027521 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/xml/0000775000175000017500000000000012301410516026221 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/xml/shares_client.py0000664000175000017500000002072412301410454031424 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from lxml import etree from tempest.common.utils.data_utils import rand_name from tempest.services.compute.xml import common as xml from tempest.services.shares.json import shares_client class SharesClientXML(shares_client.SharesClientJSON): """ Tempest REST client for Manila. It handles shares and access to it in openstack. 
""" def __init__(self, config, username, password, auth_url, tenant_name=None): super(SharesClientXML, self).__init__(config, username, password, auth_url, tenant_name) self.TYPE = "xml" # from RestClientXML self.headers["Content-Type"] = "application/%s" % self.TYPE self.headers["Accept"] = "application/%s" % self.TYPE def _parse_resp(self, body): # from RestClientXML if len(body) > 0: element = etree.fromstring(body) entity_list = ["shares", "snapshots", "extensions", "access_list"] if "metadata" in element.tag: dictionary = {} for el in element.getchildren(): dictionary[u"%s" % el.get("key")] = u"%s" % el.text return dictionary elif any(s in element.tag for s in entity_list): s_list = [] if element is not None: s_list += [xml.xml_to_json(sh) for sh in list(element)] return s_list else: return xml.xml_to_json(element) return body def is_absolute_limit(self, resp, resp_body): # from RestClientXML if (not isinstance(resp_body, collections.Mapping) or 'retry-after' not in resp): return True return 'exceed' in resp_body.get('message', 'blabla') def create_share(self, share_protocol=None, size=1, name=None, snapshot_id=None, description="tempest created share", metadata={}): if name is None: name = rand_name("tempest-created-share-") if share_protocol is None: share_protocol = self.share_protocol share = xml.Element("share", xmlns=xml.XMLNS_11) share.append(xml.Element("share_proto", share_protocol)) if description is not None: share.append(xml.Element("description", description)) if snapshot_id is not None: share.append(xml.Element("snapshot_id", snapshot_id)) share.append(xml.Element("name", name)) share.append(xml.Element("size", size)) metadata_el = xml.Element("metadata") for key, value in metadata.iteritems(): metadata_el.append(xml.Element(key, value)) share.append(metadata_el) resp, body = self.post('shares', str(xml.Document(share)), self.headers) return resp, self._parse_resp(body) def create_access_rule(self, share_id, access_type="ip", 
access_to="0.0.0.0"): rule = xml.Element("os-allow_access", xmlns=xml.XMLNS_11) rule.append(xml.Element("access_type", access_type)) rule.append(xml.Element("access_to", access_to)) uri = "shares/%s/action" % (share_id) resp, body = self.post(uri, str(xml.Document(rule)), self.headers) return resp, self._parse_resp(body) def list_access_rules(self, share_id): uri = "shares/%s/action" % (share_id) access_list = xml.Element("os-access_list", xmlns=xml.XMLNS_11, value=None) resp, body = self.post(uri, str(xml.Document(access_list)), self.headers) return resp, self._parse_resp(body) def delete_access_rule(self, share_id, rule_id): rule = xml.Element("os-deny_access", xmlns=xml.XMLNS_11) rule.append(xml.Element("access_id", rule_id)) uri = "shares/%s/action" % share_id return self.post(uri, str(xml.Document(rule)), self.headers) def create_snapshot(self, share_id, name=None, description="tempest created share-ss", force=False): if name is None: name = rand_name("tempest-created-share-snap-") snap = xml.Element("snapshot", xmlns=xml.XMLNS_11) snap.append(xml.Element("name", name)) snap.append(xml.Element("force", force)) snap.append(xml.Element("description", description)) snap.append(xml.Element("share_id", share_id)) resp, body = self.post('snapshots', str(xml.Document(snap)), self.headers) return resp, self._parse_resp(body) def update_quotas(self, tenant_id=None, user_id=None, shares=None, snapshots=None, gigabytes=None, force=True): uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % user_id upd = xml.Element("quota_set", id=tenant_id) if force: upd.append(xml.Element("force", "true")) if shares is not None: upd.append(xml.Element("shares", shares)) if snapshots is not None: upd.append(xml.Element("snapshots", snapshots)) if gigabytes is not None: upd.append(xml.Element("gigabytes", gigabytes)) resp, body = self.put(uri, str(xml.Document(upd)), self.headers) return resp, self._parse_resp(body) def get_limits(self): resp, element = 
self.get("limits", self.headers) element = etree.fromstring(element) limits = {"rate": [], "absolute": {}} for abs_el in element.getchildren(): if "absolute" in abs_el.tag: element = abs_el break for child in element.getchildren(): limit = {} for key, value in child.attrib.iteritems(): limit[key] = value limits["absolute"][limit["name"]] = limit["value"] return resp, limits def rename(self, share_id, name, desc=None): uri = "shares/%s" % share_id share = xml.Element("share", xmlns=xml.XMLNS_11) share.append(xml.Element("display_name", name)) if desc is not None: share.append(xml.Element("display_description", desc)) resp, body = self.put(uri, str(xml.Document(share)), self.headers) return resp, self._parse_resp(body) def rename_snapshot(self, snapshot_id, name, desc=None): uri = "snapshots/%s" % snapshot_id snap = xml.Element("snapshot", xmlns=xml.XMLNS_11) snap.append(xml.Element("display_name", name)) if desc is not None: snap.append(xml.Element("display_description", desc)) resp, body = self.put(uri, str(xml.Document(snap)), self.headers) return resp, self._parse_resp(body) def reset_state(self, s_id, status="error", s_type="shares"): """ Resets the state of a share or a snapshot status: available, error, creating, deleting, error_deleting s_type: shares, snapshots """ uri = "%s/%s/action" % (s_type, s_id) body = xml.Element("os-reset_status", xmlns=xml.XMLNS_11) body.append(xml.Element("status", status)) resp, body = self.post(uri, str(xml.Document(body)), self.headers) return resp, self._parse_resp(body) def _update_metadata(self, share_id, metadata={}, method="post"): uri = "shares/%s/metadata" % (str(share_id)) metadata_el = xml.Element("metadata") for key, value in metadata.iteritems(): metadata_el.append(xml.Element("meta", value, key=key)) meta_str = str(xml.Document(metadata_el)) if method is "post": resp, body = self.post(uri, meta_str, self.headers) elif method is "put": resp, body = self.put(uri, meta_str, self.headers) metas = self._parse_resp(body) 
return resp, metas manila-2013.2.dev175.gbf1a399/contrib/tempest/tempest/services/shares/xml/__init__.py0000664000175000017500000000000012301410454030321 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/devstack/0000775000175000017500000000000012301410516020753 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/devstack/README.rst0000664000175000017500000000102112301410454022435 0ustar chuckchuck00000000000000==================== Devstack Integration ==================== This directory contains the files necessary to integrate Manila with devstack. To install: $ DEVSTACK_DIR=/path/to/devstack $ cp lib/manila ${DEVSTACK_DIR}/lib $ cp extras.d/70-manila.sh ${DEVSTACK_DIR}/extras.d note: 70-manila.sh uses simple lvm-driver without multitenancy support. To configure devstack to run manila: $ cd ${DEVSTACK_DIR} $ services=(manila m-api m-shr m-sch); for item in ${services[*]}; do echo "enable_service $item" >> localrc; done manila-2013.2.dev175.gbf1a399/contrib/devstack/extras.d/0000775000175000017500000000000012301410516022503 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/devstack/extras.d/70-manila.sh0000664000175000017500000000155012301410454024526 0ustar chuckchuck00000000000000# DevStack extras script to install Manila if is_service_enabled manila; then if [[ "$1" == "source" ]]; then # Initial source source $TOP_DIR/lib/manila elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Manila" install_manila elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Manila" configure_manila echo_summary "Initialing Manila" init_manila elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Starting Manila" start_manila echo_summary "Creating Manila entities for auth service" create_manila_accounts fi if [[ "$1" == "unstack" ]]; then cleanup_manila fi if [[ "$1" == "clean" ]]; then cleanup_manila sudo rm -rf /etc/manila fi fi 
manila-2013.2.dev175.gbf1a399/contrib/devstack/lib/0000775000175000017500000000000012301410516021521 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/contrib/devstack/lib/manila0000664000175000017500000003412512301410454022713 0ustar chuckchuck00000000000000# lib/manila # Install and start **Manila** file shares service # Dependencies: # - functions # - DEST, DATA_DIR, STACK_USER must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined # ``KEYSTONE_TOKEN_FORMAT`` must be defined # stack.sh # --------- # install_manila # # configure_manila # # init_manila # # start_manila # # stop_manila # # cleanup_manila # # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- MANILA_REPO_ROOT=stackforge MANILA_GIT_BASE=${MANILA_GIT_BASE:-https://github.com} MANILA_REPO=${MANILA_GIT_BASE}/${MANILA_REPO_ROOT}/manila.git MANILA_BRANCH=master MANILACLIENT_REPO=${MANILA_GIT_BASE}/${MANILA_REPO_ROOT}/python-manilaclient.git MANILACLIENT_BRANCH=master SHARE_BACKING_FILE_SIZE=${SHARE_BACKING_FILE_SIZE:-8400M} MANILA_SECURE_DELETE=`trueorfalse True $MANILA_SECURE_DELETE` # set up default driver MANILA_DRIVER=${MANILA_DRIVER:-default} # set up default directories MANILA_DIR=$DEST/manila MANILACLIENT_DIR=$DEST/python-manilaclient MANILA_STATE_PATH=${MANILA_STATE_PATH:=$DATA_DIR/manila} MANILA_MNT_DIR=${MANILA_MNT_DIR:=$MANILA_STATE_PATH/mnt} MANILA_AUTH_CACHE_DIR=${MANILA_AUTH_CACHE_DIR:-/var/cache/manila} MANILA_CONF_DIR=/etc/manila MANILA_CONF=$MANILA_CONF_DIR/manila.conf MANILA_API_PASTE_INI=$MANILA_CONF_DIR/api-paste.ini # Public facing bits MANILA_SERVICE_HOST=${MANILA_SERVICE_HOST:-$SERVICE_HOST} MANILA_SERVICE_PORT=${MANILA_SERVICE_PORT:-8786} MANILA_SERVICE_PORT_INT=${MANILA_SERVICE_PORT_INT:-18776} MANILA_SERVICE_PROTOCOL=${MANILA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Support entry points installation of console scripts if [[ -d $MANILA_DIR/bin ]]; then MANILA_BIN_DIR=$MANILA_DIR/bin else MANILA_BIN_DIR=/usr/local/bin fi 
SHARE_GROUP=${SHARE_GROUP:-stack-shares} SHARE_NAME_PREFIX=${SHARE_NAME_PREFIX:-share-} SHARE_EXPORT_IP=${SHARE_EXPORT_IP:-$HOST_IP} SHARE_DRIVER=${SHARE_DRIVER:-manila.share.drivers.lvm.LVMShareDriver} MANILA_SCHEDULER_DRIVER=${MANILA_SCHEDULER_DRIVER:-manila.scheduler.simple.SimpleScheduler} CIFS_HELPER=${CIFS_HELPER:-manila.share.drivers.lvm.CIFSNetConfHelper} function _clean_share_group() { local vg=$1 local vg_prefix=$2 # Clean out existing shares for lv in `sudo lvs --noheadings -o lv_name $vg`; do # vg_prefix prefixes the LVs we want if [[ "${lv#$vg_prefix}" != "$lv" ]]; then sudo umount $MANILA_MNT_DIR/$lv sudo lvremove -f $vg/$lv sudo rm -rf $MANILA_MNT_DIR/$lv fi done } # _clean_share_group removes all manila shares from the specified shares group _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX function _clean_lvm_backing_file() { local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then # if the backing physical device is a loop device, it was probably setup by devstack VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') if [[ -n "$VG_DEV" ]]; then sudo losetup -d $VG_DEV rm -f $DATA_DIR/${vg}-backing-file fi fi } # cleanup_manila() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_manila() { # ensure the volume group is cleared up because fails might # leave dead volumes in the group # Campsite rule: leave behind a share volume group at least as clean as we found it # _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX _clean_lvm_backing_file $SHARE_GROUP } # configure_manila() - Set config files, create data dirs, etc function configure_manila() { setup_develop $MANILA_DIR setup_develop $MANILACLIENT_DIR if [[ ! 
-d $MANILA_CONF_DIR ]]; then sudo mkdir -p $MANILA_CONF_DIR fi sudo chown $STACK_USER $MANILA_CONF_DIR cp -p $MANILA_DIR/etc/manila/policy.json $MANILA_CONF_DIR # Set the paths of certain binaries MANILA_ROOTWRAP=$(get_rootwrap_location manila) # If Manila ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $MANILA_ROOTWRAP ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP" if [[ -d $MANILA_DIR/etc/manila/rootwrap.d ]]; then # Wipe any existing rootwrap.d files first if [[ -d $MANILA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $MANILA_CONF_DIR/rootwrap.d fi # Deploy filters to /etc/manila/rootwrap.d sudo mkdir -m 755 $MANILA_CONF_DIR/rootwrap.d sudo cp $MANILA_DIR/etc/manila/rootwrap.d/*.filters $MANILA_CONF_DIR/rootwrap.d sudo chown -R root:root $MANILA_CONF_DIR/rootwrap.d sudo chmod 644 $MANILA_CONF_DIR/rootwrap.d/* # Set up rootwrap.conf, pointing to /etc/manila/rootwrap.d sudo cp $MANILA_DIR/etc/manila/rootwrap.conf $MANILA_CONF_DIR/ sudo sed -e "s:^filters_path=.*$:filters_path=$MANILA_CONF_DIR/rootwrap.d:" -i $MANILA_CONF_DIR/rootwrap.conf sudo chown root:root $MANILA_CONF_DIR/rootwrap.conf sudo chmod 0644 $MANILA_CONF_DIR/rootwrap.conf # Specify rootwrap.conf as first parameter to manila-rootwrap MANILA_ROOTWRAP="$MANILA_ROOTWRAP $MANILA_CONF_DIR/rootwrap.conf" ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP *" fi TEMPFILE=`mktemp` echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_MANILA_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/manila-rootwrap cp $MANILA_DIR/etc/manila/api-paste.ini $MANILA_API_PASTE_INI iniset $MANILA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST iniset $MANILA_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT iniset $MANILA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $MANILA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $MANILA_API_PASTE_INI filter:authtoken 
admin_user manila iniset $MANILA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD iniset $MANILA_API_PASTE_INI filter:authtoken signing_dir $MANILA_AUTH_CACHE_DIR cp $MANILA_DIR/etc/manila/manila.conf.sample $MANILA_CONF iniset $MANILA_CONF DEFAULT auth_strategy keystone iniset $MANILA_CONF DEFAULT debug True iniset $MANILA_CONF DEFAULT verbose True iniset $MANILA_CONF DEFAULT scheduler_driver $MANILA_SCHEDULER_DRIVER iniset $MANILA_CONF DEFAULT share_export_ip $SHARE_EXPORT_IP iniset $MANILA_CONF DEFAULT share_volume_group $SHARE_GROUP iniset $MANILA_CONF DEFAULT share_name_template ${SHARE_NAME_PREFIX}%s iniset $MANILA_CONF DEFAULT iscsi_helper tgtadm iniset $MANILA_CONF DEFAULT sql_connection `database_connection_url manila` iniset $MANILA_CONF DEFAULT api_paste_config $MANILA_API_PASTE_INI iniset $MANILA_CONF DEFAULT rootwrap_config $MANILA_CONF_DIR/rootwrap.conf iniset $MANILA_CONF DEFAULT osapi_share_extension manila.api.openstack.share.contrib.standard_extensions iniset $MANILA_CONF DEFAULT state_path $MANILA_STATE_PATH iniset $MANILA_CONF DEFAULT share_driver $SHARE_DRIVER iniset $MANILA_CONF DEFAULT share_lvm_helpers CIFS=$CIFS_HELPER,NFS=manila.share.drivers.lvm.NFSHelper iniset $MANILA_CONF DEFAULT path_to_key /home/stack/.ssh/id_rsa.pub iniset $MANILA_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD iniset $MANILA_CONF DEFAULT cinder_admin_password $SERVICE_PASSWORD iniset $MANILA_CONF DEFAULT neutron_admin_password $SERVICE_PASSWORD if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $MANILA_CONF DEFAULT osapi_share_listen_port $MANILA_SERVICE_PORT_INT fi if [ "$SYSLOG" != "False" ]; then iniset $MANILA_CONF DEFAULT use_syslog True fi iniset_rpc_backend manila $MANILA_CONF DEFAULT if [[ "$MANILA_SECURE_DELETE" == "False" ]]; then iniset $MANILA_CONF DEFAULT secure_delete False iniset $MANILA_CONF DEFAULT share_clear none fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then 
# Add color to logging output iniset $MANILA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" iniset $MANILA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $MANILA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $MANILA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" fi } # create_manila_accounts() - Set up common required manila accounts # Tenant User Roles # ------------------------------------------------------------------ # service manila admin # if enabled # Migrated from keystone_data.sh function create_manila_accounts() { SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") # Manila if [[ "$ENABLED_SERVICES" =~ "m-api" ]]; then MANILA_USER=$(keystone user-create \ --name=manila \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ --email=manila@example.com \ | grep " id " | get_field 2) keystone user-role-add \ --tenant_id $SERVICE_TENANT \ --user_id $MANILA_USER \ --role_id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then MANILA_SERVICE=$(keystone service-create \ --name=manila \ --type=share \ --description="Manila Shared Filesystem Service" \ | grep " id " | get_field 2) keystone endpoint-create \ --region RegionOne \ --service_id $MANILA_SERVICE \ --publicurl "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v1/\$(tenant_id)s" \ --adminurl "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v1/\$(tenant_id)s" fi fi } # init_manila() - Initialize 
database and volume group function init_manila() { # Force nova volumes off # NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") if is_service_enabled $DATABASE_BACKENDS; then # (re)create manila database recreate_database manila utf8 # (re)create manila database $MANILA_BIN_DIR/manila-manage db sync fi if is_service_enabled m-shr; then # Configure a default volume group called '`stack-shares`' for the share # service if it does not yet exist. If you don't wish to use a file backed # volume group, create your own volume group called ``stack-volumes`` before # invoking ``stack.sh``. # # By default, the backing file is 8G in size, and is stored in ``/opt/stack/data``. if ! sudo vgs $SHARE_GROUP; then SHARE_BACKING_FILE=${SHARE_BACKING_FILE:-$DATA_DIR/${SHARE_GROUP}-backing-file} # Only create if the file doesn't already exists [[ -f $SHARE_BACKING_FILE ]] || truncate -s $SHARE_BACKING_FILE_SIZE $SHARE_BACKING_FILE DEV=`sudo losetup -f --show $SHARE_BACKING_FILE` # Only create if the loopback device doesn't contain $SHARE_GROUP if ! sudo vgs $SHARE_GROUP; then sudo vgcreate $SHARE_GROUP $DEV; fi fi mkdir -p $MANILA_STATE_PATH/shares if sudo vgs $SHARE_GROUP; then _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX fi fi # Create cache dir sudo mkdir -p $MANILA_AUTH_CACHE_DIR sudo chown $STACK_USER $MANILA_AUTH_CACHE_DIR rm -f $MANILA_AUTH_CACHE_DIR/* } # install_manila() - Collect source and prepare function install_manila() { if [[ "$RECLONE" = "True" ]]; then git_clone $MANILA_REPO $MANILA_DIR $MANILA_BRANCH git_clone $MANILACLIENT_REPO $MANILACLIENT_DIR $MANILACLIENT_BRANCH fi if is_service_enabled m-shr; then if is_ubuntu; then sudo apt-get install -y nfs-kernel-server nfs-common samba elif is_fedora; then sudo yum install -y nfs-utils nfs-utils-lib samba fi fi } # apply config.d approach (e.g. Oneiric does not have this) function _configure_tgt_for_config_d() { if [[ ! 
-d /etc/tgt/conf.d/ ]]; then sudo mkdir -p /etc/tgt/conf.d echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf fi } # start_manila() - Start running processes, including screen function start_manila() { samba_daemon_name=smbd if is_service_enabled m-shr; then if is_fedora; then samba_daemon_name=smb fi stop_service $samba_daemon_name fi screen_it m-api "cd $MANILA_DIR && $MANILA_BIN_DIR/manila-api --config-file $MANILA_CONF" screen_it m-shr "sudo service $samba_daemon_name stop; devloop='sudo losetup -j ${SHARE_BACKING_FILE:-$DATA_DIR/${SHARE_GROUP}-backing-file}'; if [[ \$( \$devloop ) != *'/dev/loop'* ]]; then sudo losetup -f ${SHARE_BACKING_FILE:-$DATA_DIR/${SHARE_GROUP}-backing-file}; fi; cd $MANILA_DIR && $MANILA_BIN_DIR/manila-share --config-file $MANILA_CONF" screen_it m-sch "cd $MANILA_DIR && $MANILA_BIN_DIR/manila-scheduler --config-file $MANILA_CONF" # Start proxies if enabled if is_service_enabled m-api && is_service_enabled tls-proxy; then start_tls_proxy '*' $MANILA_SERVICE_PORT $MANILA_SERVICE_HOST $MANILA_SERVICE_PORT_INT & fi } # stop_manila() - Stop running processes function stop_manila() { # Kill the manila screen windows for serv in m-api m-sch m-shr; do screen -S $SCREEN_NAME -p $serv -X kill done } # Restore xtrace $XTRACE manila-2013.2.dev175.gbf1a399/manila.egg-info/0000775000175000017500000000000012301410516020442 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila.egg-info/top_level.txt0000664000175000017500000000000712301410516023171 0ustar chuckchuck00000000000000manila manila-2013.2.dev175.gbf1a399/manila.egg-info/not-zip-safe0000664000175000017500000000000112301410516022670 0ustar chuckchuck00000000000000 manila-2013.2.dev175.gbf1a399/manila.egg-info/entry_points.txt0000664000175000017500000000105412301410516023740 0ustar chuckchuck00000000000000[manila.scheduler.weights] CapacityWeigher = manila.scheduler.weights.capacity:CapacityWeigher [manila.scheduler.filters] CapabilitiesFilter = 
manila.openstack.common.scheduler.filters.capabilities_filter:CapabilitiesFilter RetryFilter = manila.scheduler.filters.retry_filter:RetryFilter CapacityFilter = manila.scheduler.filters.capacity_filter:CapacityFilter JsonFilter = manila.openstack.common.scheduler.filters.json_filter:JsonFilter AvailabilityZoneFilter = manila.openstack.common.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter manila-2013.2.dev175.gbf1a399/manila.egg-info/requires.txt0000664000175000017500000000075512301410516023051 0ustar chuckchuck00000000000000pbr>=0.5.21,<1.0 amqplib>=0.6.1 anyjson>=0.3.3 Babel>=1.3 eventlet>=0.13.0 greenlet>=0.3.2 iso8601>=0.1.8 kombu>=2.4.8 lockfile>=0.8 lxml>=2.3 oslo.config>=1.2.0 paramiko>=1.8.0 Paste PasteDeploy>=1.5.0 python-neutronclient>=2.3.0,<3 python-glanceclient>=0.9.0 python-keystoneclient>=0.3.2 python-swiftclient>=1.5 Routes>=1.12.3 SQLAlchemy>=0.7.8,<=0.7.99 sqlalchemy-migrate>=0.7.2 stevedore>=0.10 python-cinderclient>=1.0.6 python-novaclient>=2.15.0 suds>=0.4 WebOb>=1.2.3,<1.3 wsgiref>=0.1.2manila-2013.2.dev175.gbf1a399/manila.egg-info/SOURCES.txt0000664000175000017500000003630412301410516022334 0ustar chuckchuck00000000000000AUTHORS CONTRIBUTING.md ChangeLog HACKING.rst LICENSE MANIFEST.in README.md README.rst babel.cfg openstack-common.conf pylintrc requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini bin/manila-all bin/manila-api bin/manila-clear-rabbit-queues bin/manila-manage bin/manila-rootwrap bin/manila-rpc-zmq-receiver bin/manila-scheduler bin/manila-share contrib/devstack/README.rst contrib/devstack/extras.d/70-manila.sh contrib/devstack/lib/manila contrib/tempest/README.rst contrib/tempest/tempest/clients_shares.py contrib/tempest/tempest/config_shares.py contrib/tempest/tempest/exceptions_shares.py contrib/tempest/tempest/api/shares/__init__.py contrib/tempest/tempest/api/shares/base.py contrib/tempest/tempest/api/shares/test_metadata.py 
contrib/tempest/tempest/api/shares/test_metadata_negative.py contrib/tempest/tempest/api/shares/test_rules.py contrib/tempest/tempest/api/shares/test_rules_negative.py contrib/tempest/tempest/api/shares/test_security_negative.py contrib/tempest/tempest/api/shares/test_shares.py contrib/tempest/tempest/api/shares/test_shares_negative.py contrib/tempest/tempest/api/shares/admin/__init__.py contrib/tempest/tempest/api/shares/admin/test_admin_actions.py contrib/tempest/tempest/api/shares/admin/test_admin_actions_negative.py contrib/tempest/tempest/api/shares/admin/test_quotas.py contrib/tempest/tempest/api/shares/admin/test_quotas_negative.py contrib/tempest/tempest/cli/manilaclient.py contrib/tempest/tempest/cli/simple_read_only/test_manila.py contrib/tempest/tempest/services/shares/__init__.py contrib/tempest/tempest/services/shares/json/__init__.py contrib/tempest/tempest/services/shares/json/shares_client.py contrib/tempest/tempest/services/shares/xml/__init__.py contrib/tempest/tempest/services/shares/xml/shares_client.py doc/.gitignore doc/Makefile doc/README.rst doc/find_autodoc_modules.sh doc/generate_autodoc_index.sh doc/ext/__init__.py doc/ext/manila_autodoc.py doc/ext/manila_todo.py doc/source/conf.py doc/source/index.rst doc/source/_ga/layout.html doc/source/_static/.gitignore doc/source/_static/.placeholder doc/source/_static/basic.css doc/source/_static/default.css doc/source/_static/jquery.tweet.js doc/source/_static/tweaks.css doc/source/_templates/.gitignore doc/source/_templates/.placeholder doc/source/_theme/layout.html doc/source/_theme/theme.conf doc/source/devref/addmethod.openstackapi.rst doc/source/devref/api.rst doc/source/devref/architecture.rst doc/source/devref/auth.rst doc/source/devref/database.rst doc/source/devref/development.environment.rst doc/source/devref/fakes.rst doc/source/devref/gerrit.rst doc/source/devref/il8n.rst doc/source/devref/index.rst doc/source/devref/jenkins.rst doc/source/devref/launchpad.rst 
doc/source/devref/manila.rst doc/source/devref/rpc.rst doc/source/devref/scheduler.rst doc/source/devref/services.rst doc/source/devref/share.rst doc/source/devref/threading.rst doc/source/devref/unit_tests.rst doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png doc/source/man/manila-manage.rst etc/manila/api-paste.ini etc/manila/logging_sample.conf etc/manila/manila.conf.sample etc/manila/policy.json etc/manila/rootwrap.conf etc/manila/rootwrap.d/share.filters etc/manila/rootwrap.d/volume.filters manila/__init__.py manila/context.py manila/exception.py manila/manager.py manila/policy.py manila/quota.py manila/service.py manila/test.py manila/utils.py manila/version.py manila/wsgi.py manila.egg-info/PKG-INFO manila.egg-info/SOURCES.txt manila.egg-info/dependency_links.txt manila.egg-info/entry_points.txt manila.egg-info/not-zip-safe manila.egg-info/requires.txt manila.egg-info/top_level.txt manila/api/__init__.py manila/api/auth.py manila/api/common.py manila/api/extensions.py manila/api/sizelimit.py manila/api/urlmap.py manila/api/versions.py manila/api/xmlutil.py manila/api/contrib/__init__.py manila/api/contrib/admin_actions.py manila/api/contrib/extended_quotas.py manila/api/contrib/quota_classes.py manila/api/contrib/quotas.py manila/api/contrib/services.py manila/api/contrib/share_actions.py manila/api/contrib/user_quotas.py manila/api/middleware/__init__.py manila/api/middleware/auth.py manila/api/middleware/fault.py manila/api/middleware/sizelimit.py manila/api/openstack/__init__.py manila/api/openstack/urlmap.py manila/api/openstack/wsgi.py manila/api/openstack/volume/__init__.py manila/api/openstack/volume/versions.py manila/api/schemas/atom-link.rng manila/api/schemas/v1.1/extension.rng manila/api/schemas/v1.1/extensions.rng 
manila/api/schemas/v1.1/limits.rng manila/api/schemas/v1.1/metadata.rng manila/api/v1/__init__.py manila/api/v1/limits.py manila/api/v1/router.py manila/api/v1/security_service.py manila/api/v1/share_metadata.py manila/api/v1/share_networks.py manila/api/v1/share_snapshots.py manila/api/v1/shares.py manila/api/views/__init__.py manila/api/views/limits.py manila/api/views/security_service.py manila/api/views/share_networks.py manila/api/views/share_snapshots.py manila/api/views/shares.py manila/api/views/versions.py manila/common/__init__.py manila/common/config.py manila/common/constants.py manila/common/sqlalchemyutils.py manila/compute/__init__.py manila/compute/nova.py manila/db/__init__.py manila/db/api.py manila/db/base.py manila/db/migration.py manila/db/sqlalchemy/__init__.py manila/db/sqlalchemy/api.py manila/db/sqlalchemy/migration.py manila/db/sqlalchemy/models.py manila/db/sqlalchemy/session.py manila/db/sqlalchemy/utils.py manila/db/sqlalchemy/migrate_repo/README manila/db/sqlalchemy/migrate_repo/__init__.py manila/db/sqlalchemy/migrate_repo/manage.py manila/db/sqlalchemy/migrate_repo/migrate.cfg manila/db/sqlalchemy/migrate_repo/versions/001_manila_init.py manila/db/sqlalchemy/migrate_repo/versions/__init__.py manila/image/__init__.py manila/image/glance.py manila/image/image_utils.py manila/network/__init__.py manila/network/linux/__init__.py manila/network/linux/interface.py manila/network/linux/ip_lib.py manila/network/linux/ovs_lib.py manila/network/neutron/__init__.py manila/network/neutron/api.py manila/network/neutron/constants.py manila/network/neutron/neutron_network_plugin.py manila/openstack/__init__.py manila/openstack/common/README manila/openstack/common/__init__.py manila/openstack/common/context.py manila/openstack/common/eventlet_backdoor.py manila/openstack/common/exception.py manila/openstack/common/excutils.py manila/openstack/common/fileutils.py manila/openstack/common/gettextutils.py manila/openstack/common/importutils.py 
manila/openstack/common/jsonutils.py manila/openstack/common/local.py manila/openstack/common/lockutils.py manila/openstack/common/log.py manila/openstack/common/loopingcall.py manila/openstack/common/network_utils.py manila/openstack/common/policy.py manila/openstack/common/processutils.py manila/openstack/common/service.py manila/openstack/common/strutils.py manila/openstack/common/threadgroup.py manila/openstack/common/timeutils.py manila/openstack/common/uuidutils.py manila/openstack/common/notifier/__init__.py manila/openstack/common/notifier/api.py manila/openstack/common/notifier/log_notifier.py manila/openstack/common/notifier/no_op_notifier.py manila/openstack/common/notifier/rabbit_notifier.py manila/openstack/common/notifier/rpc_notifier.py manila/openstack/common/notifier/rpc_notifier2.py manila/openstack/common/notifier/test_notifier.py manila/openstack/common/rootwrap/__init__.py manila/openstack/common/rootwrap/cmd.py manila/openstack/common/rootwrap/filters.py manila/openstack/common/rootwrap/wrapper.py manila/openstack/common/rpc/__init__.py manila/openstack/common/rpc/amqp.py manila/openstack/common/rpc/common.py manila/openstack/common/rpc/dispatcher.py manila/openstack/common/rpc/impl_fake.py manila/openstack/common/rpc/impl_kombu.py manila/openstack/common/rpc/impl_qpid.py manila/openstack/common/rpc/impl_zmq.py manila/openstack/common/rpc/matchmaker.py manila/openstack/common/rpc/matchmaker_redis.py manila/openstack/common/rpc/proxy.py manila/openstack/common/rpc/service.py manila/openstack/common/rpc/zmq_receiver.py manila/openstack/common/scheduler/__init__.py manila/openstack/common/scheduler/filter.py manila/openstack/common/scheduler/weight.py manila/openstack/common/scheduler/filters/__init__.py manila/openstack/common/scheduler/filters/availability_zone_filter.py manila/openstack/common/scheduler/filters/capabilities_filter.py manila/openstack/common/scheduler/filters/extra_specs_ops.py 
manila/openstack/common/scheduler/filters/json_filter.py manila/openstack/common/scheduler/weights/__init__.py manila/scheduler/__init__.py manila/scheduler/chance.py manila/scheduler/driver.py manila/scheduler/filter_scheduler.py manila/scheduler/host_manager.py manila/scheduler/manager.py manila/scheduler/rpcapi.py manila/scheduler/scheduler_options.py manila/scheduler/simple.py manila/scheduler/filters/__init__.py manila/scheduler/filters/capacity_filter.py manila/scheduler/filters/retry_filter.py manila/scheduler/weights/__init__.py manila/scheduler/weights/capacity.py manila/share/__init__.py manila/share/api.py manila/share/configuration.py manila/share/driver.py manila/share/manager.py manila/share/rpcapi.py manila/share/drivers/__init__.py manila/share/drivers/generic.py manila/share/drivers/glusterfs.py manila/share/drivers/lvm.py manila/share/drivers/netapp/__init__.py manila/share/drivers/netapp/api.py manila/share/drivers/netapp/driver.py manila/testing/README.rst manila/tests/__init__.py manila/tests/conf_fixture.py manila/tests/declare_conf.py manila/tests/fake_compute.py manila/tests/fake_driver.py manila/tests/fake_network.py manila/tests/fake_utils.py manila/tests/fake_volume.py manila/tests/policy.json manila/tests/runtime_conf.py manila/tests/test_api.py manila/tests/test_conf.py manila/tests/test_context.py manila/tests/test_exception.py manila/tests/test_migrations.conf manila/tests/test_migrations.py manila/tests/test_misc.py manila/tests/test_policy.py manila/tests/test_quota.py manila/tests/test_service.py manila/tests/test_share.py manila/tests/test_share_api.py manila/tests/test_share_driver.py manila/tests/test_share_generic.py manila/tests/test_share_glusterfs.py manila/tests/test_share_lvm.py manila/tests/test_share_netapp.py manila/tests/test_share_rpcapi.py manila/tests/test_skip_examples.py manila/tests/test_test.py manila/tests/test_test_utils.py manila/tests/test_utils.py manila/tests/test_wsgi.py manila/tests/utils.py 
manila/tests/api/__init__.py manila/tests/api/common.py manila/tests/api/fakes.py manila/tests/api/test_common.py manila/tests/api/test_extensions.py manila/tests/api/test_router.py manila/tests/api/test_wsgi.py manila/tests/api/test_xmlutil.py manila/tests/api/contrib/__init__.py manila/tests/api/contrib/stubs.py manila/tests/api/contrib/test_admin_actions.py manila/tests/api/contrib/test_services.py manila/tests/api/contrib/test_share_actions.py manila/tests/api/extensions/__init__.py manila/tests/api/extensions/foxinsocks.py manila/tests/api/middleware/__init__.py manila/tests/api/middleware/test_auth.py manila/tests/api/middleware/test_faults.py manila/tests/api/middleware/test_sizelimit.py manila/tests/api/openstack/__init__.py manila/tests/api/openstack/test_wsgi.py manila/tests/api/v1/__init__.py manila/tests/api/v1/stubs.py manila/tests/api/v1/test_limits.py manila/tests/api/v1/test_security_service.py manila/tests/api/v1/test_share_metadata.py manila/tests/api/v1/test_share_networks.py manila/tests/api/v1/test_share_snapshots.py manila/tests/api/v1/test_shares.py manila/tests/compute/__init__.py manila/tests/compute/test_nova.py manila/tests/db/__init__.py manila/tests/db/fakes.py manila/tests/glance/__init__.py manila/tests/glance/stubs.py manila/tests/image/__init__.py manila/tests/image/fake.py manila/tests/image/test_glance.py manila/tests/integrated/__init__.py manila/tests/integrated/integrated_helpers.py manila/tests/integrated/test_extensions.py manila/tests/integrated/test_login.py manila/tests/integrated/api/__init__.py manila/tests/integrated/api/client.py manila/tests/monkey_patch_example/__init__.py manila/tests/monkey_patch_example/example_a.py manila/tests/monkey_patch_example/example_b.py manila/tests/network/__init__.py manila/tests/network/test_security_service_db.py manila/tests/network/test_share_network_db.py manila/tests/network/linux/__init__.py manila/tests/network/linux/test_interface.py manila/tests/network/linux/test_ip_lib.py 
manila/tests/network/linux/test_ovs_lib.py manila/tests/network/neutron/__init__.py manila/tests/network/neutron/test_neutron_api.py manila/tests/network/neutron/test_neutron_plugin.py manila/tests/scheduler/__init__.py manila/tests/scheduler/fakes.py manila/tests/scheduler/test_capacity_weigher.py manila/tests/scheduler/test_filter_scheduler.py manila/tests/scheduler/test_host_filters.py manila/tests/scheduler/test_host_manager.py manila/tests/scheduler/test_rpcapi.py manila/tests/scheduler/test_scheduler.py manila/tests/scheduler/test_scheduler_options.py manila/tests/var/ca.crt manila/tests/var/certificate.crt manila/tests/var/privatekey.key manila/tests/volume/__init__.py manila/tests/volume/test_cinder.py manila/tests/windows/__init__.py manila/tests/windows/basetestcase.py manila/tests/windows/db_fakes.py manila/tests/windows/mockproxy.py manila/tests/windows/windowsutils.py manila/tests/windows/stubs/README.rst manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_check_for_setup_errors_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_wmi.p.gz 
manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_wmi.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_os.p.gz manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_wmi.p.gz manila/tests/xenapi/__init__.py manila/volume/__init__.py manila/volume/cinder.py tools/enable-pre-commit-hook.sh tools/install_venv.py tools/install_venv_common.py tools/lintstack.py tools/lintstack.sh tools/with_venv.sh tools/conf/extract_opts.py tools/conf/generate_sample.shmanila-2013.2.dev175.gbf1a399/manila.egg-info/PKG-INFO0000664000175000017500000000306612301410516021544 0ustar chuckchuck00000000000000Metadata-Version: 1.1 Name: manila Version: 2013.2.dev175.gbf1a399 Summary: Shared Storage for OpenStack Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: The Choose Your Own Adventure README for Manila =============================================== You have come across a storage service for an open cloud computing service. It has identified itself as "Manila." It was abstracted from the Nova project. To monitor it from a distance: follow `@openstack `_ on twitter. 
To tame it for use in your own cloud: read http://docs.openstack.org To dissect it in detail: visit http://github.com/stackforge/manila To taunt it with its weaknesses: use http://bugs.launchpad.net/manila To watch it: http://jenkins.openstack.org To hack at it: read HACKING To cry over its pylint problems: http://jenkins.openstack.org/job/manila-pylint/violations Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 2.6 manila-2013.2.dev175.gbf1a399/manila.egg-info/dependency_links.txt0000664000175000017500000000000112301410516024510 0ustar chuckchuck00000000000000 manila-2013.2.dev175.gbf1a399/MANIFEST.in0000664000175000017500000000013612301410454017246 0ustar chuckchuck00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pyc manila-2013.2.dev175.gbf1a399/AUTHORS0000664000175000017500000000117512301410516016563 0ustar chuckchuck00000000000000119Vik Aleks Chirko Andrei V. Ostapenko Ben Swartzlander Bill Owen Mark McLoughlin Michael Still Monty Taylor Ram Raja Sascha Peilicke Valeriy Ponomaryov Vijay Bellur Yulia Portnova Yulia Portnova Zhongyue Luo bswartz ubu vponomaryov manila-2013.2.dev175.gbf1a399/pylintrc0000664000175000017500000000232612301410454017302 0ustar chuckchuck00000000000000# The format of this file isn't really documented; just use --generate-rcfile [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. 
disable=C0111,W0511,W0142,W0622 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ # Module names matching manila-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(manila-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ manila-2013.2.dev175.gbf1a399/README.md0000664000175000017500000000006312301410454016766 0ustar chuckchuck00000000000000manila ====== Openstack Shared Filesystem Service manila-2013.2.dev175.gbf1a399/etc/0000775000175000017500000000000012301410516016262 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/etc/manila/0000775000175000017500000000000012301410516017523 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/etc/manila/manila.conf.sample0000664000175000017500000004711312301410454023122 0ustar chuckchuck00000000000000#################### # manila.conf sample # #################### [DEFAULT] # # Options defined in manila.exception # # make exception message format errors fatal (boolean value) #fatal_exception_format_errors=false # # Options defined in manila.common.config # # Virtualization api connection type : libvirt, xenapi, or # fake (string value) #connection_type= # The SQLAlchemy connection string used to connect to the # database (string value) #sql_connection=sqlite:///$state_path/$sqlite_db # Verbosity of SQL debugging information. 
0=None, # 100=Everything (integer value) #sql_connection_debug=0 # File name for the paste.deploy config for manila-api (string # value) #api_paste_config=api-paste.ini # Directory where the manila python module is installed # (string value) #pybasedir=/usr/lib/python/site-packages # Directory where manila binaries are installed (string value) #bindir=$pybasedir/bin # Top-level directory for maintaining manila's state (string # value) #state_path=$pybasedir # ip address of this host (string value) #my_ip=10.0.0.1 # default glance hostname or ip (string value) #glance_host=$my_ip # default glance port (integer value) #glance_port=9292 # A list of the glance api servers available to manila # ([hostname|ip]:port) (list value) #glance_api_servers=$glance_host:$glance_port # Version of the glance api to use (integer value) #glance_api_version=1 # Number retries when downloading an image from glance # (integer value) #glance_num_retries=0 # Allow to perform insecure SSL (https) requests to glance # (boolean value) #glance_api_insecure=false # the topic scheduler nodes listen on (string value) #scheduler_topic=manila-scheduler # the topic share nodes listen on (string value) #share_topic=manila-share # Deploy v1 of the Manila API. (boolean value) #enable_v1_api=true # Deploy v2 of the Manila API. 
(boolean value) #enable_v2_api=true # whether to rate limit the api (boolean value) #api_rate_limit=true # Specify list of extensions to load when using # osapi_share_extension option with # manila.api.contrib.select_extensions (list value) #osapi_share_ext_list= # osapi share extension to load (multi valued) #osapi_share_extension=manila.api.contrib.standard_extensions # Base URL that will be presented to users in links to the # OpenStack Share API (string value) #osapi_share_base_URL= # the maximum number of items returned in a single response # from a collection resource (integer value) #osapi_max_limit=1000 # the filename to use with sqlite (string value) #sqlite_db=manila.sqlite # If passed, use synchronous mode for sqlite (boolean value) #sqlite_synchronous=true # timeout before idle sql connections are reaped (integer # value) #sql_idle_timeout=3600 # maximum db connection retries during startup. (setting -1 # implies an infinite retry count) (integer value) #sql_max_retries=10 # interval between retries of opening a sql connection # (integer value) #sql_retry_interval=10 # full class name for the Manager for scheduler (string value) #scheduler_manager=manila.scheduler.manager.SchedulerManager # full class name for the Manager for share (string value) #share_manager=manila.share.manager.ShareManager # Name of this node. This can be an opaque identifier. It is # not necessarily a hostname, FQDN, or IP address. (string # value) #host=manila # availability zone of this node (string value) #storage_availability_zone=nova # Memcached servers or None for in process cache. (list value) #memcached_servers= # time period to generate share usages for. 
Time period must # be hour, day, month or year (string value) #share_usage_audit_period=month # Deprecated: command to use for running commands as root # (string value) #root_helper=sudo # Path to the rootwrap configuration file to use for running # commands as root (string value) #rootwrap_config= # Whether to log monkey patching (boolean value) #monkey_patch=false # List of modules/decorators to monkey patch (list value) #monkey_patch_modules= # maximum time since last check-in for up service (integer # value) #service_down_time=60 # The full class name of the share API class to use (string # value) #share_api_class=manila.share.api.API # The strategy to use for auth. Supports noauth, keystone, and # deprecated. (string value) #auth_strategy=noauth # A list of backend names to use. These backend names should # be backed by a unique [CONFIG] group with its options (list # value) #enabled_backends= # A list of share backend names to use. These backend names # should be backed by a unique [CONFIG] group with its options # (list value) #enabled_share_backends= # Whether snapshots count against GigaByte quota (boolean # value) #no_snapshot_gb_quota=false # # Options defined in manila.policy # # JSON file representing policy (string value) #policy_file=policy.json # Rule checked when requested rule is not found (string value) #policy_default_rule=default # # Options defined in manila.quota # # number of shares allowed per project (integer value) #quota_shares=10 # number of share snapshots allowed per project (integer # value) #quota_snapshots=10 # number of share gigabytes (snapshots are also included) # allowed per project (integer value) #quota_gigabytes=1000 # number of seconds until a reservation expires (integer # value) #reservation_expire=86400 # count of reservations until usage is refreshed (integer # value) #until_refresh=0 # number of seconds between subsequent usage refreshes # (integer value) #max_age=0 # default driver to use for quota checks (string 
value) #quota_driver=manila.quota.DbQuotaDriver # # Options defined in manila.service # # seconds between nodes reporting state to datastore (integer # value) #report_interval=10 # seconds between running periodic tasks (integer value) #periodic_interval=60 # range of seconds to randomly delay when starting the # periodic task scheduler to reduce stampeding. (Disable by # setting to 0) (integer value) #periodic_fuzzy_delay=60 # IP address for OpenStack Share API to listen (string value) #osapi_share_listen=0.0.0.0 # port for os share api to listen (integer value) #osapi_share_listen_port=8786 # # Options defined in manila.test # # File name of clean sqlite db (string value) #sqlite_clean_db=clean.sqlite # should we use everything for testing (boolean value) #fake_tests=true # # Options defined in manila.wsgi # # Number of backlog requests to configure the socket with # (integer value) #backlog=4096 # Sets the value of TCP_KEEPIDLE in seconds for each server # socket. Not supported on OS X. (integer value) #tcp_keepidle=600 # CA certificate file to use to verify connecting clients # (string value) #ssl_ca_file= # Certificate file to use when starting the server securely # (string value) #ssl_cert_file= # Private key file to use when starting the server securely # (string value) #ssl_key_file= # # Options defined in manila.api.middleware.auth # # Treat X-Forwarded-For as the canonical remote address. Only # enable this if you have a sanitizing proxy. 
(boolean value) #use_forwarded_for=false # # Options defined in manila.api.middleware.sizelimit # # Max size for body of a request (integer value) #osapi_max_request_body_size=114688 # # Options defined in manila.db.api # # The backend to use for db (string value) #db_backend=sqlalchemy # Services to be added to the available pool on create # (boolean value) #enable_new_services=true # Template string to be used to generate share names (string # value) #share_name_template=share-%s # Template string to be used to generate share snapshot names # (string value) #share_snapshot_name_template=share-snapshot-%s # # Options defined in manila.db.base # # driver to use for database access (string value) #db_driver=manila.db # # Options defined in manila.image.image_utils # # parent dir for tempdir used for image conversion (string # value) #image_conversion_dir=/tmp # # Options defined in manila.openstack.common.eventlet_backdoor # # port for eventlet backdoor to listen (integer value) #backdoor_port= # # Options defined in manila.openstack.common.lockutils # # Whether to disable inter-process locks (boolean value) #disable_process_locking=false # Directory to use for lock files. Default to a temp directory # (string value) #lock_path= # # Options defined in manila.openstack.common.log # # Print debugging output (set logging level to DEBUG instead # of default WARNING level). (boolean value) #debug=false # Print more verbose output (set logging level to INFO instead # of default WARNING level). 
(boolean value) #verbose=false # Log output to standard error (boolean value) #use_stderr=true # Default file mode used when creating log files (string # value) #logfile_mode=0644 # format string to use for log messages with context (string # value) #logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s # format string to use for log messages without context # (string value) #logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # data to append to log format when level is DEBUG (string # value) #logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d # prefix each line of exception output with this format # (string value) #logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s # list of logger=LEVEL pairs (list value) #default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN # publish error events (boolean value) #publish_errors=false # make deprecations fatal (boolean value) #fatal_deprecations=false # If an instance is passed with the log message, format it # like this (string value) #instance_format="[instance: %(uuid)s] " # If an instance UUID is passed with the log message, format # it like this (string value) #instance_uuid_format="[instance: %(uuid)s] " # If this option is specified, the logging configuration file # specified is used and overrides any other logging options # specified. Please see the Python logging module # documentation for details on logging configuration files. # (string value) #log_config= # A logging.Formatter log message format string which may use # any of the available logging.LogRecord attributes. Default: # %(default)s (string value) #log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s # Format string for %%(asctime)s in log records. 
Default: # %(default)s (string value) #log_date_format=%Y-%m-%d %H:%M:%S # (Optional) Name of log file to output to. If no default is # set, logging will go to stdout. (string value) #log_file= # (Optional) The base directory used for relative --log-file # paths (string value) #log_dir= # Use syslog for logging. (boolean value) #use_syslog=false # syslog facility to receive log lines (string value) #syslog_log_facility=LOG_USER # # Options defined in manila.openstack.common.notifier.api # # Driver or drivers to handle sending notifications (multi # valued) # Default notification level for outgoing notifications # (string value) #default_notification_level=INFO # Default publisher_id for outgoing notifications (string # value) #default_publisher_id=$host # # Options defined in manila.openstack.common.notifier.rpc_notifier # # AMQP topic used for openstack notifications (list value) #notification_topics=notifications # # Options defined in manila.openstack.common.notifier.rpc_notifier2 # # AMQP topic(s) used for openstack notifications (list value) #topics=notifications # # Options defined in manila.openstack.common.rpc # # The messaging module to use, defaults to kombu. (string # value) #rpc_backend=manila.openstack.common.rpc.impl_kombu # Size of RPC thread pool (integer value) #rpc_thread_pool_size=64 # Size of RPC connection pool (integer value) #rpc_conn_pool_size=30 # Seconds to wait for a response from call or multicall # (integer value) #rpc_response_timeout=60 # Seconds to wait before a cast expires (TTL). Only supported # by impl_zmq. (integer value) #rpc_cast_timeout=30 # Modules of exceptions that are permitted to be recreatedupon # receiving exception data from an rpc call. 
(list value) #allowed_rpc_exception_modules=manila.openstack.common.exception,nova.exception,manila.exception,exceptions # If passed, use a fake RabbitMQ provider (boolean value) #fake_rabbit=false # AMQP exchange to connect to if using RabbitMQ or Qpid # (string value) #control_exchange=openstack # # Options defined in manila.openstack.common.rpc.amqp # # Enable a fast single reply queue if using AMQP based RPC # like RabbitMQ or Qpid. (boolean value) #amqp_rpc_single_reply_queue=false # # Options defined in manila.openstack.common.rpc.impl_kombu # # SSL version to use (valid only if SSL enabled) (string # value) #kombu_ssl_version= # SSL key file (valid only if SSL enabled) (string value) #kombu_ssl_keyfile= # SSL cert file (valid only if SSL enabled) (string value) #kombu_ssl_certfile= # SSL certification authority file (valid only if SSL enabled) # (string value) #kombu_ssl_ca_certs= # The RabbitMQ broker address where a single node is used # (string value) #rabbit_host=localhost # The RabbitMQ broker port where a single node is used # (integer value) #rabbit_port=5672 # RabbitMQ HA cluster host:port pairs (list value) #rabbit_hosts=$rabbit_host:$rabbit_port # connect over SSL for RabbitMQ (boolean value) #rabbit_use_ssl=false # the RabbitMQ userid (string value) #rabbit_userid=guest # the RabbitMQ password (string value) #rabbit_password=guest # the RabbitMQ virtual host (string value) #rabbit_virtual_host=/ # how frequently to retry connecting with RabbitMQ (integer # value) #rabbit_retry_interval=1 # how long to backoff for between retries when connecting to # RabbitMQ (integer value) #rabbit_retry_backoff=2 # maximum retries with trying to connect to RabbitMQ (the # default of 0 implies an infinite retry count) (integer # value) #rabbit_max_retries=0 # use durable queues in RabbitMQ (boolean value) #rabbit_durable_queues=false # use H/A queues in RabbitMQ (x-ha-policy: all).You need to # wipe RabbitMQ database when changing this option. 
(boolean # value) #rabbit_ha_queues=false # # Options defined in manila.openstack.common.rpc.impl_qpid # # Qpid broker hostname (string value) #qpid_hostname=localhost # Qpid broker port (integer value) #qpid_port=5672 # Qpid HA cluster host:port pairs (list value) #qpid_hosts=$qpid_hostname:$qpid_port # Username for qpid connection (string value) #qpid_username= # Password for qpid connection (string value) #qpid_password= # Space separated list of SASL mechanisms to use for auth # (string value) #qpid_sasl_mechanisms= # Seconds between connection keepalive heartbeats (integer # value) #qpid_heartbeat=60 # Transport to use, either 'tcp' or 'ssl' (string value) #qpid_protocol=tcp # Disable Nagle algorithm (boolean value) #qpid_tcp_nodelay=true # # Options defined in manila.openstack.common.rpc.impl_zmq # # ZeroMQ bind address. Should be a wildcard (*), an ethernet # interface, or IP. The "host" option should point or resolve # to this address. (string value) #rpc_zmq_bind_address=* # MatchMaker driver (string value) #rpc_zmq_matchmaker=manila.openstack.common.rpc.matchmaker.MatchMakerLocalhost # ZeroMQ receiver listening port (integer value) #rpc_zmq_port=9501 # Number of ZeroMQ contexts, defaults to 1 (integer value) #rpc_zmq_contexts=1 # Maximum number of ingress messages to locally buffer per # topic. Default is unlimited. (integer value) #rpc_zmq_topic_backlog= # Directory for holding IPC sockets (string value) #rpc_zmq_ipc_dir=/var/run/openstack # Name of this node. Must be a valid hostname, FQDN, or IP # address. Must match "host" option, if running Nova. (string # value) #rpc_zmq_host=manila # # Options defined in manila.openstack.common.rpc.matchmaker # # Matchmaker ring file (JSON) (string value) #matchmaker_ringfile=/etc/nova/matchmaker_ring.json # Heartbeat frequency (integer value) #matchmaker_heartbeat_freq=300 # Heartbeat time-to-live. 
(integer value) #matchmaker_heartbeat_ttl=600 # # Options defined in manila.openstack.common.rpc.matchmaker_redis # # Host to locate redis (string value) #host=127.0.0.1 # Use this port to connect to redis host. (integer value) #port=6379 # Password for Redis server. (optional) (string value) #password= # # Options defined in manila.scheduler.driver # # The scheduler host manager class to use (string value) #scheduler_host_manager=manila.scheduler.host_manager.HostManager # Maximum number of attempts to schedule a share (integer # value) #scheduler_max_attempts=3 # # Options defined in manila.scheduler.host_manager # # Which filter class names to use for filtering hosts when not # specified in the request. (list value) #scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter # Which weigher class names to use for weighing hosts. (list # value) #scheduler_default_weighers=CapacityWeigher # # Options defined in manila.scheduler.manager # # Default scheduler driver to use (string value) #scheduler_driver=manila.scheduler.filter_scheduler.FilterScheduler # # Options defined in manila.scheduler.scheduler_options # # Absolute path to scheduler configuration JSON file. (string # value) #scheduler_json_config_location= # # Options defined in manila.scheduler.simple # # maximum number of volume gigabytes to allow per host # (integer value) #max_gigabytes=10000 # # Options defined in manila.scheduler.weights.capacity # # Multiplier used for weighing volume capacity. Negative # numbers mean to stack vs spread. 
(floating point value) #capacity_weight_multiplier=1.0 # # Options defined in manila.share.driver # # number of times to attempt to run flakey shell commands # (integer value) #num_shell_tries=3 # The percentage of backend capacity reserved (integer value) #reserved_share_percentage=0 # The backend name for a given driver implementation (string # value) #share_backend_name= # # Options defined in manila.share.drivers.lvm # # Base folder where exported shares are located (string value) #share_export_root=$state_path/mnt # IP to be added to export string (string value) #share_export_ip= # Path to smb config (string value) #smb_config_path=$state_path/smb.conf # If set, create lvms with multiple mirrors. Note that this # requires lvm_mirrors + 2 pvs with available space (integer # value) #share_lvm_mirrors=0 # Name for the VG that will contain exported shares (string # value) #share_volume_group=stack-shares # Specify list of share export helpers. (list value) #share_lvm_helpers=CIFS=manila.share.drivers.lvm.CIFSNetConfHelper,NFS=manila.share.drivers.lvm.NFSHelper # # Options defined in manila.share.drivers.netapp # # URL of the WSDL file for the DFM server (string value) #netapp_nas_wsdl_url= # User name for the DFM server (string value) #netapp_nas_login= # Password for the DFM server (string value) #netapp_nas_password= # Hostname for the DFM server (string value) #netapp_nas_server_hostname= # Port number for the DFM server (integer value) #netapp_nas_server_port=8088 # Use secure connection to server. 
(boolean value) #netapp_nas_server_secure=true # # Options defined in manila.share.manager # # Driver to use for share creation (string value) #share_driver=manila.share.drivers.lvm.LVMShareDriver # # Option defined in manila.network.neutron.api # #neutron_url = http://127.0.0.1:9696 #neutron_region_name = RegionOne #neutron_admin_tenant_name = service #neutron_auth_strategy = keystone #neutron_admin_auth_url = http://127.0.0.1:35357/v2.0 #neutron_admin_password = %admin_pass #neutron_admin_username = neutron # Total option count: 180 manila-2013.2.dev175.gbf1a399/etc/manila/policy.json0000664000175000017500000000240612301410454021720 0ustar chuckchuck00000000000000{ "context_is_admin": [["role:admin"]], "admin_or_owner": [["is_admin:True"], ["project_id:%(project_id)s"]], "default": [["rule:admin_or_owner"]], "admin_api": [["is_admin:True"]], "share:create": [], "share:get_all": ["project_id:%(project_id)s"], "share:get_snapshot": ["project_id:%(project_id)s"], "share:get_all_snapshots": ["project_id:%(project_id)s"], "share_extension:quotas:show": [], "share_extension:quotas:update_for_project": [["rule:admin_api"]], "share_extension:quotas:update_for_user": [["rule:admin_or_projectadmin"]], "share_extension:quota_classes": [], "share_extension:share_admin_actions:reset_status": [["rule:admin_api"]], "share_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]], "share_extension:services": [["rule:admin_api"]], "security_service:create": [["rule:admin_api"]], "security_service:delete": [["rule:admin_api"]], "security_service:update": [["rule:admin_api"]], "share_network:create": [["rule:admin_api"]], "share_network:delete": [["rule:admin_api"]], "share_network:update": [["rule:admin_api"]], "share_network:add_security_service": [["rule:admin_api"]], "share_network:remove_security_service": [["rule:admin_api"]] } manila-2013.2.dev175.gbf1a399/etc/manila/api-paste.ini0000664000175000017500000000262012301410454022110 0ustar 
chuckchuck00000000000000############# # OpenStack # ############# [composite:osapi_share] use = call:manila.api:root_app_factory /: apiversions /v1: openstack_share_api_v1 [composite:openstack_share_api_v1] use = call:manila.api.middleware.auth:pipeline_factory noauth = faultwrap sizelimit noauth apiv1 keystone = faultwrap sizelimit authtoken keystonecontext apiv1 keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1 [filter:faultwrap] paste.filter_factory = manila.api.middleware.fault:FaultWrapper.factory [filter:noauth] paste.filter_factory = manila.api.middleware.auth:NoAuthMiddleware.factory [filter:sizelimit] paste.filter_factory = manila.api.middleware.sizelimit:RequestBodySizeLimiter.factory [app:apiv1] paste.app_factory = manila.api.v1.router:APIRouter.factory [pipeline:apiversions] pipeline = faultwrap osshareversionapp [app:osshareversionapp] paste.app_factory = manila.api.versions:Versions.factory ########## # Shared # ########## [filter:keystonecontext] paste.filter_factory = manila.api.middleware.auth:ManilaKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory service_protocol = http service_host = 127.0.0.1 service_port = 5000 auth_host = 127.0.0.1 auth_port = 35357 auth_protocol = http admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = %SERVICE_USER% admin_password = %SERVICE_PASSWORD% signing_dir = /var/lib/manila manila-2013.2.dev175.gbf1a399/etc/manila/rootwrap.conf0000664000175000017500000000165412301410454022256 0ustar chuckchuck00000000000000# Configuration for manila-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! 
filters_path=/etc/manila/rootwrap.d,/usr/share/manila/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR manila-2013.2.dev175.gbf1a399/etc/manila/logging_sample.conf0000664000175000017500000000254712301410454023372 0ustar chuckchuck00000000000000[loggers] keys = root, manila [handlers] keys = stderr, stdout, watchedfile, syslog, null [formatters] keys = legacymanila, default [logger_root] level = WARNING handlers = null [logger_manila] level = INFO handlers = stderr qualname = manila [logger_amqplib] level = WARNING handlers = stderr qualname = amqplib [logger_sqlalchemy] level = WARNING handlers = stderr qualname = sqlalchemy # "level = INFO" logs SQL queries. # "level = DEBUG" logs SQL queries and results. # "level = WARNING" logs neither. (Recommended for production systems.) 
[logger_boto] level = WARNING handlers = stderr qualname = boto [logger_suds] level = INFO handlers = stderr qualname = suds [logger_eventletwsgi] level = WARNING handlers = stderr qualname = eventlet.wsgi.server [handler_stderr] class = StreamHandler args = (sys.stderr,) formatter = legacymanila [handler_stdout] class = StreamHandler args = (sys.stdout,) formatter = legacymanila [handler_watchedfile] class = handlers.WatchedFileHandler args = ('manila.log',) formatter = legacymanila [handler_syslog] class = handlers.SysLogHandler args = ('/dev/log', handlers.SysLogHandler.LOG_USER) formatter = legacymanila [handler_null] class = manila.common.openstack.NullHandler formatter = default args = () [formatter_legacymanila] class = manila.openstack.common.log.LegacyFormatter [formatter_default] format = %(message)s manila-2013.2.dev175.gbf1a399/etc/manila/rootwrap.d/0000775000175000017500000000000012301410516021622 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/etc/manila/rootwrap.d/volume.filters0000664000175000017500000000361712301410454024533 0ustar chuckchuck00000000000000# manila-rootwrap command filters for volume nodes # This file should be owned by (and only-writeable by) the root user [Filters] # manila/volume/iscsi.py: iscsi_helper '--op' ... ietadm: CommandFilter, ietadm, root tgtadm: CommandFilter, tgtadm, root tgt-admin: CommandFilter, tgt-admin, root rtstool: CommandFilter, rtstool, root # manila/volume/driver.py: 'vgs', '--noheadings', '-o', 'name' vgs: CommandFilter, vgs, root # manila/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. # manila/volume/driver.py: 'lvcreate', '-L', ... lvcreate: CommandFilter, lvcreate, root # manila/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... dd: CommandFilter, dd, root # manila/volume/driver.py: 'lvremove', '-f', %s/%s % ... lvremove: CommandFilter, lvremove, root # manila/volume/driver.py: 'lvdisplay', '--noheading', '-C', '-o', 'Attr',.. 
lvdisplay: CommandFilter, lvdisplay, root # manila/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... # manila/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... iscsiadm: CommandFilter, iscsiadm, root # manila/volume/drivers/lvm.py: 'shred', '-n3' # manila/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB' shred: CommandFilter, shred, root #manila/volume/.py: utils.temporary_chown(path, 0), ... chown: CommandFilter, chown, root # manila/volume/driver.py dmsetup: CommandFilter, dmsetup, root ln: CommandFilter, ln, root qemu-img: CommandFilter, qemu-img, root env: CommandFilter, env, root # manila/volume/driver.py: utils.read_file_as_root() cat: CommandFilter, cat, root # manila/volume/nfs.py stat: CommandFilter, stat, root mount: CommandFilter, mount, root df: CommandFilter, df, root du: CommandFilter, du, root truncate: CommandFilter, truncate, root chmod: CommandFilter, chmod, root rm: CommandFilter, rm, root lvs: CommandFilter, lvs, root # manila/volume/scality.py mount: CommandFilter, mount, root dd: CommandFilter, dd, root manila-2013.2.dev175.gbf1a399/etc/manila/rootwrap.d/share.filters0000664000175000017500000000377112301410454024327 0ustar chuckchuck00000000000000# manila-rootwrap command filters for share nodes # This file should be owned by (and only-writeable by) the root user [Filters] # manila/share/drivers/lvm.py: 'mkfs.ext4', '/dev/mapper/%s' mkfs.ext4: CommandFilter, /sbin/mkfs.ext4, root # manila/share/drivers/lvm.py: 'exportfs', ... 
exportfs: CommandFilter, /usr/sbin/exportfs, root # manila/share/drivers/lvm.py: 'smbd', '-s', '%s', '-D' smbd: CommandFilter, /usr/sbin/smbd, root # manila/share/drivers/lvm.py: 'umount', '-f', '%s' umount: CommandFilter, /bin/umount, root # manila/share/drivers/lvm.py: 'mount', '/dev/mapper/%s', '%s' mount: CommandFilter, /bin/mount, root # manila/share/drivers/lvm.py: 'chmod', '777', '%s' chmod: CommandFilter, /bin/chmod, root # manila/share/drivers/lvm.py: 'chown', 'nobody', '-R', '%s' chown: CommandFilter, /bin/chown, root # manila/share/drivers/lvm.py: 'pkill', '-HUP', 'smbd' pkill: CommandFilter, /usr/bin/pkill, root # manila/share/drivers/lvm.py: 'smbcontrol', 'all', 'close-share', '%s' smbcontrol: CommandFilter, /usr/bin/smbcontrol, root # manila/share/drivers/lvm.py: 'net', 'conf', 'addshare', '%s', '%s', 'writeable=y', 'guest_ok=y # manila/share/drivers/lvm.py: 'net', 'conf', 'delshare', '%s' # manila/share/drivers/lvm.py: 'net', 'conf', 'setparm', '%s', '%s', '%s' # manila/share/drivers/lvm.py: 'net', 'conf', 'getparm', '%s', 'hosts allow' net: CommandFilter, /usr/bin/net, root # manila/share/drivers/glusterfs.py: 'mkdir', '%s' mkdir: CommandFilter, /usr/bin/mkdir, root # manila/share/drivers/glusterfs.py: 'rm', '-rf', '%s' rm: CommandFilter, /usr/bin/rm, root # manila/share/drivers/glusterfs.py: 'gluster', '--xml', 'volume', 'info', '%s' # manila/share/drivers/glusterfs.py: 'gluster', 'volume', 'set', '%s', 'nfs.export-dir', '%s' gluster: CommandFilter, /usr/sbin/gluster, root # manila/network/linux/ip_lib.py: 'ip', 'netns', 'exec', '%s', '%s' ip: CommandFilter, /sbin/ip, root # manila/network/linux/interface.py: 'ovs-vsctl', 'add-port', '%s', '%s' ovs-vsctl: CommandFilter, /usr/bin/ovs-vsctl, root manila-2013.2.dev175.gbf1a399/ChangeLog0000664000175000017500000013450412301410516017270 0ustar chuckchuck00000000000000commit bf1a39980928767c343b48304163032155bdca7e Merge: 25d52a2 a1c2cdb Author: Jenkins Date: Thu Feb 20 11:02:23 2014 +0000 Merge "Fixed 
detail list for security-services" commit 25d52a23030b2e898f1ac914f6268304fe8cefe3 Merge: 6326845 e416fc9 Author: Jenkins Date: Thu Feb 20 10:29:31 2014 +0000 Merge "Fixes bug with path to ssh keys" commit e416fc97ffb84b16ea709025137c657e88bad95b Author: Andrei V. Ostapenko Date: Wed Feb 19 12:52:17 2014 +0200 Fixes bug with path to ssh keys Adds expansion of '~', if it is used in path to ssh key Closes: bug 1282031 Change-Id: I82cc98908bbd547dfa42d2d0da68fab3601b0406 commit 63268451aa262417ff39d0e0955825d4dc820aef Merge: e0efa6e ce2ac69 Author: Jenkins Date: Wed Feb 19 19:46:43 2014 +0000 Merge "Removes use of timeutils.set_time_override" commit a1c2cdbc7e793a05ea7c590979e25ecb8fe272a0 Author: Yulia Portnova Date: Wed Feb 19 14:09:24 2014 +0200 Fixed detail list for security-services Added passing collection to resource. Change-Id: I06ba968e203d06671724314f25b6da58b073761f Closes-Bug: #1282057 commit e0efa6e563f3b473e672401e5abeb0cfc526849c Merge: 9d163f2 e9d14b4 Author: Jenkins Date: Tue Feb 18 11:27:34 2014 +0000 Merge "Added to devstack plugin passwords for services" commit 9d163f2c417cd558234d33b450a199ef523075a4 Author: vponomaryov Date: Tue Feb 18 11:16:26 2014 +0200 Removed cinder artifacts in devstack plugin In manila's devstack plugin left some artifacts from cinder. So, it should be replaced with proper data. Change-Id: Ic0d04e63a1ac6ea84e1ce0394e1bde2b1423c273 commit e9d14b4aa1b65b096a4101b7ad9b4bfa3a4e2eaf Author: vponomaryov Date: Tue Feb 18 11:04:30 2014 +0200 Added to devstack plugin passwords for services Drivers use neutron, and neutron auth requires password, currently devstack does not set password by default. Same to nova and cinder. Change-Id: I5637b938ecbf6813111213e7e5908f456b531c7d commit 21980f84f2de5c8803650f125e282d4d493568a2 Merge: feca18a 49e167f Author: Jenkins Date: Mon Feb 17 10:14:50 2014 +0000 Merge "Fix devstack plugin's usage of RECLONE option" commit feca18a1ca7ac5cce4a2d0b1cdc9667b9e7b0f3f Author: Andrei V. 
Ostapenko Date: Wed Jan 29 13:43:34 2014 +0200 Generic driver Generic driver, that uses cinder volumes as backend and nova instances to reach multitenancy Implements: bp generic-driver Change-Id: I1d8d4ff15ecbbe2ea0c794d93de950616422279b commit 49e167fdd0b8985255377abc390b6b8c88ae8593 Author: vponomaryov Date: Thu Feb 13 15:29:42 2014 +0200 Fix devstack plugin's usage of RECLONE option Current code redefines "reclone" option and devstack always reclones manila's code. It should depend on devstack's localrc definition of reclone Change-Id: Ie04895a12e348c08329fc5cc1d997710daed056f Closes-Bug: 1279799 commit ce2ac6901d8d147dbe39f0d360cdeec268257637 Author: Zhongyue Luo Date: Wed Feb 12 15:45:44 2014 +0800 Removes use of timeutils.set_time_override The set_time_override function in timeutils was written as a helper function to mock utcnow for unittests before 'mock' was generally used. Now that we have mock and fixture, we no longer need to use it. Change-Id: Ida9b5b0130a38f45e1f51bf4fbdb1d1a5851cf82 Partial-Bug: #1266962 commit ce5eeb7364d656c8791786fc2f002a82753febf2 Author: Andrei V. Ostapenko Date: Wed Jan 29 13:37:46 2014 +0200 Adds modules for managing network interfaces for generic driver Adds modules for creating and managing logical interfaces. This modules create logical interface and attach it to OVS or Linux bridge to associate with neutron port. Module manila.network.linux.ip_lib.py executes linux 'ip' command for logical interface setup. Module manila.network.linux.interface.py is responsible for plugging logical interface into OVS or Linux bridge. Class in module manila.network.linux.ovs_lib.py represents OVS bridge and allows to manage it. Partially implements: bp generic-driver Change-Id: Iaa97e961f1670479a59a2f9adba5953d271b1818 commit 61a52b88d10ae3cc3b5d03cd60d51b7842a2aa88 Author: Andrei V. 
Ostapenko Date: Sat Jan 25 02:03:41 2014 +0200 Extends neutron api with methods needed for generic driver Adds functions for managing networks, subnets, routers Partially implements: bp generic-driver Change-Id: I513e914148b260451a9247f3b987ad9c6818b472 commit 9a7e113b3accdc4085be588c8ea1d7473aca7704 Author: Andrei V. Ostapenko Date: Thu Jan 9 13:20:40 2014 +0200 Adds nova api needed for generic driver implementation Generic driver will use nova instances to reach multitenancy, so this patch adds a module for interacting with nova. Partially implements: bp generic-driver Change-Id: Ic51971f0fe72bcbbe364e8f6aa4c59a8ec267c83 commit fbde9ae88fcde2e9f378d7592d9e56710bd16bb5 Author: Andrei V. Ostapenko Date: Mon Dec 30 13:46:45 2013 +0200 Adds cinder api needed for generic driver implementation Generic driver will use cinder volumes as back-end for shares, so this patch adds a module for interacting with cinder. Partially implements: bp generic-driver Change-Id: Ide3d98efa8cf38994548934ad445f3561f5e3106 commit 2be7b3b782c0666097d031cbc89e1af724785dab Merge: c7bbab2 9ae1349 Author: Jenkins Date: Thu Feb 6 17:01:30 2014 +0000 Merge "Add policy checks in share networks API" commit c7bbab22896d6cd4097e2b1474e9cfc4d6c3733d Merge: 8bc4867 826b156 Author: Jenkins Date: Thu Feb 6 17:00:20 2014 +0000 Merge "Fix policy.py" commit 8bc48675ec77fd63b8c866fcb9561c3156afdf07 Merge: e8ade68 5525e97 Author: Jenkins Date: Thu Feb 6 16:59:27 2014 +0000 Merge "Add network id verification on share creation" commit e8ade681b61ad3052d990907081a7427269de591 Author: Aleks Chirko Date: Tue Feb 4 14:13:11 2014 +0200 Squash all migrations into one Migration #12 have fixed bug https://bugs.launchpad.net/manila/+bug/1272303, but introduced a new one. Because that bug was rather architectural, we need to address that architectural flaw instead of fixing bugs as they come. 
Closes-Bug: #1275044 Change-Id: I260a90abd1107c62b687118d5d4a01d7199ea56f commit 5525e97571fcb6d73720289ad9ae21c84ff60a9c Author: Aleks Chirko Date: Wed Feb 5 18:03:21 2014 +0200 Add network id verification on share creation Change-Id: Icf69bfe993b2145f8a4403bcd709a9edfc75de99 Closes-Bug: #1273600 commit 9ae134954fad334593a2dffe83b111799ebae68b Author: Aleks Chirko Date: Wed Feb 5 16:51:48 2014 +0200 Add policy checks in share networks API Add sample policy configuration for share network API. Closes-Bug: #1271943 Change-Id: I77aad91014d7c0ef125192bddeae7aafaaed3aef commit 826b15692ee0cfa509d82d13fbba09c56c9c6b27 Author: Aleks Chirko Date: Fri Jan 31 18:37:11 2014 +0200 Fix policy.py Because inside check_policy() there is hardcoded 'share' target prepended to all policies, any policy we check will be checked against 'share' policy. Change check_policy() to use explicit target and action instead of just action. Change wrap_check_policy decorator to be a decorator maker which accepts resource name as an argument. Closes-Bug: #1274951 Partial-Bug: #1271943 Change-Id: I85c184035619d78107d56ea94918f608d8d7c282 commit 9312341359098835e93257006e86e2b8e1e12981 Author: Aleks Chirko Date: Tue Feb 4 19:07:06 2014 +0200 Updated from global requirements Change-Id: If99b11499a780f8beb81305f8c49753715a213e9 commit 6de896e5c5fb8bb8dd22497b35ba9aebdb5add62 Author: Aleks Chirko Date: Fri Jan 31 19:31:52 2014 +0200 Fix bad calls to model_query() model_query() function was changed recently to use 'model' object as a second positional argument, so all callers need to adhere to this requirement. Fix 'bad' model_query(). Remove 'skips' from tests that were failing due to bad model_query() calls. 
Closes-Bug: #1275026 Change-Id: Ibf3d45c6a37493cc0ced7cc6e6c552f27b6e4338 commit 0f6a7cb42223b77368f0083e64e75bb126e791ae Merge: 9bde483 a842c55 Author: Jenkins Date: Thu Jan 30 14:03:10 2014 +0000 Merge "Change manila DB to have working unique constraint" commit 9bde483ddf578ec131a3ff19330926c3e6ed1ae9 Merge: 6cf9f9c b103503 Author: Jenkins Date: Thu Jan 30 13:52:44 2014 +0000 Merge "Fixes handling of duplicate share access rule creation" commit a842c554e699174fdcc3a771c968ac978c4135a8 Author: Aleks Chirko Date: Fri Jan 24 15:59:29 2014 +0200 Change manila DB to have working unique constraint Because Manila DB uses 'soft delete', we can't use unique constaints properly. Solution is to change 'deleted' column from Boolean to the type of 'id' column and write value of 'id' to 'delete' on delete operation. To apply unique constr only to not deleted records we will be adding 'deleted' column to every uc from now on. Closes-Bug: #1272303 Change-Id: Ia673b91c7d7f700a25824766486a82f07203f3b6 commit 6cf9f9c83861e7e32f9ce90f2c35b5b73aa7fb10 Author: Aleks Chirko Date: Wed Jan 29 18:28:28 2014 +0200 Change 'deleted' to Boolean in project_user_quotas Change-Id: I7604ba4b938d5ff73bcf3b1f9152cc6aa4f1c0c8 Closes-Bug: #1274165 commit b103503d07debf8c96e918f343f083b8dfff04db Author: Andrei V. 
Ostapenko Date: Wed Jan 29 17:49:49 2014 +0200 Fixes handling of duplicate share access rule creation Closes: bug 1274163 Change-Id: Id6d649dcf03c5be9ce6fd7204dfa725d59582412 commit 7eca7e3d7dab7224d9f02dc290f9b088199f2b8a Merge: 37c63d6 67ce7ad Author: Jenkins Date: Fri Jan 24 14:16:55 2014 +0000 Merge "Fixes empty network_info for share" commit 67ce7ad16eddc77b4faaf7f81f4c0da225c5ab1b Author: Yulia Portnova Date: Fri Jan 24 15:32:21 2014 +0200 Fixes empty network_info for share Make _setup_share_network method in share manager return new network db reference Closes bug 1272335 Change-Id: I36392d2308c8e135eefb50a96ea9f41fac405a9e commit 37c63d60b44dc3bd90c52f608cdca10556551d8c Author: vponomaryov Date: Thu Jan 23 09:23:01 2014 -0500 Use actual rootwrap option in manila.conf instead deprecated one Change-Id: I0096e751cbf1b31944e8a7b15b46914dd79c340b Closes-Bug: #1267547 commit 0b5a4fc6e76f088426e2bf18bb57ee3db6518eb8 Merge: 687705f b6c3281 Author: Jenkins Date: Thu Jan 23 13:52:36 2014 +0000 Merge "Fix xml response for create/update security service" commit 687705fa299ee66db0e1170bb8ec536ea2690190 Merge: 85a4629 8a229a2 Author: Jenkins Date: Thu Jan 23 13:52:35 2014 +0000 Merge "Add 'password' field to the security service" commit 85a46293f2ed511ed138952cb945d3f8ae98d629 Merge: b29b678 f128d23 Author: Jenkins Date: Thu Jan 23 13:33:50 2014 +0000 Merge "Fix manila's devstack plugin for using Fedora/CentOS/RHEL distro" commit b29b678a2ed84c3d790b577deacc0cb7b23de6bc Merge: a538680 b92c6f4 Author: Jenkins Date: Thu Jan 23 13:30:36 2014 +0000 Merge "Adds network creation to ShareManager" commit a538680b53bcda4beb011a057f0c4e7b1bb0a6b2 Merge: c81ad66 a9247ae Author: Jenkins Date: Thu Jan 23 13:30:29 2014 +0000 Merge "Checking if access rule exists in share api" commit b6c32818805780ceb95b023969f340de027636df Author: Yulia Portnova Date: Thu Jan 23 14:42:21 2014 +0200 Fix xml response for create/update security service Changed xml template for create/update security 
service from SecurityServiceTemplates to SecurityServiceTemplate Closes bug 1269829 Change-Id: I07f2847b8897cfbc2ee3e1952709925c3c056774 commit 8a229a2c6e395d130c6205b74971b8b5d800f640 Author: Aleks Chirko Date: Thu Jan 23 13:33:36 2014 +0200 Add 'password' field to the security service Change-Id: I8a6d146645b33d7baff52902b7187b09cdbdf63a commit b92c6f4595afa4f2250349a7caea927be7a3174e Author: Andrei V. Ostapenko Date: Fri Dec 20 13:40:20 2013 +0200 Adds network creation to ShareManager Partially implements: bp join-tenant-network Change-Id: I85a69e882ad299b059e40bfea850a169e6e4660d commit a9247ae15b5c7969837cb7acc2ef9c876070aec2 Author: Yulia Portnova Date: Wed Jan 22 16:48:39 2014 +0200 Checking if access rule exists in share api Added db method share_access_get_all_by_type_and_access. Closes bug 1271520 Change-Id: I07702622cc920a04f8333bcf0634f01467c29965 commit c81ad66e7e9cda6034b31722bad54c924233d3ad Author: Aleks Chirko Date: Tue Jan 14 16:28:06 2014 +0200 Add share's networks API Add server side for share's networks. Implemented controller will carry user requests to the DB and thus will allow user to manage share's networks data. Add share's networks support to the share API. Partially implements bp: join-tenant-network Change-Id: Ie4f3945255a049e80083f08a39d7f703a5c75c5e commit d3131f40d214bfa902750f65c8da804c467ddd3a Author: Aleks Chirko Date: Thu Dec 5 15:51:32 2013 +0200 Add share's networks DB model, API and neutron support Add share's networks DB models and API for managing data about tenant's networks. Add neutron network plug-in which will be responsible for managing neutron network resources (creating and deleting ports). Partially implements bp: join-tenant-network Change-Id: I20249f3640a10fc3ed55003a817a994efdb64681 commit f128d235f5f6505ea749972df5e7ade40a731ad9 Author: Valeriy Ponomaryov Date: Tue Jan 21 20:33:36 2014 +0400 Fix manila's devstack plugin for using Fedora/CentOS/RHEL distro Move distro-specified commands into special conditions. 
Make changes to related places. Change-Id: I386e70b1f173909ccd5a31f3207047d54f7bc0d0 Closes-Bug: #1271100 commit c450c15f1ca192449c69a0d1b72e023a3f90f9c4 Merge: 74a5a7c 419ef80 Author: Jenkins Date: Thu Jan 16 20:04:45 2014 +0000 Merge "glusterfs: Add GlusterFS driver" commit 74a5a7c3b9691043ca61637b6461c942650cfdb7 Merge: d68efa4 a2240d9 Author: Jenkins Date: Wed Jan 15 13:21:01 2014 +0000 Merge "Add manila's tempest-plugin" commit a2240d911af03dac79381fe319396bbb30e56174 Author: vponomaryov Date: Tue Jan 14 15:46:34 2014 +0200 Add manila's tempest-plugin Adds plugin for tempest as for devstack Purpose - run tempest in ci job for manila project Partially implements bp: tempest-job Change-Id: I7a24a8fe6ca44f75d74e0c941dc25e561e79f81a commit d68efa454cec29a804297a88aa35e3aaaf8bcd19 Author: Yulia Portnova Date: Fri Jan 10 15:11:47 2014 +0200 Security service API Added security service controller to Manila v1 API. Partially implements bp: join-tenant-network Change-Id: Ic11feb44547bf438d925261b587edc828eac31c1 commit 55f42ea27c07f1879adda2774fd429ca624f8afe Author: Aleks Chirko Date: Wed Dec 18 17:34:00 2013 +0200 Add security service DB model and API. Add support for managing security services information. User will be able to create description for services such as ldap, kerberos and active directory. Security service record will contain type of service (kerberos, ldap or ad), ip address of dns server, security service server hostname or ip, domain, sid (security identifier) and other auxiliary data. 
Partially implements bp: join-tenant-network Change-Id: I50e1f96496a40840aad97b77024a11302a1dd997 commit 32b69b5ba49521108ae8d5f3039be767d2ce6da5 Merge: 14774cf 9edddea Author: Jenkins Date: Mon Jan 13 14:48:21 2014 +0000 Merge "Remove redundant options in devstack plugin" commit 9edddea74d021a796d5b85b03fe3a0b7181c00a0 Author: vponomaryov Date: Sat Jan 11 11:11:29 2014 +0200 Remove redundant options in devstack plugin In manila's devstack plugin configures manila.conf file, and it has redundant options, which is not used and deprecated. Change-Id: Iaf8f3ce925006cf423f46528e27abd8305c0c6e7 commit 14774cf8778a1f371bfad8bd98b325aac56cc1b4 Author: vponomaryov Date: Thu Jan 9 13:16:16 2014 +0200 Fix bug with full access to reset-state "Reset state" commands should be available only for admins. Change-Id: I34116212e91988f0282f7045449be61f581a3fce Closes-Bug: #1267432 commit d68e8a41a672ab1343f2093eab591f4592693a64 Merge: 4e39fb9 a78db60 Author: Jenkins Date: Wed Jan 8 16:06:01 2014 +0000 Merge "Adds an ability to reset snapshot state" commit 419ef809effc3a695b7e8bcadf9be05b2cfff327 Author: Ram Raja Date: Wed Nov 20 11:49:26 2013 +0530 glusterfs: Add GlusterFS driver Added class GlusterfsShareDriver, derived from manila.share.driver.ShareDriver, implementing create and delete share capabilities and access control. Change-Id: I909ad28d316d071dd1a4790843eae25ade4a9c95 commit 4e39fb94c0ddb999dd8665a8c1de2140b43f977d Author: vponomaryov Date: Mon Jan 6 16:22:56 2014 +0200 Fix manila's devstack plugin After devstack installation with manila, in some cases, database wasn't installed. Change-Id: I12069049d284e3039fa825a1cacda70e12911025 commit a78db602640355688caa88a8608c5a473eafe2bb Author: Andrei V. 
Ostapenko Date: Thu Dec 19 17:27:32 2013 +0200 Adds an ability to reset snapshot state Partially implements: bp manila-client-enhancements Change-Id: Iff9fd0b588b90eae4c9475fe28883322abca8246 commit dc253dce03a1fd963e230682be1f55ab97f70db2 Merge: ad89416 7b1f1a3 Author: Jenkins Date: Wed Dec 25 12:32:05 2013 +0000 Merge "Refactoring driver interfaces" commit ad89416af4c51c04dfa5ceece27e987bae141674 Merge: 7d4b39d fe8acf0 Author: Jenkins Date: Tue Dec 24 12:13:23 2013 +0000 Merge "Adds validation of access rules" commit fe8acf0ef647b7f46f8596b133c1827061ba8fa6 Author: Andrei V. Ostapenko Date: Wed Dec 18 15:01:29 2013 +0200 Adds validation of access rules Partially implements: bp improve-acl-functionality Change-Id: I46d3edcadf33afef6fac242284dc6d61b4712bc1 commit 7d4b39d83df6041c52ca56594c373244f58932e5 Author: Andrei V. Ostapenko Date: Thu Dec 19 13:09:35 2013 +0200 Adds admin actions extension to provide reset-state command Partially implements: bp manila-client-enhancements Change-Id: I9c2968e7c8c05ce31ac0aafa83f46e2072dce338 commit 7b1f1a3b38bf073bd8369b04690bce1e5e1fd692 Author: Yulia Portnova Date: Fri Dec 13 14:17:49 2013 +0200 Refactoring driver interfaces Added method create_share_form_snapshot to manila.share.driver.ShareDriver. Removed methods allocate_container, allocate_container_from_snapshot, create_export, deallocate_container, remove_export Implements bp refacto-driver-interfaces Change-Id: I9f1c26b189f50aedc8e19864a172557700adf849 commit 488b11bdd1a9fcbfe0d078ce3e0eef3fa09dbf96 Author: Yulia Portnova Date: Thu Nov 28 22:39:54 2013 +0200 Move NetAppApiClient to separate module Created module manila.share.drivers.netapp.api. Implements bp add-netapp-clustered-share-driver Change-Id: I944fc688c18dceeeb114b5b84b5a8b6b1bf23b45 commit a8166d2e29da04c5eb04f92f84d697cc3203af8f Author: Yulia Portnova Date: Thu Nov 28 22:26:04 2013 +0200 Moved netapp.py from drivers to drivers/netapp. 
Change-Id: Icfc1479476860e0155c43796a6f2e0d630b2d9b8 commit fb9b861b2c8b12f28f0d597764a6bae78a0b75de Author: Valeriy Ponomaryov Date: Thu Dec 5 14:59:40 2013 +0200 Insert validation of losetup duplicates Change-Id: I38b95a67ae8e102b167320ce11c74588fe5b4415 commit 24577d8dcecbab777492bf60c355aba9d1be2fe2 Author: Valeriy Ponomaryov Date: Thu Dec 5 14:24:11 2013 +0200 Remove redundant options for manila There were not only backend-specific options, but and deprecated options for this backend. Change-Id: I4c1e46c9b9b415e7884c887dba5551046596c5bd commit 03e32815912309153e5da3b4ccfa0eeccfb28ed6 Author: Valeriy Ponomaryov Date: Thu Dec 5 13:40:25 2013 +0200 Place devstack files to proper dirs Change-Id: I2e12e6573ebc4cb01a8bc464009ac63ef9a93780 commit dd017220fb00c8a4d12097638d6744433c8caeb9 Author: Andrei V. Ostapenko Date: Thu Dec 5 12:03:55 2013 +0200 Fixes inappropriate size of metadata value Fixes bug 1258079 Change-Id: If1ab03eafbb2c6fb5be77a83781dce533f253175 commit 3e7de88830521c2341b212d9cf791ba10c1409e8 Author: Valeriy Ponomaryov Date: Mon Dec 2 15:54:58 2013 +0200 Adds 'metadata' key to list of options for xml responses. Change-Id: I27a976f99ba92c9255abb3eafff2d7e9c4163656 Closes-Bug: #1256926 commit 2e1f4e191b806c6e5802c0b8a95548c707e047d9 Author: Andrei V. Ostapenko Date: Thu Nov 28 14:42:44 2013 +0200 Adds an ability to manage share metadata Now on share creating, we can specify metadata and it will be saved in database. Also support of setting, deleting and updating metadata info was added. 
Adds model for share metadata Adds migration Adds api extension for share metadata Adds relative api functions Partially implements bp manila-client-enhancements Change-Id: Id421323dd9e947d536b577092f1875f178d9aba5 commit cce07d59e189e0ae019345d52b7f5de490b4845e Author: Yulia Portnova Date: Thu Nov 7 16:03:18 2013 +0200 Added Neutron API module Implements bp join-tenant-network Change-Id: Ibda1c5e5ff903b289247cb212a239ea49ffa869a commit f9636fa2c96eb0e8f320dca08271da771832f28c Author: Bill Owen Date: Fri Nov 22 07:51:15 2013 -0700 Add consume_from_share method to HostState class Modify FilterScheduler._schedule_share to call new new method consume_from_share (it was calling consume_from_volume). Add HostState.consume_from_share to host_manager. Change-Id: I124674afe739ccf9648f91fcfbc63fd3be7caecc Closes-Bug: #1253860 commit d24b76b4d7d1c6ae49b0ee147ff7f30c2507bd26 Author: Valeriy Ponomaryov Date: Fri Nov 22 16:08:25 2013 +0200 Add devstack integration Add devstack integration for manila. Devstack has a plugin mechanism for unofficial projects to integrate with it without having to change any of devstack itself. This commit includes the files and some instructions on how to use them. Change-Id: I3b650b7696ccbcb7de9889a115e75310000774ca commit 08003fed2cc8f3d2db2962ed01a3b4ecf8c7539e Author: Bill Owen Date: Thu Nov 21 18:23:04 2013 -0700 Update requirements.txt for keystoneclient Update requirements.txt so that python-keystoneclient requirement is consistent with cinder and nova. Change-Id: Ifad04bde8c6f232c28841e12044e6050b25c9052 Closes-Bug: #1253853 commit 632416d1d73233965b30bad9ce23e1182ac02f22 Merge: 7e4e38f ea944df Author: Jenkins Date: Wed Nov 20 11:27:46 2013 +0000 Merge "Update openstack/common/lockutils" commit 7e4e38f038cca6d7a04ffd7c694bc90c607f06c4 Author: Sascha Peilicke Date: Tue Nov 19 11:24:56 2013 +0100 Support building wheels (PEP-427) With that, building and uploading wheels to PyPI is only one "python setup.py bdist_wheel" away. 
Change-Id: Ie69ebaf407748b3bc2763fa6b34d3b2b58ea6e3a commit ea944df962e1b42abc2f6760a434be230b7e2ad1 Author: Michael Still Date: Sat Nov 16 19:05:57 2013 +1100 Update openstack/common/lockutils The following commits are in this update: 79e6bc6 fix lockutils.lock() to make it thread-safe ace5120 Add main() to lockutils that creates temp dir for locks 537d8e2 Allow lockutils to get lock_path conf from envvar d498c42 Fix to properly log when we release a semaphore 29d387c Add LockFixture to lockutils 3e3ac0c Modify lockutils.py due to dispose of eventlet 90b6a65 Fix locking bug 27d4b41 Move synchronized body to a first-class function 15c17fb Make lock_file_prefix optional 1a2df89 Enable H302 hacking check b41862d Use param keyword for docstrings Change-Id: Ic0fb3b7de5817dbd69da761f625689c523932bc4 commit 128051bf8bc89d4e48e634466c66bedbc0431974 Merge: 141bcf7 54ee1b2 Author: Jenkins Date: Thu Oct 17 12:37:27 2013 +0000 Merge "Added per user-tenant quota support" commit 141bcf786e5443d76b73196379f457ab31db4dc8 Merge: 58c1959 58e2867 Author: Jenkins Date: Thu Oct 17 12:33:37 2013 +0000 Merge "Change wording of short description" commit 58c1959fcfa0c267c341f748a32a5a7f97b0e593 Merge: ae0263e f272ce5 Author: Jenkins Date: Thu Oct 17 08:10:47 2013 +0000 Merge "Remove unused manila.compute.aggregate_states" commit f272ce5b119d82ce891b908fb0ac241085a65b5b Author: Mark McLoughlin Date: Thu Oct 17 07:14:38 2013 +0100 Remove unused manila.compute.aggregate_states This appears to be unused. Change-Id: Id8bc376d9ea4f3cebbfde527a2870d529eb74cd8 commit ae0263e8496fc701c1682b6fa769496ca595342a Author: Mark McLoughlin Date: Thu Oct 17 07:08:01 2013 +0100 Remove obsolete redhat-eventlet.patch See I62ce43a330d7ae94eda4c7498782a655e63747fa for the gorey details on why this exists. As of this fix: https://github.com/eventlet/eventlet/pull/34 which was released in eventlet 0.13, we no longer need the patch. 
This has now been removed from oslo-incubator, so this is really just syncing that removal. Change-Id: I84267f3c6726cb2e750f615e107c48b12c6ed353 commit 54ee1b2aaa87c4d40db6c9fb35b93a6186effed2 Author: Andrei V. Ostapenko Date: Fri Oct 11 18:24:07 2013 +0300 Added per user-tenant quota support Added per user-tenant quota support. Added user-quotas extension, that turns on user quota support if it is loaded. Added force parameter, that lets to ignore check if admin want to force update when run 'manila quota-update' Added 'extended-quotas' extension that has provides ability for admins to be able to delete a non-default quota (absolute limit) for a tenant, so that tenant's quota will revert back to the configured default, and makes the force parameter always be passed if the client wants to set the new quota lower than what is already used and reserved. Added user quota support to db.api, sqlalchemy.api, sqlalchemy.models. Added migrations for user quota support. Implement bp: user-quota-support Change-Id: Ifb8f8a041c2fa54e2ed3a8219e87607b161438ca commit 58e286719282cdf53c207bcda7f54aa069130d40 Author: Monty Taylor Date: Tue Oct 15 19:45:32 2013 -0300 Change wording of short description It's important to keep the language very specific for trademark reasons. Change-Id: I147d0c56339099bb0bd602700fe57b67975efc36 commit 3f24fee2188e0a54bab6f710a8c223babfff2b21 Author: Andrei V. Ostapenko Date: Sat Oct 5 13:58:24 2013 +0300 Removing deprecated using of flags module from project Moving file flags.py to manila/common/config.py, replacing FLAGS by CONF. Rename modules fake_flags to conf_fixture, test_flags to test_conf, declare_flags to declare_conf, runtime_flags to runtime_conf like it was done in cinder, nova, glance etc. Implement bp: use-oslo-conf Change-Id: I38d869123e5e706d3b06f1844b97ead05e22668f commit a2b6193e0bda4a11553d219a44f4734b9ee9ffa3 Author: Yulia Portnova Date: Wed Oct 2 11:14:39 2013 +0300 Fixed share size validation while creating from snapshot. 
Fixes bug 1233755 Change-Id: Ie8689ccc5c3094d8a5303e3cafe842c0f3b964f3 commit cd6b68fa7ecbc92898690724a0e8f1e3461efa23 Merge: a017b97 c902da7 Author: Jenkins Date: Tue Oct 1 14:33:54 2013 +0000 Merge "Fixed xml response for share snapshot." commit c902da7256a41175f2bdc4aff4a5b25b04f96110 Author: 119Vik Date: Fri Sep 27 07:16:36 2013 -0400 Fixed xml response for share snapshot. For now xml response for snapshots show/list isn't full. Snapshot xml template was updated to make response full. Change-Id: I4d94d06213a539cdf6c876c6a2560fd63ff1957d commit a017b97e5e5d30a0a343b118102183af74610f13 Merge: d47e7f9 6e69c97 Author: Jenkins Date: Tue Oct 1 12:53:33 2013 +0000 Merge "Added share size checking if creating from snapshot" commit d47e7f9e83bc124fad6567b07397eaa2cfbbf6f1 Merge: bbd21eb 6dcaa7b Author: Jenkins Date: Tue Oct 1 10:44:51 2013 +0000 Merge "Fixed values passed to share_rpcapi.create_share." commit 6e69c9753c2af810bb1b23401c04591c48a29ea1 Author: Yulia Portnova Date: Tue Oct 1 13:43:27 2013 +0300 Added share size checking if creating from snapshot Fixes bug 1231974 Change-Id: I3148586de47a2474fdae288efb3adefd401f4552 commit 6dcaa7bdc0a8b7e8114914dbf87b20492b861c70 Author: Yulia Portnova Date: Mon Sep 30 13:18:42 2013 +0300 Fixed values passed to share_rpcapi.create_share. Values passed to share_rpcapi.create_share were in incorrect order. Fixed bug 1232126 Change-Id: Icaa9dcaea9d0ee7f325620380180994ce4e838dc commit bbd21eb4cc8019d520f61cb06a79d0dd6c720996 Author: Monty Taylor Date: Mon Sep 30 10:48:26 2013 -0400 Remove d2to1 dependency Change-Id: I9e91070e61e00cbc7d99ff223e85c3c371f60566 commit 9eb5e22d231f9d24b1c6e81c138a8933beb165c2 Merge: 208f51a 70c5541 Author: Jenkins Date: Fri Sep 27 16:44:35 2013 +0000 Merge "Fixed policy check for manila api." commit 208f51a6de7b1f884d36eeefd8cafafee77716a5 Merge: 6feae39 d36f1f0 Author: Jenkins Date: Fri Sep 27 13:09:40 2013 +0000 Merge "Update functionality implementation for manila api." 
commit d36f1f0c921855f7b9306bc89f1db9dc38189896 Author: 119Vik Date: Wed Sep 25 09:15:06 2013 -0400 Update functionality implementation for manila api. Currently manila api has no update functionality for shares and snapshots. Update functionality added to api and covered with unittests. Change-Id: Ie11d3d822e7c8b3a83e66aca5989d3fc1605b0ba Implements: blueprint share-update-feature commit 70c5541da2a979a76521f23f05b2978359c214df Author: 119Vik Date: Thu Sep 26 06:23:17 2013 -0400 Fixed policy check for manila api. Change-Id: I92e19cbfc1af5e7849151425fc12aa96479de7d3 commit 6feae392a0635e0a3d0d400f2a8f4b13bbf8ec59 Merge: 15dce88 b5c58d8 Author: Jenkins Date: Wed Sep 25 13:07:40 2013 +0000 Merge "Update README with relevant Manila information." commit 15dce88cb9c4093acddede7a2b4868bbaf53d899 Merge: fd3452a 5d610c2 Author: Jenkins Date: Wed Sep 25 13:01:23 2013 +0000 Merge "Fix xml response content for share list/show" commit fd3452a2e1dec67a9c743e533d0137d7b79d5951 Merge: 14301ca e034341 Author: Jenkins Date: Wed Sep 25 13:01:23 2013 +0000 Merge "Check policy implementation for shares api." commit 14301ca01e910a5ee4c60d404e3527ed5c04d978 Author: 119Vik Date: Tue Sep 24 09:54:12 2013 -0400 Added XML serialization for access actions Resources related to share access management hasn't xml serrialisation so it returns incorrect response for xml requests. XML temptates constructors added to share_actions.py ato solve this problem Change-Id: Ie76e768fc696c15dac4f18757ffcfe84335e04ce commit e03434198201e7fbd2ba39892adb46a5066b1cc5 Author: 119Vik Date: Wed Sep 25 04:54:43 2013 -0400 Check policy implementation for shares api. At this moment shares api don't check policy for most actions. Special check policy decorator added for necessary methods at api.py, to solve this problem. 
Change-Id: I253bda9cf3219343779d62d67fd0386a7c2f3b05 Fixes: Bug#1223375 commit b5c58d8c0b93e0921fd31bbdb845d5788b202b4c Author: Vijay Bellur Date: Wed Sep 25 09:40:06 2013 +0530 Update README with relevant Manila information. Change-Id: I3cce6498ff6aa9c6a8a1395d2703a92daaf04c81 commit 5d610c2d836d084fb476c087146f51f3dc8c455c Author: 119Vik Date: Tue Sep 24 10:47:50 2013 -0400 Fix xml response content for share list/show For now manila list and manila show requests return limited data for xml version. make_share method in shares.py updated to avoid data limiting during xml serialization. Change-Id: I9b241f02e1bbf46f876f235df4ce052490b8aeab commit 36adea3b26b25f4ddef7c7a75a6333b797aedeb3 Author: Yulia Portnova Date: Mon Sep 23 14:22:13 2013 +0300 Add .gitreview file. Change-Id: I895c6c4c897488e6aef5e60215e7abfd2f65fc90 commit 36a0e447a151836517c31964e9af5eb6f9a33003 Author: Yulia Portnova Date: Tue Sep 24 12:39:30 2013 +0300 Unittests failure fix Changed test.integrated.test_login. Modified policay.json Implemented FakeShareDriver Fixes bug 1229634 Change-Id: I7485c3695e209d4314d3a8b197dbffbb49d9e0d2 commit cfe530f114e86d3733f70010d62b010da079d063 Author: Yulia Portnova Date: Thu Sep 19 11:56:07 2013 +0300 Fixed snapshot_id None for share commit 6736d36bd6405f26f465ac057240d54acf45e509 Author: Yulia Portnova Date: Thu Sep 19 11:17:13 2013 +0300 Quota releasing on snapshot deleting bug fixed. commit a0ed749e13cf1b3786dfb5f1f1093033cbd057ab Author: Yulia Portnova Date: Wed Sep 18 16:52:05 2013 +0300 Fixed absolute limits. 
commit 27b3074627c9a8e8c8633cc4c544348fde3a505e Author: Yulia Portnova Date: Wed Sep 18 15:30:22 2013 +0300 fixed pep8 commit 5d8c002f23db4c919880418be79dd7981b408127 Author: Yulia Portnova Date: Wed Sep 18 12:27:54 2013 +0300 Stubed driver do_setup in start_service commit 83f2482cae409bf10e9a7d0092cbee5ee9d2a1ce Author: Yulia Portnova Date: Tue Sep 17 15:18:47 2013 +0300 Quota tests fixed commit 4cdec7dda8191ecd77acceb1f4d7681aba9142d7 Author: Yulia Portnova Date: Tue Sep 17 12:28:01 2013 +0300 removed egg-info commit 9a68e91f9e612d4f2531b0b3ca88aa8602b69fb8 Author: ubu Date: Tue Sep 17 05:24:40 2013 -0400 modified conf sample commit 894157512dbdb870c8eed88140a169d05a939920 Author: Yulia Portnova Date: Tue Sep 17 12:21:17 2013 +0300 modified docs commit 9169fc311e6793bb140ad86a758695b19c9a25f2 Author: Yulia Portnova Date: Tue Sep 17 10:57:47 2013 +0300 docs commit 901d841a234afb75155a74ef67055ff5f0f746a9 Author: Yulia Portnova Date: Mon Sep 16 16:36:35 2013 +0300 snapshot view, size added commit d20f8e92ab26b82d69a98d4d869899345c1554d3 Author: Yulia Portnova Date: Mon Sep 16 10:55:45 2013 +0300 quotas for snapshot commit 6026fc024401662f3a68064bc142cb93def937c5 Author: Yulia Portnova Date: Mon Sep 16 10:53:59 2013 +0300 fixed api error commit b3091cc0765f30e80f62dccf1ad59b1ae73b5c4e Author: Yulia Portnova Date: Mon Sep 16 10:10:00 2013 +0300 snapshot size commit aac5b4374a8b27fce3b025b2b0ea011bf8526983 Merge: 86bc890 69f3128 Author: 119Vik Date: Thu Sep 12 15:58:16 2013 +0300 Merge branch 'cinder-to-manila' of github.com:bswartz/manila into cinder-to-manila commit 86bc890f3b1866c94f79e518be1a83161ce1a4d4 Author: 119Vik Date: Thu Sep 12 15:58:01 2013 +0300 fixed TYPO commit 69f3128dbc643abc3d8c27aa62c7a5cab9eaf8fa Author: Yulia Portnova Date: Thu Sep 12 15:16:46 2013 +0300 Access create empty boy fix commit c115b5843bbaef3bd0ecb694b540666d0d9f01bc Author: Yulia Portnova Date: Thu Sep 12 14:53:54 2013 +0300 User cannot delete snapshot fix commit 
2e2491c97e5924b15d1115055c96323e0d11edae Author: Yulia Portnova Date: Thu Sep 12 10:27:43 2013 +0300 Can not delete share with error status fixed commit 19e557f29d640bc09e5919a19d85a6949db3e1ba Merge: 17274ef 52cd007 Author: 119Vik Date: Thu Sep 12 15:12:28 2013 +0300 Merge branch '119vik/bugfix' into cinder-to-manila commit 52cd007ad544d516d65c42a41d787274be8cf8ee Author: 119Vik Date: Thu Sep 12 12:39:35 2013 +0300 response status for share with snapshot delete request - fixed. commit a2c5ae0ed0eb0ed68a13cc650ad46bdf52f1dcad Author: 119Vik Date: Thu Sep 12 12:22:55 2013 +0300 fixed null value validation for snapshot id commit 17274ef2d05f69cbd63acb1508ef4354bf090743 Author: 119Vik Date: Wed Sep 11 17:19:50 2013 +0300 fixed share temaplate name commit c0e15af7fba670750d5f417f7f181bb811fce3dc Author: 119Vik Date: Wed Sep 11 16:15:22 2013 +0300 fixed share snapshots commit 9fde2b23acef2158a73ba82d1558480208ad1c5f Author: Yulia Portnova Date: Wed Sep 11 14:58:39 2013 +0300 pep8 fix commit 7f8be1b66a0be8125e75d6a3d0a2b3f53bf252bf Author: Yulia Portnova Date: Wed Sep 11 12:39:12 2013 +0300 License flake8 error fixed commit 4f5dde7db5771bbe2ad0595a909c0f1e8dae1fe0 Author: Yulia Portnova Date: Wed Sep 11 12:30:10 2013 +0300 Fixed flake8 errors commit 0d33cb038fbedf32dc2f7237cd586ffdb4cd76f6 Author: Yulia Portnova Date: Wed Sep 11 12:29:22 2013 +0300 Api share-snapshots to snapshots commit 3683f7feb9cfda46fbed11ae82b65d759fd90569 Author: Yulia Portnova Date: Tue Sep 10 16:36:10 2013 +0300 Removed unused imports commit 9a5c3791763716a12015458453df0e9c959bee45 Author: Yulia Portnova Date: Tue Sep 10 15:58:41 2013 +0300 Fixed api tests commit 1a253971c241bcea339cb6281e2958184a5eb269 Author: Yulia Portnova Date: Tue Sep 10 13:25:30 2013 +0300 Removed v2 api. 
Moved shares and snapshots from contrib to v1 commit 68cbc773de543937b4a98e94345c6a9a1fbcce4f Author: Yulia Portnova Date: Tue Sep 10 12:06:02 2013 +0300 quotas exception fix commit a41117df0532cffe3dc14b52d36c30f617bb1a6c Author: Yulia Portnova Date: Tue Sep 10 11:04:04 2013 +0300 Quotas fix commit 01ee3a3279ca8036be4e44dcac23494a73d1dfdf Author: Yulia Portnova Date: Tue Sep 10 11:03:42 2013 +0300 Deleted api v2 commit 68c71d8de219cc90a46cc7a918c91240620d4e3b Author: Yulia Portnova Date: Fri Sep 6 15:37:58 2013 +0300 Quotas fixed. quotas unittests fixed. commit 06d200f0aa3339a78a13298b3109d9a3935ab1e0 Author: Yulia Portnova Date: Fri Sep 6 10:56:03 2013 +0300 Removed ubused unittests. commit 65d4daa3b8ef4a5b3ba11a719e653859c8b30792 Author: Yulia Portnova Date: Thu Sep 5 16:44:11 2013 +0300 fixed fake flags commit 35fe8d3b7fe48a2c50608005229d33937c29e887 Author: Yulia Portnova Date: Thu Sep 5 16:41:32 2013 +0300 Removed volume specific tests commit a005c85ef1851c06019f1d455095a55ce0bf6d88 Merge: b2b51fb cc3f4fa Author: Yulia Portnova Date: Thu Sep 5 15:29:49 2013 +0300 merge commit b2b51fb29ff14f16bb136ddb0dcd932b1e7616b3 Author: Yulia Portnova Date: Thu Sep 5 15:27:46 2013 +0300 Mass replace osapi_volume to osapi_share Removed locale commit 59411bf710a5d6af3817bd67c1736a368bf48d1a Author: ubu Date: Thu Sep 5 08:14:22 2013 -0400 Update connfig.sample scripts commit cc3f4faaf846941651e8d728be2d60a19686fbcb Author: ubu Date: Thu Sep 5 08:14:22 2013 -0400 Update connfig.sample scripts commit f367977a0b46ecabcfaad8c88064a01a685b7219 Author: Yulia Portnova Date: Thu Sep 5 11:37:51 2013 +0300 Removed unused opts from flags.py commit 84aa8c2deeb02f29ecb9c16b5025f4ff8b172e52 Author: Yulia Portnova Date: Thu Sep 5 11:33:10 2013 +0300 removed some volume occurances commit 640b4b917635d538aab982a49a5e7b6d70680dd0 Author: Yulia Portnova Date: Thu Sep 5 11:16:08 2013 +0300 removed block specific exceptions commit c67fc2da7a0648eb302b6ef02ae92fcd48201414 Author: Yulia Portnova 
Date: Thu Sep 5 11:02:21 2013 +0300 osapi_volume to osapi_share commit 0016763230a57a2921d79c23a51d670bc28d1a71 Author: Yulia Portnova Date: Thu Sep 5 11:01:29 2013 +0300 removed volumes from bin scripts commit 468b5bfe3fa7829079fb85168ef3067f139eec4d Author: Yulia Portnova Date: Thu Sep 5 10:36:31 2013 +0300 Added help to smb_config_path conf commit 22da3e6479dfc3553b66bdae78efc0c2ab2fa335 Author: Yulia Portnova Date: Thu Sep 5 10:21:25 2013 +0300 modified fake flags commit 65d8167a0f247026760fb1ff1901454ccbf677f9 Author: Yulia Portnova Date: Thu Sep 5 10:20:33 2013 +0300 deleted brick commit c651f3f5f95654c4a0b69b8b508e6303a5754a2d Author: 119Vik Date: Wed Sep 4 17:08:27 2013 +0300 fixed manila manage commit 3daf82a5d9114b44e9fb2245b14659dfcba96f29 Author: Yulia Portnova Date: Wed Sep 4 15:51:05 2013 +0300 api-paste.ini: osapi_volume to osapi-share commit 4e2f27c11ab1d201e6dab05a73977c1ba5eea4e9 Author: Yulia Portnova Date: Wed Sep 4 15:44:38 2013 +0300 Replaced cinder with manila commit 5748e0dd298a6ecdc1bcd746cd066eec86f47745 Author: Yulia Portnova Date: Wed Sep 4 12:05:44 2013 +0300 Renamed service api config opts. 
Set default port to 8786 commit 49a5d0b46113ce75568fa44b129d6599df49ec23 Author: Yulia Portnova Date: Wed Sep 4 10:44:52 2013 +0300 removed volumes from scheduler commit 373bea0fe19e3640578a27193195276b0bde5273 Author: Yulia Portnova Date: Wed Sep 4 10:21:21 2013 +0300 deleteted .idea, added .gitignore commit c112a4115171ef0f5d4e559ab1338593493c9c3b Author: Yulia Portnova Date: Wed Sep 4 10:15:04 2013 +0300 volume api removed commit 7ba0c66512d4d9bb91f8106f85d684d61580ff71 Author: Yulia Portnova Date: Wed Sep 4 10:01:13 2013 +0300 fixed keystone context commit d02e3156a2f0687b9a204e40efba695a4463b150 Author: Yulia Portnova Date: Tue Sep 3 15:57:55 2013 +0300 api fix commit bb79e54fa136596a69d29a9b1a47ce62deda3079 Author: Yulia Portnova Date: Tue Sep 3 12:00:14 2013 +0300 Removed backups commit 1dde8fb5cfda07121ca7a702dc632553456eef15 Author: Yulia Portnova Date: Tue Sep 3 11:14:00 2013 +0300 DB cleaned commit 439855f1e844feb937ab5fe42e4cc3ca281ccfcc Author: Yulia Portnova Date: Tue Sep 3 10:40:38 2013 +0300 Removed SM models and migrations commit e1943b3d6a7581109c7eada6126eb378c26ae46b Author: Yulia Portnova Date: Mon Sep 2 15:35:50 2013 +0300 Modified models commit ee021608540f57a1cfc7aed19fcc98d37a7db4ef Author: Yulia Portnova Date: Mon Sep 2 15:05:13 2013 +0300 Modified migrations commit b8483b2644cf58e6ab20f796182a502ae02e78d1 Author: Yulia Portnova Date: Mon Sep 2 14:07:20 2013 +0300 Removed block-specific from DB api commit 9b20ae70ce4760f8a46f7b0ff9c78b508db2cc75 Author: Yulia Portnova Date: Mon Sep 2 10:43:46 2013 +0300 Deleted manila.volume commit dc4ce932ed304a2c1dbe68059bdea664792db67f Author: Yulia Portnova Date: Mon Sep 2 09:59:07 2013 +0300 Renamed cinder to manila. Fixed setup.py, fixed bin scripts. 
commit f99ef92c9024b1dfc604646c8c0912b963c11523 Author: Ben Swartzlander Date: Thu Aug 8 10:34:06 2013 -0400 Initialize from cinder commit 0483210d09f291dfef304d620a12f3d22550d56d Author: bswartz Date: Thu Aug 8 07:30:20 2013 -0700 Initial commitmanila-2013.2.dev175.gbf1a399/manila/0000775000175000017500000000000012301410516016750 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/wsgi.py0000664000175000017500000003752512301410454020310 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" import errno import os import socket import ssl import sys import time import eventlet import eventlet.wsgi import greenlet from oslo.config import cfg from paste import deploy import routes.middleware import webob.dec import webob.exc from manila import exception from manila.openstack.common import log as logging from manila import utils socket_opts = [ cfg.IntOpt('backlog', default=4096, help="Number of backlog requests to configure the socket with"), cfg.IntOpt('tcp_keepidle', default=600, help="Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. 
Not supported on OS X."), cfg.StrOpt('ssl_ca_file', default=None, help="CA certificate file to use to verify " "connecting clients"), cfg.StrOpt('ssl_cert_file', default=None, help="Certificate file to use when starting " "the server securely"), cfg.StrOpt('ssl_key_file', default=None, help="Private key file to use when starting " "the server securely"), ] CONF = cfg.CONF CONF.register_opts(socket_opts) CONF = cfg.CONF LOG = logging.getLogger(__name__) class Server(object): """Server class to manage a WSGI server, serving a WSGI application.""" default_pool_size = 1000 def __init__(self, name, app, host=None, port=None, pool_size=None, protocol=eventlet.wsgi.HttpProtocol): """Initialize, but do not start, a WSGI server. :param name: Pretty name for logging. :param app: The WSGI application to serve. :param host: IP address to serve the application. :param port: Port number to server the application. :param pool_size: Maximum number of eventlets to spawn concurrently. :returns: None """ self.name = name self.app = app self._host = host or "0.0.0.0" self._port = port or 0 self._server = None self._socket = None self._protocol = protocol self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) self._logger = logging.getLogger("eventlet.wsgi.server") self._wsgi_logger = logging.WritableLogger(self._logger) def _get_socket(self, host, port, backlog): bind_addr = (host, port) # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix try: info = socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] except Exception: family = socket.AF_INET cert_file = CONF.ssl_cert_file key_file = CONF.ssl_key_file ca_file = CONF.ssl_ca_file use_ssl = cert_file or key_file if cert_file and not os.path.exists(cert_file): raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) if ca_file and not os.path.exists(ca_file): raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) if key_file and not os.path.exists(key_file): raise RuntimeError(_("Unable to find key_file : %s") % key_file) if use_ssl and (not cert_file or not key_file): raise RuntimeError(_("When running server in SSL mode, you must " "specify both a cert_file and key_file " "option value in your configuration file")) def wrap_ssl(sock): ssl_kwargs = { 'server_side': True, 'certfile': cert_file, 'keyfile': key_file, 'cert_reqs': ssl.CERT_NONE, } if CONF.ssl_ca_file: ssl_kwargs['ca_certs'] = ca_file ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED return ssl.wrap_socket(sock, **ssl_kwargs) sock = None retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=backlog, family=family) if use_ssl: sock = wrap_ssl(sock) except socket.error, err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for 30 seconds") % {'host': host, 'port': port}) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) return sock def _start(self): """Run the blocking eventlet 
WSGI server. :returns: None """ eventlet.wsgi.server(self._socket, self.app, protocol=self._protocol, custom_pool=self._pool, log=self._wsgi_logger) def start(self, backlog=128): """Start serving a WSGI application. :param backlog: Maximum number of queued connections. :returns: None :raises: manila.exception.InvalidInput """ if backlog < 1: raise exception.InvalidInput( reason='The backlog must be more than 1') self._socket = self._get_socket(self._host, self._port, backlog=backlog) self._server = eventlet.spawn(self._start) (self._host, self._port) = self._socket.getsockname()[0:2] LOG.info(_("Started %(name)s on %(_host)s:%(_port)s") % self.__dict__) @property def host(self): return self._host @property def port(self): return self._port def stop(self): """Stop this server. This is not a very nice action, as currently the method by which a server is stopped is by killing its eventlet. :returns: None """ LOG.info(_("Stopping WSGI server.")) self._server.kill() def wait(self): """Block, until the server has stopped. Waits on the server's eventlet to finish, then returns. :returns: None """ try: self._server.wait() except greenlet.GreenletExit: LOG.info(_("WSGI server has stopped.")) class Request(webob.Request): pass class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = manila.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import manila.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(detail='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = manila.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import manila.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) class Debug(Middleware): """Helper class for debugging a WSGI application. Can be inserted into any WSGI application chain to get information about the request and response. """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): print ('*' * 40) + ' REQUEST ENVIRON' for key, value in req.environ.items(): print key, '=', value print resp = req.get_response(self.application) print ('*' * 40) + ' RESPONSE HEADERS' for (key, value) in resp.headers.iteritems(): print key, '=', value print resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """Iterator that prints the contents of a wrapper string.""" print ('*' * 40) + ' BODY' for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part print class Router(object): """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be an object that can route the request to the action-specific method. 
Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """Dispatch the request to the appropriate controller. Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return webob.exc.HTTPNotFound() app = match['controller'] return app class Loader(object): """Used to load WSGI applications from paste configurations.""" def __init__(self, config_path=None): """Initialize the loader, and attempt to find the config. :param config_path: Full or relative path to the paste config. :returns: None """ config_path = config_path or CONF.api_paste_config self.config_path = utils.find_config(config_path) def load_app(self, name): """Return the paste URLMap wrapped WSGI application. :param name: Name of the application to load. :returns: Paste URLMap object wrapping the requested application. 
:raises: `manila.exception.PasteAppNotFound` """ try: return deploy.loadapp("config:%s" % self.config_path, name=name) except LookupError as err: LOG.error(err) raise exception.PasteAppNotFound(name=name, path=self.config_path) manila-2013.2.dev175.gbf1a399/manila/api/0000775000175000017500000000000012301410516017521 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/versions.py0000664000175000017500000002145312301410454021751 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from lxml import etree from manila.api.openstack import wsgi from manila.api.views import versions as views_versions from manila.api import xmlutil from oslo.config import cfg CONF = cfg.CONF _KNOWN_VERSIONS = { "v2.0": { "id": "v2.0", "status": "CURRENT", "updated": "2012-11-21T11:33:21Z", "links": [ { "rel": "describedby", "type": "application/pdf", "href": "http://jorgew.github.com/block-storage-api/" "content/os-block-storage-1.0.pdf", }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", #(anthony) FIXME "href": "http://docs.rackspacecloud.com/" "servers/api/v1.1/application.wadl", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.volume+xml;version=1", }, { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1", } ], }, "v1.0": { "id": "v1.0", "status": "CURRENT", "updated": "2012-01-04T11:33:21Z", "links": [ { "rel": "describedby", "type": "application/pdf", "href": "http://jorgew.github.com/block-storage-api/" "content/os-block-storage-1.0.pdf", }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", #(anthony) FIXME "href": "http://docs.rackspacecloud.com/" "servers/api/v1.1/application.wadl", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.volume+xml;version=1", }, { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1", } ], } } def get_supported_versions(): versions = {} if CONF.enable_v1_api: versions['v1.0'] = _KNOWN_VERSIONS['v1.0'] if CONF.enable_v2_api: versions['v2.0'] = _KNOWN_VERSIONS['v2.0'] return versions class MediaTypesTemplateElement(xmlutil.TemplateElement): def will_render(self, datum): return 'media-types' in datum def make_version(elem): elem.set('id') elem.set('status') elem.set('updated') mts = MediaTypesTemplateElement('media-types') elem.append(mts) mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') mt.set('base') mt.set('type') 
xmlutil.make_links(elem, 'links') version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class VersionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('version', selector='version') make_version(root) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class VersionsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('versions') elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') make_version(elem) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class ChoicesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('choices') elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') make_version(elem) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class AtomSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_ATOM} def __init__(self, metadata=None, xmlns=None): self.metadata = metadata or {} if not xmlns: self.xmlns = wsgi.XMLNS_ATOM else: self.xmlns = xmlns def _get_most_recent_update(self, versions): recent = None for version in versions: updated = datetime.datetime.strptime(version['updated'], '%Y-%m-%dT%H:%M:%SZ') if not recent: recent = updated elif updated > recent: recent = updated return recent.strftime('%Y-%m-%dT%H:%M:%SZ') def _get_base_url(self, link_href): # Make sure no trailing / link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' def _create_feed(self, versions, feed_title, feed_id): feed = etree.Element('feed', nsmap=self.NSMAP) title = etree.SubElement(feed, 'title') title.set('type', 'text') title.text = feed_title # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) etree.SubElement(feed, 'updated').text = recent etree.SubElement(feed, 'id').text = feed_id link = etree.SubElement(feed, 'link') link.set('rel', 'self') link.set('href', feed_id) author = etree.SubElement(feed, 
'author') etree.SubElement(author, 'name').text = 'Rackspace' etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' for version in versions: feed.append(self._create_version_entry(version)) return feed def _create_version_entry(self, version): entry = etree.Element('entry') etree.SubElement(entry, 'id').text = version['links'][0]['href'] title = etree.SubElement(entry, 'title') title.set('type', 'text') title.text = 'Version %s' % version['id'] etree.SubElement(entry, 'updated').text = version['updated'] for link in version['links']: link_elem = etree.SubElement(entry, 'link') link_elem.set('rel', link['rel']) link_elem.set('href', link['href']) if 'type' in link: link_elem.set('type', link['type']) content = etree.SubElement(entry, 'content') content.set('type', 'text') content.text = 'Version %s %s (%s)' % (version['id'], version['status'], version['updated']) return entry class VersionsAtomSerializer(AtomSerializer): def default(self, data): versions = data['versions'] feed_id = self._get_base_url(versions[0]['links'][0]['href']) feed = self._create_feed(versions, 'Available API Versions', feed_id) return self._to_xml(feed) class VersionAtomSerializer(AtomSerializer): def default(self, data): version = data['version'] feed_id = version['links'][0]['href'] feed = self._create_feed([version], 'About This Version', feed_id) return self._to_xml(feed) class Versions(wsgi.Resource): def __init__(self): super(Versions, self).__init__(None) @wsgi.serializers(xml=VersionsTemplate, atom=VersionsAtomSerializer) def index(self, req): """Return all versions.""" builder = views_versions.get_view_builder(req) return builder.build_versions(get_supported_versions()) @wsgi.serializers(xml=ChoicesTemplate) @wsgi.response(300) def multi(self, req): """Return multiple choices.""" builder = views_versions.get_view_builder(req) return builder.build_choices(get_supported_versions(), req) def get_action_args(self, request_environment): """Parse dictionary created by routes 
library.""" args = {} if request_environment['PATH_INFO'] == '/': args['action'] = 'index' else: args['action'] = 'multi' return args class ShareVersionV1(object): @wsgi.serializers(xml=VersionTemplate, atom=VersionAtomSerializer) def show(self, req): builder = views_versions.get_view_builder(req) return builder.build_version(_KNOWN_VERSIONS['v1.0']) def create_resource(): return wsgi.Resource(ShareVersionV1()) manila-2013.2.dev175.gbf1a399/manila/api/schemas/0000775000175000017500000000000012301410516021144 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/schemas/atom-link.rng0000664000175000017500000000700112301410454023546 0ustar chuckchuck00000000000000 1 [^:]* .+/.+ [A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})* xml:base xml:lang manila-2013.2.dev175.gbf1a399/manila/api/schemas/v1.1/0000775000175000017500000000000012301410516021631 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/schemas/v1.1/extensions.rng0000664000175000017500000000032212301410454024536 0ustar chuckchuck00000000000000 manila-2013.2.dev175.gbf1a399/manila/api/schemas/v1.1/extension.rng0000664000175000017500000000072112301410454024356 0ustar chuckchuck00000000000000 manila-2013.2.dev175.gbf1a399/manila/api/schemas/v1.1/metadata.rng0000664000175000017500000000043512301410454024124 0ustar chuckchuck00000000000000 manila-2013.2.dev175.gbf1a399/manila/api/schemas/v1.1/limits.rng0000664000175000017500000000172312301410454023646 0ustar chuckchuck00000000000000 manila-2013.2.dev175.gbf1a399/manila/api/sizelimit.py0000664000175000017500000000217012301410454022105 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api.middleware import sizelimit from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): def __init__(self, *args, **kwargs): LOG.warn(_('manila.api.sizelimit:RequestBodySizeLimiter is ' 'deprecated. Please use manila.api.middleware.sizelimit:' 'RequestBodySizeLimiter instead')) super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) manila-2013.2.dev175.gbf1a399/manila/api/auth.py0000664000175000017500000000257212301410454021043 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api.middleware import auth from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class ManilaKeystoneContext(auth.ManilaKeystoneContext): def __init__(self, application): LOG.warn(_('manila.api.auth:ManilaKeystoneContext is deprecated. 
' 'Please use ' 'manila.api.middleware.auth:ManilaKeystoneContext ' 'instead.')) super(ManilaKeystoneContext, self).__init__(application) def pipeline_factory(loader, global_conf, **local_conf): LOG.warn(_('manila.api.auth:pipeline_factory is deprecated. Please use ' 'manila.api.middleware.auth:pipeline_factory instead.')) auth.pipeline_factory(loader, global_conf, **local_conf) manila-2013.2.dev175.gbf1a399/manila/api/openstack/0000775000175000017500000000000012301410516021510 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/openstack/wsgi.py0000664000175000017500000011540312301410454023040 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import math import time import webob from manila import exception from manila.openstack.common import jsonutils from manila.openstack.common import log as logging from manila import utils from manila import wsgi from lxml import etree from xml.dom import minidom from xml.parsers import expat XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1' XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger(__name__) # The vendor content types should serialize identically to the non-vendor # content types. 
So to avoid littering the code with both options, we # map the vendor to the other when looking up the type _CONTENT_TYPE_MAP = { 'application/vnd.openstack.volume+json': 'application/json', 'application/vnd.openstack.volume+xml': 'application/xml', } SUPPORTED_CONTENT_TYPES = ( 'application/json', 'application/vnd.openstack.volume+json', 'application/xml', 'application/vnd.openstack.volume+xml', ) _MEDIA_TYPE_MAP = { 'application/vnd.openstack.volume+json': 'json', 'application/json': 'json', 'application/vnd.openstack.volume+xml': 'xml', 'application/xml': 'xml', 'application/atom+xml': 'atom', } class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def best_match_content_type(self): """Determine the requested response content-type.""" if 'manila.best_content_type' not in self.environ: # Calculate the best MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in SUPPORTED_CONTENT_TYPES: content_type = possible_type if not content_type: content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) self.environ['manila.best_content_type'] = (content_type or 'application/json') return self.environ['manila.best_content_type'] def get_content_type(self): """Determine content type of the request body. 
Does not do any body introspection, only checks header """ if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) return content_type class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class TextDeserializer(ActionDispatcher): """Default request body deserialization""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class XMLDeserializer(TextDeserializer): def __init__(self, metadata=None): """ :param metadata: information needed to deserialize xml into a dictionary. """ super(XMLDeserializer, self).__init__() self.metadata = metadata or {} def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) try: node = utils.safe_minidom_parse_string(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} except expat.ExpatError: msg = _("cannot understand XML") raise exception.MalformedRequestBody(reason=msg) def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. :param listnames: list of XML node names whose subnodes should be considered list items. 
""" if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: return node.childNodes[0].nodeValue elif node.nodeName in listnames: return [self._from_xml_node(n, listnames) for n in node.childNodes] else: result = dict() for attr in node.attributes.keys(): result[attr] = node.attributes[attr].nodeValue for child in node.childNodes: if child.nodeType != node.TEXT_NODE: result[child.nodeName] = self._from_xml_node(child, listnames) return result def find_first_child_named(self, parent, name): """Search a nodes children for the first child with a given name""" for node in parent.childNodes: if node.nodeName == name: return node return None def find_children_named(self, parent, name): """Return all of a nodes children who have the given name""" for node in parent.childNodes: if node.nodeName == name: yield node def extract_text(self, node): """Get the text field contained by the given node""" if len(node.childNodes) == 1: child = node.childNodes[0] if child.nodeType == child.TEXT_NODE: return child.nodeValue return "" def find_attribute_or_element(self, parent, name): """Get an attribute value; fallback to an element if not found""" if parent.hasAttribute(name): return parent.getAttribute(name) node = self.find_first_child_named(parent, name) if node: return self.extract_text(node) return None def default(self, datastring): return {'body': self._from_xml(datastring)} class MetadataXMLDeserializer(XMLDeserializer): def extract_metadata(self, metadata_node): """Marshal the metadata attribute of a parsed request""" metadata = {} if metadata_node is not None: for meta_node in self.find_children_named(metadata_node, "meta"): key = meta_node.getAttribute("key") metadata[key] = self.extract_text(meta_node) return metadata class DictSerializer(ActionDispatcher): """Default request body serialization""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): 
"""Default JSON request body serialization""" def default(self, data): return jsonutils.dumps(data) class XMLDictSerializer(DictSerializer): def __init__(self, metadata=None, xmlns=None): """ :param metadata: information needed to deserialize xml into a dictionary. :param xmlns: XML namespace to include with serialized xml """ super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} self.xmlns = xmlns def default(self, data): # We expect data to contain a single key which is the XML root. root_key = data.keys()[0] doc = minidom.Document() node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) return self.to_xml_string(node) def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) return node.toxml('UTF-8') #NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking def _add_xmlns(self, node, has_atom=False): if self.xmlns is not None: node.setAttribute('xmlns', self.xmlns) if has_atom: node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") def _to_xml_node(self, doc, metadata, nodename, data): """Recursive method to convert data members to XML nodes.""" result = doc.createElement(nodename) # Set the xml namespace if one is specified # TODO(justinsb): We could also use prefixes on the keys xmlns = metadata.get('xmlns', None) if xmlns: result.setAttribute('xmlns', xmlns) #TODO(bcwaldon): accomplish this without a type-check if isinstance(data, list): collections = metadata.get('list_collections', {}) if nodename in collections: metadata = collections[nodename] for item in data: node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(item)) result.appendChild(node) return result singular = metadata.get('plurals', {}).get(nodename, None) if singular is None: if 
nodename.endswith('s'): singular = nodename[:-1] else: singular = 'item' for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) #TODO(bcwaldon): accomplish this without a type-check elif isinstance(data, dict): collections = metadata.get('dict_collections', {}) if nodename in collections: metadata = collections[nodename] for k, v in data.items(): node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(k)) text = doc.createTextNode(str(v)) node.appendChild(text) result.appendChild(node) return result attrs = metadata.get('attributes', {}).get(nodename, {}) for k, v in data.items(): if k in attrs: result.setAttribute(k, str(v)) else: node = self._to_xml_node(doc, metadata, k, v) result.appendChild(node) else: # Type is atom node = doc.createTextNode(str(data)) result.appendChild(node) return result def _create_link_nodes(self, xml_doc, links): link_nodes = [] for link in links: link_node = xml_doc.createElement('atom:link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) if 'type' in link: link_node.setAttribute('type', link['type']) link_nodes.append(link_node) return link_nodes def _to_xml(self, root): """Convert the xml object to an xml string.""" return etree.tostring(root, encoding='UTF-8', xml_declaration=True) def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator def deserializers(**deserializers): """Attaches deserializers to a method. This decorator associates a dictionary of deserializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. 
""" def decorator(func): if not hasattr(func, 'wsgi_deserializers'): func.wsgi_deserializers = {} func.wsgi_deserializers.update(deserializers) return func return decorator def response(code): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): func.wsgi_code = code return func return decorator class ResponseObject(object): """Bundles a response object with appropriate serializers. Object that app methods may return in order to bind alternate serializers with a response object to be serialized. Its use is optional. """ def __init__(self, obj, code=None, **serializers): """Binds serializers with an object. Takes keyword arguments akin to the @serializer() decorator for specifying serializers. Serializers specified will be given preference over default serializers or method-specific serializers on return. """ self.obj = obj self.serializers = serializers self._default_code = 200 self._code = code self._headers = {} self.serializer = None self.media_type = None def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def _bind_method_serializers(self, meth_serializers): """Binds method serializers with the response object. Binds the method serializers with the response object. Serializers specified to the constructor will take precedence over serializers specified to this method. :param meth_serializers: A dictionary with keys mapping to response types and values containing serializer objects. 
""" # We can't use update because that would be the wrong # precedence for mtype, serializer in meth_serializers.items(): self.serializers.setdefault(mtype, serializer) def get_serializer(self, content_type, default_serializers=None): """Returns the serializer for the wrapped object. Returns the serializer for the wrapped object subject to the indicated content type. If no serializer matching the content type is attached, an appropriate serializer drawn from the default serializers will be used. If no appropriate serializer is available, raises InvalidContentType. """ default_serializers = default_serializers or {} try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in self.serializers: return mtype, self.serializers[mtype] else: return mtype, default_serializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def preserialize(self, content_type, default_serializers=None): """Prepares the serializer that will be used to serialize. Determines the serializer that will be used and prepares an instance of it for later call. This allows the serializer to be accessed by extensions for, e.g., template extension. """ mtype, serializer = self.get_serializer(content_type, default_serializers) self.media_type = mtype self.serializer = serializer() def attach(self, **kwargs): """Attach slave templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) def serialize(self, request, content_type, default_serializers=None): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. 
""" if self.serializer: serializer = self.serializer else: _mtype, _serializer = self.get_serializer(content_type, default_serializers) serializer = _serializer() response = webob.Response() response.status_int = self.code for hdr, value in self._headers.items(): response.headers[hdr] = value response.headers['Content-Type'] = content_type if self.obj is not None: response.body = serializer.serialize(self.obj) return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek_json(body): """Determine action to invoke.""" try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) # Make sure there's exactly one key... if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(reason=msg) # Return the action and the decoded body... return decoded.keys()[0] def action_peek_xml(body): """Determine action to invoke.""" dom = utils.safe_minidom_parse_string(body) action_node = dom.childNodes[0] return action_node.tagName class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods (or their extensions). Converts most exceptions to Fault exceptions, with the appropriate logging. 
""" def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.NotAuthorized): msg = unicode(ex_value) raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.Invalid): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=unicode(ex_value))) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error(_( 'Exception handling resource: %s') % ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info(_("Fault thrown: %s"), unicode(ex_value)) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. 
""" def __init__(self, controller, action_peek=None, **deserializers): """ :param controller: object that implement methods created by routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action """ self.controller = controller default_deserializers = dict(xml=XMLDeserializer, json=JSONDeserializer) default_deserializers.update(deserializers) self.default_deserializers = default_deserializers self.default_serializers = dict(xml=XMLDictSerializer, json=JSONDictSerializer) self.action_peek = dict(xml=action_peek_xml, json=action_peek_json) self.action_peek.update(action_peek or {}) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) # Save a mapping of extensions self.wsgi_extensions = {} self.wsgi_action_extensions = {} def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_extensions(self, controller): """Registers controller extensions with this resource.""" extensions = getattr(controller, 'wsgi_extensions', []) for method_name, action_name in extensions: # Look up the extending method extension = getattr(controller, method_name) if action_name: # Extending an action... 
if action_name not in self.wsgi_action_extensions: self.wsgi_action_extensions[action_name] = [] self.wsgi_action_extensions[action_name].append(extension) else: # Extending a regular method if method_name not in self.wsgi_extensions: self.wsgi_extensions[method_name] = [] self.wsgi_extensions[method_name].append(extension) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): try: content_type = request.get_content_type() except exception.InvalidContentType: LOG.debug(_("Unrecognized Content-Type provided in request")) return None, '' if not content_type: LOG.debug(_("No Content-Type provided in request")) return None, '' if len(request.body) <= 0: LOG.debug(_("Empty body provided in request")) return None, '' return content_type, request.body def deserialize(self, meth, content_type, body): meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in meth_deserializers: deserializer = meth_deserializers[mtype] else: deserializer = self.default_deserializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) return deserializer().deserialize(body) def pre_process_extensions(self, extensions, request, action_args): # List of callables for post-processing extensions post = [] for ext in extensions: if inspect.isgeneratorfunction(ext): response = None # If it's a generator function, the part before the # yield is the preprocessing stage try: with ResourceExceptionHandler(): 
gen = ext(req=request, **action_args) response = gen.next() except Fault as ex: response = ex # We had a response... if response: return response, [] # No response, queue up generator for post-processing post.append(gen) else: # Regular functions only perform post-processing post.append(ext) # Run post-processing in the reverse order return None, reversed(post) def post_process_extensions(self, extensions, resp_obj, request, action_args): for ext in extensions: response = None if inspect.isgenerator(ext): # If it's a generator, run the second half of # processing try: with ResourceExceptionHandler(): response = ext.send(resp_obj) except StopIteration: # Normal exit of generator continue except Fault as ex: response = ex else: # Regular functions get post-processing... try: with ResourceExceptionHandler(): response = ext(req=request, resp_obj=resp_obj, **action_args) except Fault as ex: response = ex # We had a response... if response: return response return None @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info("%(method)s %(url)s" % {"method": request.method, "url": request.url}) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) content_type, body = self.get_body(request) accept = request.best_match_content_type() # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. 
return self._process_stack(request, action, action_args, content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth, extensions = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: msg = _("There is no such action: %s") % ex.args[0] return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Now, deserialize the request body... try: if content_type: contents = self.deserialize(meth, content_type, body) else: contents = {} except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('manila.context') if (context and project_id and (project_id != context.project_id)): msg = _("Malformed request url") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Run pre-processing extensions response, post = self.pre_process_extensions(extensions, request, action_args) if not response: try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if type(action_result) is dict or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: _set_request_id_header(request, resp_obj) # Do 
a preserialize to set up the response object serializers = getattr(meth, 'wsgi_serializers', {}) resp_obj._bind_method_serializers(serializers) if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code resp_obj.preserialize(accept, self.default_serializers) # Process post-processing extensions response = self.post_process_extensions(post, resp_obj, request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept, self.default_serializers) try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict except AttributeError, e: msg_dict = dict(url=request.url, e=e) msg = _("%(url)s returned a fault: %(e)s") % msg_dict LOG.info(msg) return response def get_method(self, request, action, content_type, body): """Look up the action-specific method and its extensions.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) except AttributeError: if (not self.wsgi_actions or action not in ['action', 'create', 'delete']): # Propagate the error raise else: return meth, self.wsgi_extensions.get(action, []) if action == 'action': # OK, it's an action; figure out which action... mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) LOG.debug("Action body: %s" % body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name], self.wsgi_action_extensions.get(action_name, [])) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" return method(req=request, **action_args) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. 
""" def decorator(func): func.wsgi_action = name return func return decorator def extends(*args, **kwargs): """Indicate a function extends an operation. Can be used as either:: @extends def index(...): pass or as:: @extends(action='resize') def _action_resize(...): pass """ def decorator(func): # Store enough information to find what we're extending func.wsgi_extends = (func.__name__, kwargs.get('action')) return func # If we have positional arguments, call the decorator if args: return decorator(*args) # OK, return the decorator instead return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} extensions = [] # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) for key, value in cls_dict.items(): if not callable(value): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key elif getattr(value, 'wsgi_extends', None): extensions.append(value.wsgi_extends) # Add the actions and extensions to the class dict cls_dict['wsgi_actions'] = actions cls_dict['wsgi_extensions'] = extensions return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) class Controller(object): """Default controller.""" __metaclass__ = ControllerMetaclass _view_builder_class = None def __init__(self, view_builder=None): """Initialize controller with a view builder instance.""" if view_builder: self._view_builder = view_builder elif self._view_builder_class: self._view_builder = self._view_builder_class() else: self._view_builder = None @staticmethod def is_valid_body(body, entity_name): if not (body and entity_name in body): return False def is_dict(d): try: d.get(None) return True except AttributeError: return False if not is_dict(body[entity_name]): 
return False return True class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" _fault_names = {400: "badRequest", 401: "unauthorized", 403: "forbidden", 404: "itemNotFound", 405: "badMethod", 409: "conflictingRequest", 413: "overLimit", 415: "badMediaType", 501: "notImplemented", 503: "serviceUnavailable"} def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") fault_data = { fault_name: { 'code': code, 'message': self.wrapped_exc.explanation}} if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry # 'code' is an attribute on the fault tag itself metadata = {'attributes': {fault_name: 'code'}} xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) content_type = req.best_match_content_type() serializer = { 'application/xml': xml_serializer, 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type _set_request_id_header(req, self.wrapped_exc.headers) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() def _set_request_id_header(req, headers): context = req.environ.get('manila.context') if context: headers['x-compute-request-id'] = context.request_id class OverLimitFault(webob.exc.HTTPException): """ Rate-limited request response. """ def __init__(self, message, details, retry_time): """ Initialize new `OverLimitFault` with relevant information. 
""" hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = { "overLimitFault": { "code": self.wrapped_exc.status_int, "message": message, "details": details, }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%d' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """ Return the wrapped exception with a serialized body conforming to our error format. """ content_type = request.best_match_content_type() metadata = {"attributes": {"overLimitFault": "code"}} xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) serializer = { 'application/xml': xml_serializer, 'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content return self.wrapped_exc manila-2013.2.dev175.gbf1a399/manila/api/openstack/volume/0000775000175000017500000000000012301410516023017 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/openstack/volume/versions.py0000664000175000017500000000201312301410454025236 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import versions from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class Versions(versions.Versions): def __init__(self): LOG.warn(_('manila.api.openstack.volume.versions.Versions is ' 'deprecated. Please use manila.api.versions.Versions ' 'instead.')) super(Versions, self).__init__() manila-2013.2.dev175.gbf1a399/manila/api/openstack/volume/__init__.py0000664000175000017500000000202312301410454025126 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api.v1.router import APIRouter as v1_router from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class APIRouter(v1_router): def __init__(self, ext_mgr=None): LOG.warn(_('manila.api.openstack.volume:APIRouter is deprecated. ' 'Please use manila.api.v1.router:APIRouter instead.')) super(APIRouter, self).__init__(ext_mgr) manila-2013.2.dev175.gbf1a399/manila/api/openstack/__init__.py0000664000175000017500000001110112301410454023614 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. """ import routes from manila.api.middleware import fault from manila.api.openstack import wsgi from manila.openstack.common import log as logging from manila import utils from manila import wsgi as base_wsgi LOG = logging.getLogger(__name__) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url is "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '{project_id}/' else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) class APIRouter(base_wsgi.Router): """ Routes requests on the OpenStack API to the appropriate controller and method. 
""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): """Simple paste factory, :class:`manila.wsgi.Router` doesn't have""" return cls() def __init__(self, ext_mgr=None): if ext_mgr is None: if self.ExtensionManager: ext_mgr = self.ExtensionManager() else: raise Exception(_("Must specify an ExtensionManager class")) mapper = ProjectMapper() self.resources = {} self._setup_routes(mapper, ext_mgr) self._setup_ext_routes(mapper, ext_mgr) self._setup_extensions(ext_mgr) super(APIRouter, self).__init__(mapper) def _setup_ext_routes(self, mapper, ext_mgr): for resource in ext_mgr.get_resources(): LOG.debug(_('Extended resource: %s'), resource.collection) wsgi_resource = wsgi.Resource(resource.controller) self.resources[resource.collection] = wsgi_resource kargs = dict( controller=wsgi_resource, collection=resource.collection_actions, member=resource.member_actions) if resource.parent: kargs['parent_resource'] = resource.parent mapper.resource(resource.collection, resource.collection, **kargs) if resource.custom_routes_fn: resource.custom_routes_fn(mapper, wsgi_resource) def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): ext_name = extension.extension.name collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning(_('Extension %(ext_name)s: Cannot extend ' 'resource %(collection)s: No such resource') % locals()) continue LOG.debug(_('Extension %(ext_name)s extending resource: ' '%(collection)s') % locals()) resource = self.resources[collection] resource.register_actions(controller) resource.register_extensions(controller) def _setup_routes(self, mapper, ext_mgr): raise NotImplementedError class FaultWrapper(fault.FaultWrapper): def __init__(self, application): LOG.warn(_('manila.api.openstack:FaultWrapper is deprecated. 
Please ' 'use manila.api.middleware.fault:FaultWrapper instead.')) super(FaultWrapper, self).__init__(application) manila-2013.2.dev175.gbf1a399/manila/api/openstack/urlmap.py0000664000175000017500000000177212301410454023372 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import urlmap from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) def urlmap_factory(loader, global_conf, **local_conf): LOG.warn(_('manila.api.openstack.urlmap:urlmap_factory is deprecated. ' 'Please use manila.api.urlmap:urlmap_factory instead.')) urlmap.urlmap_factory(loader, global_conf, **local_conf) manila-2013.2.dev175.gbf1a399/manila/api/xmlutil.py0000664000175000017500000006765512301410454021615 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os.path from lxml import etree from manila import utils XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' XMLNS_ATOM = 'http://www.w3.org/2005/Atom' XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1' XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-volume/2.0/' 'content') XMLNS_SHARE_V1 = '' def validate_schema(xml, schema_name): if isinstance(xml, str): xml = etree.fromstring(xml) base_path = 'manila/api/schemas/v1.1/' if schema_name in ('atom', 'atom-link'): base_path = 'manila/api/schemas/' schema_path = os.path.join(utils.maniladir(), '%s%s.rng' % (base_path, schema_name)) schema_doc = etree.parse(schema_path) relaxng = etree.RelaxNG(schema_doc) relaxng.assertValid(xml) class Selector(object): """Selects datum to operate on from an object.""" def __init__(self, *chain): """Initialize the selector. Each argument is a subsequent index into the object. """ self.chain = chain def __repr__(self): """Return a representation of the selector.""" return "Selector" + repr(self.chain) def __call__(self, obj, do_raise=False): """Select a datum to operate on. Selects the relevant datum within the object. :param obj: The object from which to select the object. :param do_raise: If False (the default), return None if the indexed datum does not exist. Otherwise, raise a KeyError. 
""" # Walk the selector list for elem in self.chain: # If it's callable, call it if callable(elem): obj = elem(obj) else: # Use indexing try: obj = obj[elem] except (KeyError, IndexError): # No sense going any further if do_raise: # Convert to a KeyError, for consistency raise KeyError(elem) return None # Return the finally-selected object return obj def get_items(obj): """Get items in obj.""" return list(obj.items()) class EmptyStringSelector(Selector): """Returns the empty string if Selector would return None.""" def __call__(self, obj, do_raise=False): """Returns empty string if the selected value does not exist.""" try: return super(EmptyStringSelector, self).__call__(obj, True) except KeyError: return "" class ConstantSelector(object): """Returns a constant.""" def __init__(self, value): """Initialize the selector. :param value: The value to return. """ self.value = value def __repr__(self): """Return a representation of the selector.""" return repr(self.value) def __call__(self, _obj, _do_raise=False): """Select a datum to operate on. Returns a constant value. Compatible with Selector.__call__(). """ return self.value class TemplateElement(object): """Represent an element in the template.""" def __init__(self, tag, attrib=None, selector=None, subselector=None, **extra): """Initialize an element. Initializes an element in the template. Keyword arguments specify attributes to be set on the element; values must be callables. See TemplateElement.set() for more information. :param tag: The name of the tag to create. :param attrib: An optional dictionary of element attributes. :param selector: An optional callable taking an object and optional boolean do_raise indicator and returning the object bound to the element. :param subselector: An optional callable taking an object and optional boolean do_raise indicator and returning the object bound to the element. 
This is used to further refine the datum object returned by selector in the event that it is a list of objects. """ # Convert selector into a Selector if selector is None: selector = Selector() elif not callable(selector): selector = Selector(selector) # Convert subselector into a Selector if subselector is not None and not callable(subselector): subselector = Selector(subselector) self.tag = tag self.selector = selector self.subselector = subselector self.attrib = {} self._text = None self._children = [] self._childmap = {} # Run the incoming attributes through set() so that they # become selectorized if not attrib: attrib = {} attrib.update(extra) for k, v in attrib.items(): self.set(k, v) def __repr__(self): """Return a representation of the template element.""" return ('<%s.%s %r at %#x>' % (self.__class__.__module__, self.__class__.__name__, self.tag, id(self))) def __len__(self): """Return the number of child elements.""" return len(self._children) def __contains__(self, key): """Determine whether a child node named by key exists.""" return key in self._childmap def __getitem__(self, idx): """Retrieve a child node by index or name.""" if isinstance(idx, basestring): # Allow access by node name return self._childmap[idx] else: return self._children[idx] def append(self, elem): """Append a child to the element.""" # Unwrap templates... elem = elem.unwrap() # Avoid duplications if elem.tag in self._childmap: raise KeyError(elem.tag) self._children.append(elem) self._childmap[elem.tag] = elem def extend(self, elems): """Append children to the element.""" # Pre-evaluate the elements elemmap = {} elemlist = [] for elem in elems: # Unwrap templates... 
elem = elem.unwrap() # Avoid duplications if elem.tag in self._childmap or elem.tag in elemmap: raise KeyError(elem.tag) elemmap[elem.tag] = elem elemlist.append(elem) # Update the children self._children.extend(elemlist) self._childmap.update(elemmap) def insert(self, idx, elem): """Insert a child element at the given index.""" # Unwrap templates... elem = elem.unwrap() # Avoid duplications if elem.tag in self._childmap: raise KeyError(elem.tag) self._children.insert(idx, elem) self._childmap[elem.tag] = elem def remove(self, elem): """Remove a child element.""" # Unwrap templates... elem = elem.unwrap() # Check if element exists if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: raise ValueError(_('element is not a child')) self._children.remove(elem) del self._childmap[elem.tag] def get(self, key): """Get an attribute. Returns a callable which performs datum selection. :param key: The name of the attribute to get. """ return self.attrib[key] def set(self, key, value=None): """Set an attribute. :param key: The name of the attribute to set. :param value: A callable taking an object and optional boolean do_raise indicator and returning the datum bound to the attribute. If None, a Selector() will be constructed from the key. If a string, a Selector() will be constructed from the string. """ # Convert value to a selector if value is None: value = Selector(key) elif not callable(value): value = Selector(value) self.attrib[key] = value def keys(self): """Return the attribute names.""" return self.attrib.keys() def items(self): """Return the attribute names and values.""" return self.attrib.items() def unwrap(self): """Unwraps a template to return a template element.""" # We are a template element return self def wrap(self): """Wraps a template element to return a template.""" # Wrap in a basic Template return Template(self) def apply(self, elem, obj): """Apply text and attributes to an etree.Element. 
Applies the text and attribute instructions in the template element to an etree.Element instance. :param elem: An etree.Element instance. :param obj: The base object associated with this template element. """ # Start with the text... if self.text is not None: elem.text = unicode(self.text(obj)) # Now set up all the attributes... for key, value in self.attrib.items(): try: elem.set(key, unicode(value(obj, True))) except KeyError: # Attribute has no value, so don't include it pass def _render(self, parent, datum, patches, nsmap): """Internal rendering. Renders the template node into an etree.Element object. Returns the etree.Element object. :param parent: The parent etree.Element instance. :param datum: The datum associated with this template element. :param patches: A list of other template elements that must also be applied. :param nsmap: An optional namespace dictionary to be associated with the etree.Element instance. """ # Allocate a node if callable(self.tag): tagname = self.tag(datum) else: tagname = self.tag elem = etree.Element(tagname, nsmap=nsmap) # If we have a parent, append the node to the parent if parent is not None: parent.append(elem) # If the datum is None, do nothing else if datum is None: return elem # Apply this template element to the element self.apply(elem, datum) # Additionally, apply the patches for patch in patches: patch.apply(elem, datum) # We have fully rendered the element; return it return elem def render(self, parent, obj, patches=[], nsmap=None): """Render an object. Renders an object against this template node. Returns a list of two-item tuples, where the first item is an etree.Element instance and the second item is the datum associated with that instance. :param parent: The parent for the etree.Element instances. :param obj: The object to render this template element against. :param patches: A list of other template elements to apply when rendering this template element. 
:param nsmap: An optional namespace dictionary to attach to the etree.Element instances. """ # First, get the datum we're rendering data = None if obj is None else self.selector(obj) # Check if we should render at all if not self.will_render(data): return [] elif data is None: return [(self._render(parent, None, patches, nsmap), None)] # Make the data into a list if it isn't already if not isinstance(data, list): data = [data] elif parent is None: raise ValueError(_('root element selecting a list')) # Render all the elements elems = [] for datum in data: if self.subselector is not None: datum = self.subselector(datum) elems.append((self._render(parent, datum, patches, nsmap), datum)) # Return all the elements rendered, as well as the # corresponding datum for the next step down the tree return elems def will_render(self, datum): """Hook method. An overridable hook method to determine whether this template element will be rendered at all. By default, returns False (inhibiting rendering) if the datum is None. :param datum: The datum associated with this template element. """ # Don't render if datum is None return datum is not None def _text_get(self): """Template element text. Either None or a callable taking an object and optional boolean do_raise indicator and returning the datum bound to the text of the template element. """ return self._text def _text_set(self, value): # Convert value to a selector if value is not None and not callable(value): value = Selector(value) self._text = value def _text_del(self): self._text = None text = property(_text_get, _text_set, _text_del) def tree(self): """Return string representation of the template tree. Returns a representation of the template rooted at this element as a string, suitable for inclusion in debug logs. """ # Build the inner contents of the tag... contents = [self.tag, '!selector=%r' % self.selector] # Add the text... 
if self.text is not None: contents.append('!text=%r' % self.text) # Add all the other attributes for key, value in self.attrib.items(): contents.append('%s=%r' % (key, value)) # If there are no children, return it as a closed tag if len(self) == 0: return '<%s/>' % ' '.join([str(i) for i in contents]) # OK, recurse to our children children = [c.tree() for c in self] # Return the result return ('<%s>%s' % (' '.join(contents), ''.join(children), self.tag)) def SubTemplateElement(parent, tag, attrib=None, selector=None, subselector=None, **extra): """Create a template element as a child of another. Corresponds to the etree.SubElement interface. Parameters are as for TemplateElement, with the addition of the parent. """ # Convert attributes attrib = attrib or {} attrib.update(extra) # Get a TemplateElement elem = TemplateElement(tag, attrib=attrib, selector=selector, subselector=subselector) # Append the parent safely if parent is not None: parent.append(elem) return elem class Template(object): """Represent a template.""" def __init__(self, root, nsmap=None): """Initialize a template. :param root: The root element of the template. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ self.root = root.unwrap() if root is not None else None self.nsmap = nsmap or {} self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) def _serialize(self, parent, obj, siblings, nsmap=None): """Internal serialization. Recursive routine to build a tree of etree.Element instances from an object based on the template. Returns the first etree.Element instance rendered, or None. :param parent: The parent etree.Element instance. Can be None. :param obj: The object to render. :param siblings: The TemplateElement instances against which to render the object. :param nsmap: An optional namespace dictionary to be associated with the etree.Element instance rendered. 
""" # First step, render the element elems = siblings[0].render(parent, obj, siblings[1:], nsmap) # Now, recurse to all child elements seen = set() for idx, sibling in enumerate(siblings): for child in sibling: # Have we handled this child already? if child.tag in seen: continue seen.add(child.tag) # Determine the child's siblings nieces = [child] for sib in siblings[idx + 1:]: if child.tag in sib: nieces.append(sib[child.tag]) # Now we recurse for every data element for elem, datum in elems: self._serialize(elem, datum, nieces) # Return the first element; at the top level, this will be the # root element if elems: return elems[0][0] def serialize(self, obj, *args, **kwargs): """Serialize an object. Serializes an object against the template. Returns a string with the serialized XML. Positional and keyword arguments are passed to etree.tostring(). :param obj: The object to serialize. """ elem = self.make_tree(obj) if elem is None: return '' for k, v in self.serialize_options.items(): kwargs.setdefault(k, v) # Serialize it into XML return etree.tostring(elem, *args, **kwargs) def make_tree(self, obj): """Create a tree. Serializes an object against the template. Returns an Element node with appropriate children. :param obj: The object to serialize. """ # If the template is empty, return the empty string if self.root is None: return None # Get the siblings and nsmap of the root element siblings = self._siblings() nsmap = self._nsmap() # Form the element tree return self._serialize(None, obj, siblings, nsmap) def _siblings(self): """Hook method for computing root siblings. An overridable hook method to return the siblings of the root element. By default, this is the root element itself. """ return [self.root] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. 
""" return self.nsmap.copy() def unwrap(self): """Unwraps a template to return a template element.""" # Return the root element return self.root def wrap(self): """Wraps a template element to return a template.""" # We are a template return self def apply(self, master): """Hook method for determining slave applicability. An overridable hook method used to determine if this template is applicable as a slave to a given master template. :param master: The master template to test. """ return True def tree(self): """Return string representation of the template tree. Returns a representation of the template as a string, suitable for inclusion in debug logs. """ return "%r: %s" % (self, self.root.tree()) class MasterTemplate(Template): """Represent a master template. Master templates are versioned derivatives of templates that additionally allow slave templates to be attached. Slave templates allow modification of the serialized result without directly changing the master. """ def __init__(self, root, version, nsmap=None): """Initialize a master template. :param root: The root element of the template. :param version: The version number of the template. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ super(MasterTemplate, self).__init__(root, nsmap) self.version = version self.slaves = [] def __repr__(self): """Return string representation of the template.""" return ("<%s.%s object version %s at %#x>" % (self.__class__.__module__, self.__class__.__name__, self.version, id(self))) def _siblings(self): """Hook method for computing root siblings. An overridable hook method to return the siblings of the root element. This is the root element plus the root elements of all the slave templates. """ return [self.root] + [slave.root for slave in self.slaves] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. 
The namespace dictionary is computed by taking the master template's namespace dictionary and updating it from all the slave templates. """ nsmap = self.nsmap.copy() for slave in self.slaves: nsmap.update(slave._nsmap()) return nsmap def attach(self, *slaves): """Attach one or more slave templates. Attaches one or more slave templates to the master template. Slave templates must have a root element with the same tag as the master template. The slave template's apply() method will be called to determine if the slave should be applied to this master; if it returns False, that slave will be skipped. (This allows filtering of slaves based on the version of the master template.) """ slave_list = [] for slave in slaves: slave = slave.wrap() # Make sure we have a tree match if slave.root.tag != self.root.tag: slavetag = slave.root.tag mastertag = self.root.tag msg = _("Template tree mismatch; adding slave %(slavetag)s " "to master %(mastertag)s") % locals() raise ValueError(msg) # Make sure slave applies to this template if not slave.apply(self): continue slave_list.append(slave) # Add the slaves self.slaves.extend(slave_list) def copy(self): """Return a copy of this master template.""" # Return a copy of the MasterTemplate tmp = self.__class__(self.root, self.version, self.nsmap) tmp.slaves = self.slaves[:] return tmp class SlaveTemplate(Template): """Represent a slave template. Slave templates are versioned derivatives of templates. Each slave has a minimum version and optional maximum version of the master template to which they can be attached. """ def __init__(self, root, min_vers, max_vers=None, nsmap=None): """Initialize a slave template. :param root: The root element of the template. :param min_vers: The minimum permissible version of the master template for this slave template to apply. :param max_vers: An optional upper bound for the master template version. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. 
""" super(SlaveTemplate, self).__init__(root, nsmap) self.min_vers = min_vers self.max_vers = max_vers def __repr__(self): """Return string representation of the template.""" return ("<%s.%s object versions %s-%s at %#x>" % (self.__class__.__module__, self.__class__.__name__, self.min_vers, self.max_vers, id(self))) def apply(self, master): """Hook method for determining slave applicability. An overridable hook method used to determine if this template is applicable as a slave to a given master template. This version requires the master template to have a version number between min_vers and max_vers. :param master: The master template to test. """ # Does the master meet our minimum version requirement? if master.version < self.min_vers: return False # How about our maximum version requirement? if self.max_vers is not None and master.version > self.max_vers: return False return True class TemplateBuilder(object): """Template builder. This class exists to allow templates to be lazily built without having to build them each time they are needed. It must be subclassed, and the subclass must implement the construct() method, which must return a Template (or subclass) instance. The constructor will always return the template returned by construct(), or, if it has a copy() method, a copy of that template. """ _tmpl = None def __new__(cls, copy=True): """Construct and return a template. :param copy: If True (the default), a copy of the template will be constructed and returned, if possible. """ # Do we need to construct the template? if cls._tmpl is None: tmp = super(TemplateBuilder, cls).__new__(cls) # Construct the template cls._tmpl = tmp.construct() # If the template has a copy attribute, return the result of # calling it if copy and hasattr(cls._tmpl, 'copy'): return cls._tmpl.copy() # Return the template return cls._tmpl def construct(self): """Construct a template. Called to construct a template instance, which it must return. Only called once. 
""" raise NotImplementedError(_("subclasses must implement construct()!")) def make_links(parent, selector=None): """ Attach an Atom element to the parent. """ elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, selector=selector) elem.set('rel') elem.set('type') elem.set('href') # Just for completeness... return elem def make_flat_dict(name, selector=None, subselector=None, ns=None): """ Utility for simple XML templates that traditionally used XMLDictSerializer with no metadata. Returns a template element where the top-level element has the given tag name, and where sub-elements have tag names derived from the object's keys and text derived from the object's values. This only works for flat dictionary objects, not dictionaries containing nested lists or dictionaries. """ # Set up the names we need... if ns is None: elemname = name tagname = Selector(0) else: elemname = '{%s}%s' % (ns, name) tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0]) if selector is None: selector = name # Build the root element root = TemplateElement(elemname, selector=selector, subselector=subselector) # Build an element to represent all the keys and values elem = SubTemplateElement(root, tagname, selector=get_items) elem.text = 1 # Return the template return root manila-2013.2.dev175.gbf1a399/manila/api/v1/0000775000175000017500000000000012301410516020047 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/v1/shares.py0000664000175000017500000001666612301410454021726 0ustar chuckchuck00000000000000# Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The shares api.""" import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import shares as share_views from manila.api import xmlutil from manila import exception from manila.openstack.common import log as logging from manila import share LOG = logging.getLogger(__name__) def make_share(elem): attrs = ['id', 'size', 'availability_zone', 'status', 'name', 'description', 'share_proto', 'export_location', 'links', 'snapshot_id', 'created_at', 'metadata'] for attr in attrs: elem.set(attr) class ShareTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('share', selector='share') make_share(root) return xmlutil.MasterTemplate(root, 1) class SharesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('shares') elem = xmlutil.SubTemplateElement(root, 'share', selector='shares') make_share(elem) return xmlutil.MasterTemplate(root, 1) class ShareController(wsgi.Controller): """The Shares API controller for the OpenStack API.""" _view_builder_class = share_views.ViewBuilder def __init__(self): super(ShareController, self).__init__() self.share_api = share.API() @wsgi.serializers(xml=ShareTemplate) def show(self, req, id): """Return data about the given share.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, share) def delete(self, req, id): """Delete a share.""" context = req.environ['manila.context'] 
LOG.audit(_("Delete share with id: %s"), id, context=context) try: share = self.share_api.get(context, id) self.share_api.delete(context, share) except exception.NotFound: raise exc.HTTPNotFound() except exception.InvalidShare: raise exc.HTTPForbidden() return webob.Response(status_int=202) @wsgi.serializers(xml=SharesTemplate) def index(self, req): """Returns a summary list of shares.""" return self._get_shares(req, is_detail=False) @wsgi.serializers(xml=SharesTemplate) def detail(self, req): """Returns a detailed list of shares.""" return self._get_shares(req, is_detail=True) def _get_shares(self, req, is_detail): """Returns a list of shares, transformed through view builder. """ context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # NOTE(rushiagr): v2 API allows name instead of display_name if 'name' in search_opts: search_opts['display_name'] = search_opts['name'] del search_opts['name'] common.remove_invalid_options( context, search_opts, self._get_share_search_options()) shares = self.share_api.get_all(context, search_opts=search_opts) limited_list = common.limited(shares, req) if is_detail: shares = self._view_builder.detail_list(req, limited_list) else: shares = self._view_builder.summary_list(req, limited_list) return shares def _get_share_search_options(self): """Return share search options allowed by non-admin.""" return ('name', 'status') @wsgi.serializers(xml=ShareTemplate) def update(self, req, id, body): """Update a share.""" context = req.environ['manila.context'] if not body or 'share' not in body: raise exc.HTTPUnprocessableEntity() share_data = body['share'] valid_update_keys = ( 'display_name', 'display_description', ) update_dict = dict([(key, share_data[key]) for key in valid_update_keys if key in share_data]) try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound() share = self.share_api.update(context, share, update_dict) share.update(update_dict) return 
self._view_builder.detail(req, share) @wsgi.serializers(xml=ShareTemplate) def create(self, req, body): """Creates a new share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share'): raise exc.HTTPUnprocessableEntity() share = body['share'] # NOTE(rushiagr): v2 API allows name instead of display_name if share.get('name'): share['display_name'] = share.get('name') del share['name'] # NOTE(rushiagr): v2 API allows description instead of # display_description if share.get('description'): share['display_description'] = share.get('description') del share['description'] size = share['size'] share_proto = share['share_proto'].upper() msg = (_("Create %(share_proto)s share of %(size)s GB") % {'share_proto': share_proto, 'size': size}) LOG.audit(msg, context=context) kwargs = {} kwargs['availability_zone'] = share.get('availability_zone') kwargs['metadata'] = share.get('metadata', None) snapshot_id = share.get('snapshot_id') if snapshot_id: kwargs['snapshot'] = self.share_api.get_snapshot(context, snapshot_id) else: kwargs['snapshot'] = None share_network_id = share.get('share_network_id') if share_network_id: try: self.share_api.db.share_network_get(context, share_network_id) except exception.ShareNetworkNotFound as e: msg = "%s" % e raise exc.HTTPNotFound(explanation=msg) else: kwargs['share_network_id'] = share_network_id display_name = share.get('display_name') display_description = share.get('display_description') new_share = self.share_api.create(context, share_proto, size, display_name, display_description, **kwargs) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
return self._view_builder.summary(req, dict(new_share.iteritems())) def create_resource(): return wsgi.Resource(ShareController()) manila-2013.2.dev175.gbf1a399/manila/api/v1/share_snapshots.py0000664000175000017500000001575012301410454023636 0ustar chuckchuck00000000000000# Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The share snapshots api.""" import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_snapshots as snapshot_views from manila.api import xmlutil from manila import exception from manila.openstack.common import log as logging from manila import share LOG = logging.getLogger(__name__) def make_snapshot(elem): attrs = ['id', 'size', 'status', 'name', 'description', 'share_proto', 'export_location', 'links', 'share_id', 'created_at', 'share_size'] for attr in attrs: elem.set(attr) class SnapshotTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshot', selector='snapshot') make_snapshot(root) return xmlutil.MasterTemplate(root, 1) class SnapshotsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshots') elem = xmlutil.SubTemplateElement(root, 'snapshot', selector='snapshots') make_snapshot(elem) return xmlutil.MasterTemplate(root, 1) class ShareSnapshotsController(wsgi.Controller): """The Share Snapshots API controller for the OpenStack API.""" 
_view_builder_class = snapshot_views.ViewBuilder def __init__(self): super(ShareSnapshotsController, self).__init__() self.share_api = share.API() @wsgi.serializers(xml=SnapshotTemplate) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['manila.context'] try: snapshot = self.share_api.get_snapshot(context, id) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, snapshot) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['manila.context'] LOG.audit(_("Delete snapshot with id: %s"), id, context=context) try: snapshot = self.share_api.get_snapshot(context, id) self.share_api.delete_snapshot(context, snapshot) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) @wsgi.serializers(xml=SnapshotsTemplate) def index(self, req): """Returns a summary list of snapshots.""" return self._get_snapshots(req, is_detail=False) @wsgi.serializers(xml=SnapshotsTemplate) def detail(self, req): """Returns a detailed list of snapshots.""" return self._get_snapshots(req, is_detail=True) def _get_snapshots(self, req, is_detail): """Returns a list of snapshots.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # NOTE(rushiagr): v2 API allows name instead of display_name if 'name' in search_opts: search_opts['display_name'] = search_opts['name'] del search_opts['name'] common.remove_invalid_options(context, search_opts, self._get_snapshots_search_options()) snapshots = self.share_api.get_all_snapshots(context, search_opts=search_opts) limited_list = common.limited(snapshots, req) if is_detail: snapshots = self._view_builder.detail_list(req, limited_list) else: snapshots = self._view_builder.summary_list(req, limited_list) return snapshots def _get_snapshots_search_options(self): """Return share search options allowed by non-admin.""" return ('name', 'status', 'share_id') @wsgi.serializers(xml=SnapshotTemplate) def 
update(self, req, id, body): """Update a snapshot.""" context = req.environ['manila.context'] if not body or 'snapshot' not in body: raise exc.HTTPUnprocessableEntity() snapshot_data = body['snapshot'] valid_update_keys = ( 'display_name', 'display_description', ) update_dict = dict([(key, snapshot_data[key]) for key in valid_update_keys if key in snapshot_data]) try: snapshot = self.share_api.get_snapshot(context, id) except exception.NotFound: raise exc.HTTPNotFound() snapshot = self.share_api.snapshot_update(context, snapshot, update_dict) snapshot.update(update_dict) return self._view_builder.detail(req, snapshot) @wsgi.response(202) @wsgi.serializers(xml=SnapshotTemplate) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'snapshot'): raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] share_id = snapshot['share_id'] share = self.share_api.get(context, share_id) msg = _("Create snapshot from share %s") LOG.audit(msg, share_id, context=context) # NOTE(rushiagr): v2 API allows name instead of display_name if 'name' in snapshot: snapshot['display_name'] = snapshot.get('name') del snapshot['name'] # NOTE(rushiagr): v2 API allows description instead of # display_description if 'description' in snapshot: snapshot['display_description'] = snapshot.get('description') del snapshot['description'] new_snapshot = self.share_api.create_snapshot( context, share, snapshot.get('display_name'), snapshot.get('display_description')) return self._view_builder.summary(req, dict(new_snapshot.iteritems())) def create_resource(): return wsgi.Resource(ShareSnapshotsController()) # # class Share_snapshots(extensions.ExtensionDescriptor): # """Enable share snapshtos API.""" # name = 'ShareSnapshots' # alias = 'snapshots' # namespace = '' # updated = '2013-03-01T00:00:00+00:00' # # def get_resources(self): # controller = ShareSnapshotsController() # resource = extensions.ResourceExtension( # 
'snapshots', controller, # collection_actions={'detail': 'GET'}) # return [resource] manila-2013.2.dev175.gbf1a399/manila/api/v1/__init__.py0000664000175000017500000000000012301410454022147 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/v1/share_metadata.py0000664000175000017500000001353312301410454023371 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from manila.api import common from manila.api.openstack import wsgi from manila import exception from manila import share from webob import exc class ShareMetadataController(object): """The share metadata API controller for the OpenStack API.""" def __init__(self): self.share_api = share.API() super(ShareMetadataController, self).__init__() def _get_metadata(self, context, share_id): try: share = self.share_api.get(context, share_id) meta = self.share_api.get_share_metadata(context, share) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) return meta @wsgi.serializers(xml=common.MetadataTemplate) def index(self, req, share_id): """Returns the list of metadata for a given share.""" context = req.environ['manila.context'] return {'metadata': self._get_metadata(context, share_id)} @wsgi.serializers(xml=common.MetadataTemplate) @wsgi.deserializers(xml=common.MetadataDeserializer) def create(self, req, share_id, body): try: metadata = body['metadata'] except (KeyError, TypeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) context = req.environ['manila.context'] new_metadata = self._update_share_metadata(context, share_id, metadata, delete=False) return {'metadata': new_metadata} @wsgi.serializers(xml=common.MetaItemTemplate) @wsgi.deserializers(xml=common.MetaItemDeserializer) def update(self, req, share_id, id, body): try: meta_item = body['meta'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['manila.context'] self._update_share_metadata(context, share_id, meta_item, delete=False) return {'meta': meta_item} @wsgi.serializers(xml=common.MetadataTemplate) 
@wsgi.deserializers(xml=common.MetadataDeserializer) def update_all(self, req, share_id, body): try: metadata = body['metadata'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['manila.context'] new_metadata = self._update_share_metadata(context, share_id, metadata, delete=True) return {'metadata': new_metadata} def _update_share_metadata(self, context, share_id, metadata, delete=False): try: share = self.share_api.get(context, share_id) return self.share_api.update_share_metadata(context, share, metadata, delete) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidShareMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidShareMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) @wsgi.serializers(xml=common.MetaItemTemplate) def show(self, req, share_id, id): """Return a single metadata item.""" context = req.environ['manila.context'] data = self._get_metadata(context, share_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, share_id, id): """Deletes an existing metadata.""" context = req.environ['manila.context'] metadata = self._get_metadata(context, share_id) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: share = self.share_api.get(context, share_id) self.share_api.delete_share_metadata(context, share, id) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) return webob.Response(status_int=200) def create_resource(): return wsgi.Resource(ShareMetadataController()) 
manila-2013.2.dev175.gbf1a399/manila/api/v1/limits.py0000664000175000017500000003553312301410454021734 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy import httplib import math import re import time import webob.dec import webob.exc from manila.api.openstack import wsgi from manila.api.views import limits as limits_views from manila.api import xmlutil from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila import quota from manila import wsgi as base_wsgi QUOTAS = quota.QUOTAS # Convenience constants for the limits dictionary passed to Limiter(). 
PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class LimitsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('limits', selector='limits') rates = xmlutil.SubTemplateElement(root, 'rates') rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate') rate.set('uri', 'uri') rate.set('regex', 'regex') limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit') limit.set('value', 'value') limit.set('verb', 'verb') limit.set('remaining', 'remaining') limit.set('unit', 'unit') limit.set('next-available', 'next-available') absolute = xmlutil.SubTemplateElement(root, 'absolute', selector='absolute') limit = xmlutil.SubTemplateElement(absolute, 'limit', selector=xmlutil.get_items) limit.set('name', 0) limit.set('value', 1) return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) class LimitsController(object): """ Controller for accessing limits in the OpenStack API. """ @wsgi.serializers(xml=LimitsTemplate) def index(self, req): """ Return all global and rate limit information. """ context = req.environ['manila.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=False) abs_limits = dict((k, v['limit']) for k, v in quotas.items()) rate_limits = req.environ.get("manila.limits", []) builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """ Stores information about a limit for HTTP requests. """ UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = dict([(v, k) for k, v in UNITS.items()]) def __init__(self, verb, uri, regex, value, unit): """ Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) 
@param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = _("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") self.error_message = msg % self.__dict__ def __call__(self, verb, url): """ Represents a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. 
Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 10, PER_MINUTE), Limit("POST", "*/servers", "^/servers", 50, PER_DAY), Limit("PUT", "*", ".*", 10, PER_MINUTE), Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), Limit("DELETE", "*", ".*", 100, PER_MINUTE), ] class RateLimitingMiddleware(base_wsgi.Middleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """ Represents a single call through this middleware. We should record the request if we have a limit relevant to it. 
If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("manila.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["manila.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """ Rate-limit checking class which handles limits in memory. """ def __init__(self, limits, **kwargs): """ Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith('user:'): username = key[5:] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """ Return the limits for a given user. """ return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """ Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """ Convert a string into a list of Limit instances. 
This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """ Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. To use, POST ``/`` with JSON data such as:: { "verb" : GET, "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """ Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """ Handles a call to this application. 
Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """ Rate-limit requests based on answers from a remote source. """ def __init__(self, limiter_address): """ Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dumps({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = httplib.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if 200 >= resp.status < 300: return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """ Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. 
""" return [] manila-2013.2.dev175.gbf1a399/manila/api/v1/router.py0000664000175000017500000000722712301410454021752 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Share API. """ from manila.api import extensions import manila.api.openstack from manila.api.v1 import limits from manila.api import versions from manila.api.v1 import security_service from manila.api.v1 import share_metadata from manila.api.v1 import share_networks from manila.api.v1 import share_snapshots from manila.api.v1 import shares from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class APIRouter(manila.api.openstack.APIRouter): """ Routes requests on the OpenStack API to the appropriate controller and method. 
""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='show') mapper.redirect("", "/") self.resources['shares'] = shares.create_resource() mapper.resource("share", "shares", controller=self.resources['shares'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['snapshots'] = share_snapshots.create_resource() mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['share_metadata'] = share_metadata.create_resource() share_metadata_controller = self.resources['share_metadata'] mapper.resource("share_metadata", "metadata", controller=share_metadata_controller, parent_resource=dict(member_name='share', collection_name='shares')) mapper.connect("metadata", "/{project_id}/shares/{share_id}/metadata", controller=share_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources["security_services"] = \ security_service.create_resource() mapper.resource("security-service", "security-services", controller=self.resources['security_services'], collection={'detail': 'GET'}) self.resources['share_networks'] = share_networks.create_resource() mapper.resource(share_networks.RESOURCE_NAME, 'share-networks', controller=self.resources['share_networks'], member={'action': 'POST'}) manila-2013.2.dev175.gbf1a399/manila/api/v1/security_service.py0000664000175000017500000002000112301410454024002 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security service api.""" import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import security_service as security_service_views from manila.api import xmlutil from manila.common import constants from manila import db from manila import exception from manila.openstack.common import log as logging from manila import policy RESOURCE_NAME = 'security_service' LOG = logging.getLogger(__name__) def make_security_service(elem): attrs = ['id', 'name', 'description', 'type', 'server', 'domain', 'sid', 'password', 'dns_ip', 'status', 'updated_at', 'created_at'] for attr in attrs: elem.set(attr) class SecurityServiceTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_service', selector='security_service') make_security_service(root) return xmlutil.MasterTemplate(root, 1) class SecurityServicesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('security_services') elem = xmlutil.SubTemplateElement(root, 'security_service', selector='security_services') make_security_service(elem) return xmlutil.MasterTemplate(root, 1) class SecurityServiceController(wsgi.Controller): """The Shares API controller for the OpenStack API.""" _view_builder_class = security_service_views.ViewBuilder @wsgi.serializers(xml=SecurityServiceTemplate) def show(self, req, id): """Return data about the given security 
service.""" context = req.environ['manila.context'] try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'show', security_service) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, security_service) def delete(self, req, id): """Delete a security service.""" context = req.environ['manila.context'] LOG.audit(_("Delete security service with id: %s"), id, context=context) try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'delete', security_service) db.security_service_delete(context, id) except exception.NotFound: raise exc.HTTPNotFound() except exception.InvalidShare: raise exc.HTTPForbidden() return webob.Response(status_int=202) @wsgi.serializers(xml=SecurityServicesTemplate) def index(self, req): """Returns a summary list of security services.""" return self._get_security_services(req, is_detail=False) @wsgi.serializers(xml=SecurityServicesTemplate) def detail(self, req): """Returns a detailed list of security services.""" return self._get_security_services(req, is_detail=True) def _get_security_services(self, req, is_detail): """Returns a list of security services, transformed through view builder. 
""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'get_all_security_services') search_opts = {} search_opts.update(req.GET) if 'share_network_id' in search_opts: share_nw = db.share_network_get(context, search_opts['share_network_id']) security_services = share_nw['security_services'] else: common.remove_invalid_options( context, search_opts, self._get_security_services_search_options()) if 'all_tenants' in search_opts: security_services = db.security_service_get_all(context) del search_opts['all_tenants'] else: security_services = db.security_service_get_all_by_project( context, context.project_id) if search_opts: results = [] not_found = object() for service in security_services: for opt, value in search_opts.iteritems(): if service.get(opt, not_found) != value: break else: results.append(service) security_services = results limited_list = common.limited(security_services, req) if is_detail: security_services = self._view_builder.detail_list( req, limited_list) else: security_services = self._view_builder.summary_list( req, limited_list) return security_services def _get_security_services_search_options(self): return ('status', 'name', 'id') @wsgi.serializers(xml=SecurityServiceTemplate) def update(self, req, id, body): """Update a security service.""" context = req.environ['manila.context'] if not body or 'security_service' not in body: raise exc.HTTPUnprocessableEntity() security_service_data = body['security_service'] valid_update_keys = ( 'description', 'name' ) try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'show', security_service) except exception.NotFound: raise exc.HTTPNotFound() if security_service['status'].lower() in ['new', 'inactive']: update_dict = security_service_data else: update_dict = dict([(key, security_service_data[key]) for key in valid_update_keys if key in security_service_data]) policy.check_policy(context, RESOURCE_NAME, 'update', 
security_service) security_service = db.security_service_update(context, id, update_dict) return self._view_builder.detail(req, security_service) @wsgi.serializers(xml=SecurityServiceTemplate) def create(self, req, body): """Creates a new security service.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'create') if not self.is_valid_body(body, 'security_service'): raise exc.HTTPUnprocessableEntity() security_service_args = body['security_service'] security_srv_type = security_service_args.get('type') allowed_types = constants.SECURITY_SERVICES_ALLOWED_TYPES if security_srv_type not in allowed_types: raise exception.InvalidInput( reason=(_("Invalid type %(type)s specified for security " "service. Valid types are %(types)s") % {'type': security_srv_type, 'types': ','.join(allowed_types)})) security_service_args['project_id'] = context.project_id security_service = db.security_service_create( context, security_service_args) return self._view_builder.detail(req, security_service) def create_resource(): return wsgi.Resource(SecurityServiceController()) manila-2013.2.dev175.gbf1a399/manila/api/v1/share_networks.py0000664000175000017500000002104312301410454023460 0ustar chuckchuck00000000000000# Copyright 2014 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The shares api.""" import webob from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_networks as share_networks_views from manila.api import xmlutil from manila.common import constants from manila.db import api as db_api from manila import exception from manila.openstack.common import log as logging from manila import policy RESOURCE_NAME = 'share_network' RESOURCES_NAME = 'share_networks' LOG = logging.getLogger(__name__) SHARE_NETWORK_ATTRS = ('id', 'project_id', 'created_at', 'updated_at', 'neutron_net_id', 'neutron_subnet_id', 'network_type', 'segmentation_id', 'cidr', 'ip_version', 'name', 'description', 'status') def _make_share_network(elem): for attr in SHARE_NETWORK_ATTRS: elem.set(attr) class ShareNetworkTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement(RESOURCE_NAME, selector=RESOURCE_NAME) _make_share_network(root) return xmlutil.MasterTemplate(root, 1) class ShareNetworksTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement(RESOURCES_NAME) elem = xmlutil.SubTemplateElement(root, RESOURCE_NAME, selector=RESOURCES_NAME) _make_share_network(elem) return xmlutil.MasterTemplate(root, 1) class ShareNetworkController(wsgi.Controller): """The Share Network API controller for the OpenStack API.""" _view_builder_class = share_networks_views.ViewBuilder @wsgi.serializers(xml=ShareNetworkTemplate) def show(self, req, id): """Return data about the requested network info.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'show') try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: msg = "%s" % e raise exc.HTTPNotFound(explanation=msg) return self._view_builder.build_share_network(share_network) def delete(self, req, id): """Delete specified share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'delete') try: 
share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: msg = "%s" % e raise exc.HTTPNotFound(explanation=msg) if share_network['status'] == constants.STATUS_ACTIVE: msg = "Network %s is in use" % id raise exc.HTTPBadRequest(explanation=msg) db_api.share_network_delete(context, id) return webob.Response(status_int=202) @wsgi.serializers(xml=ShareNetworksTemplate) def index(self, req): """Returns a summary list of share's networks.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'index') search_opts = {} search_opts.update(req.GET) if search_opts.pop('all_tenants', None): networks = db_api.share_network_get_all(context) else: networks = db_api.share_network_get_all_by_project( context, context.project_id) if search_opts: for key, value in search_opts.iteritems(): networks = [network for network in networks if network[key] == value] return self._view_builder.build_share_networks(networks) @wsgi.serializers(xml=ShareNetworkTemplate) def update(self, req, id, body): """Update specified share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'update') if not body or RESOURCE_NAME not in body: raise exc.HTTPUnprocessableEntity() try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: msg = "%s" % e raise exc.HTTPNotFound(explanation=msg) if share_network['status'] == constants.STATUS_ACTIVE: msg = "Network %s is in use" % id raise exc.HTTPBadRequest(explanation=msg) update_values = body[RESOURCE_NAME] try: share_network = db_api.share_network_update(context, id, update_values) except exception.DBError: msg = "Could not save supplied data due to database error" raise exc.HTTPBadRequest(explanation=msg) return self._view_builder.build_share_network(share_network) @wsgi.serializers(xml=ShareNetworkTemplate) def create(self, req, body): """Creates a new share network.""" context = 
req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'create') if not body or RESOURCE_NAME not in body: raise exc.HTTPUnprocessableEntity() values = body[RESOURCE_NAME] values['project_id'] = context.project_id try: share_network = db_api.share_network_create(context, values) except exception.DBError: msg = "Could not save supplied data due to database error" raise exc.HTTPBadRequest(explanation=msg) return self._view_builder.build_share_network(share_network) @wsgi.serializers(xml=ShareNetworkTemplate) def action(self, req, id, body): _actions = { 'add_security_service': self._add_security_service, 'remove_security_service': self._remove_security_service, } for action, data in body.iteritems(): try: return _actions[action](req, id, data) except KeyError: msg = _("Share networks does not have %s action") % action raise exc.HTTPBadRequest(explanation=msg) def _add_security_service(self, req, id, data): context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'add_security_service') try: share_network = db_api.share_network_add_security_service( context, id, data['security_service_id']) except KeyError: msg = "Malformed request body" raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as e: msg = "%s" % e raise exc.HTTPNotFound(explanation=msg) except exception.ShareNetworkSecurityServiceAssociationError as e: msg = "%s" % e raise exc.HTTPBadRequest(explanation=msg) return self._view_builder.build_share_network(share_network) def _remove_security_service(self, req, id, data): context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'remove_security_service') try: share_network = db_api.share_network_remove_security_service( context, id, data['security_service_id']) except KeyError: msg = "Malformed request body" raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as e: msg = "%s" % e raise exc.HTTPNotFound(explanation=msg) except 
exception.ShareNetworkSecurityServiceDissociationError as e: msg = "%s" % e raise exc.HTTPBadRequest(explanation=msg) return self._view_builder.build_share_network(share_network) def create_resource(): return wsgi.Resource(ShareNetworkController()) manila-2013.2.dev175.gbf1a399/manila/api/__init__.py0000664000175000017500000000211312301410454021630 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg import paste.urlmap CONF = cfg.CONF def root_app_factory(loader, global_conf, **local_conf): if not CONF.enable_v1_api: del local_conf['/v1'] if not CONF.enable_v2_api: del local_conf['/v2'] return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) manila-2013.2.dev175.gbf1a399/manila/api/common.py0000664000175000017500000002675112301410454021377 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import urlparse import webob from manila.api.openstack import wsgi from manila.api import xmlutil from manila.openstack.common import log as logging from manila import utils from oslo.config import cfg LOG = logging.getLogger(__name__) CONF = cfg.CONF XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1' def get_pagination_params(request): """Return marker, limit tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either marker or limit will cause exc.HTTPBadRequest() exceptions to be raised. """ params = {} if 'limit' in request.GET: params['limit'] = _get_limit_param(request) if 'marker' in request.GET: params['marker'] = _get_marker_param(request) return params def _get_limit_param(request): """Extract integer limit from request or fail""" try: limit = int(request.GET['limit']) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _get_marker_param(request): """Extract marker id from request or fail""" return request.GET['marker'] def limited(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to requested offset and limit. 
:param items: A sliceable entity :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. :kwarg max_limit: The maximum number of items to return from 'items' """ try: offset = int(request.GET.get('offset', 0)) except ValueError: msg = _('offset param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) if offset < 0: msg = _('offset param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(max_limit, limit or max_limit) range_end = offset + limit return items[offset:range_end] def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" params = get_pagination_params(request) limit = params.get('limit', max_limit) marker = params.get('marker') limit = min(max_limit, limit) start_index = 0 if marker: start_index = -1 for i, item in enumerate(items): if 'flavorid' in item: if item['flavorid'] == marker: start_index = i + 1 break elif item['id'] == marker or item.get('uuid') == marker: start_index = i + 1 break if start_index < 0: msg = _('marker [%s] not found') % marker raise webob.exc.HTTPBadRequest(explanation=msg) range_end = start_index + limit return items[start_index:range_end] def remove_version_from_href(href): """Removes the first api version from the href. 
Given: 'http://www.manila.com/v1.1/123' Returns: 'http://www.manila.com/123' Given: 'http://www.manila.com/v1.1' Returns: 'http://www.manila.com' """ parsed_url = urlparse.urlsplit(href) url_parts = parsed_url.path.split('/', 2) # NOTE: this should match vX.X or vX expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') if expression.match(url_parts[1]): del url_parts[1] new_path = '/'.join(url_parts) if new_path == parsed_url.path: msg = _('href %s does not contain version') % href LOG.debug(msg) raise ValueError(msg) parsed_url = list(parsed_url) parsed_url[2] = new_path return urlparse.urlunsplit(parsed_url) def dict_to_query_str(params): # TODO(throughnothing): we should just use urllib.urlencode instead of this # But currently we don't work with urlencoded url's param_str = "" for key, val in params.iteritems(): param_str = param_str + '='.join([str(key), str(val)]) + '&' return param_str.rstrip('&') class ViewBuilder(object): """Model API responses as dictionaries.""" _collection_name = None def _get_links(self, request, identifier): return [{"rel": "self", "href": self._get_href_link(request, identifier), }, {"rel": "bookmark", "href": self._get_bookmark_link(request, identifier), }] def _get_next_link(self, request, identifier): """Return href string with proper limit and marker params.""" params = request.params.copy() params["marker"] = identifier prefix = self._update_link_prefix(request.application_url, CONF.osapi_share_base_URL) url = os.path.join(prefix, request.environ["manila.context"].project_id, self._collection_name) return "%s?%s" % (url, dict_to_query_str(params)) def _get_href_link(self, request, identifier): """Return an href string pointing to this object.""" prefix = self._update_link_prefix(request.application_url, CONF.osapi_share_base_URL) return os.path.join(prefix, request.environ["manila.context"].project_id, self._collection_name, str(identifier)) def _get_bookmark_link(self, request, identifier): """Create a URL that refers to 
a specific resource.""" base_url = remove_version_from_href(request.application_url) base_url = self._update_link_prefix(base_url, CONF.osapi_share_base_URL) return os.path.join(base_url, request.environ["manila.context"].project_id, self._collection_name, str(identifier)) def _get_collection_links(self, request, items, id_key="uuid"): """Retrieve 'next' link, if applicable.""" links = [] limit = int(request.params.get("limit", 0)) if limit and limit == len(items): last_item = items[-1] if id_key in last_item: last_item_id = last_item[id_key] else: last_item_id = last_item["id"] links.append({ "rel": "next", "href": self._get_next_link(request, last_item_id), }) return links def _update_link_prefix(self, orig_url, prefix): if not prefix: return orig_url url_parts = list(urlparse.urlsplit(orig_url)) prefix_parts = list(urlparse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] return urlparse.urlunsplit(url_parts) class MetadataDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): dom = utils.safe_minidom_parse_string(text) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): dom = utils.safe_minidom_parse_string(text) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} class MetadataXMLDeserializer(wsgi.XMLDeserializer): def extract_metadata(self, metadata_node): """Marshal the metadata attribute of a parsed request""" if metadata_node is None: return {} metadata = {} for meta_node in self.find_children_named(metadata_node, "meta"): key = meta_node.getAttribute("key") metadata[key] = self.extract_text(meta_node) return metadata def _extract_metadata_container(self, datastring): dom = utils.safe_minidom_parse_string(datastring) metadata_node = self.find_first_child_named(dom, "metadata") metadata = 
self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} def create(self, datastring): return self._extract_metadata_container(datastring) def update_all(self, datastring): return self._extract_metadata_container(datastring) def update(self, datastring): dom = utils.safe_minidom_parse_string(datastring) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} metadata_nsmap = {None: xmlutil.XMLNS_V11} class MetaItemTemplate(xmlutil.TemplateBuilder): def construct(self): sel = xmlutil.Selector('meta', xmlutil.get_items, 0) root = xmlutil.TemplateElement('meta', selector=sel) root.set('key', 0) root.text = 1 return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) class MetadataTemplateElement(xmlutil.TemplateElement): def will_render(self, datum): return True class MetadataTemplate(xmlutil.TemplateBuilder): def construct(self): root = MetadataTemplateElement('metadata', selector='metadata') elem = xmlutil.SubTemplateElement(root, 'meta', selector=xmlutil.get_items) elem.set('key', 0) elem.text = 1 return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) def remove_invalid_options(context, search_options, allowed_search_options): """Remove search options that are not valid for non-admin API/context.""" if context.is_admin: # Allow all options return # Otherwise, strip out all unknown options unknown_options = [opt for opt in search_options if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) log_msg = _("Removing options '%(bad_options)s' from query") % locals() LOG.debug(log_msg) for opt in unknown_options: del search_options[opt] manila-2013.2.dev175.gbf1a399/manila/api/middleware/0000775000175000017500000000000012301410516021636 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/middleware/fault.py0000664000175000017500000000540112301410454023324 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as 
represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.dec import webob.exc from manila.api.openstack import wsgi from manila.openstack.common import log as logging from manila import utils from manila import wsgi as base_wsgi LOG = logging.getLogger(__name__) class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def _error(self, inner, req): LOG.exception(_("Caught error: %s"), unicode(inner)) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', 500) if status is None: status = 500 msg_dict = dict(url=req.url, status=status) LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers # NOTE(johannes): We leave the explanation empty here on # purpose. It could possibly have sensitive information # that should not be returned back to the user. 
See # bugs 868360 and 874472 # NOTE(eglynn): However, it would be over-conservative and # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: outer.explanation = '%s: %s' % (inner.__class__.__name__, unicode(inner)) return wsgi.Fault(outer) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) manila-2013.2.dev175.gbf1a399/manila/api/middleware/sizelimit.py0000664000175000017500000000542512301410454024230 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 OpenStack, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Request Body limiting middleware. 
""" from oslo.config import cfg import webob.dec import webob.exc from manila.openstack.common import log as logging from manila import wsgi #default request size is 112k max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', default=114688, help='Max size for body of a request') CONF = cfg.CONF CONF.register_opt(max_request_body_size_opt) LOG = logging.getLogger(__name__) class LimitingReader(object): """Reader to limit the size of an incoming request.""" def __init__(self, data, limit): """ :param data: Underlying data object :param limit: maximum number of bytes the reader should allow """ self.data = data self.limit = limit self.bytes_read = 0 def __iter__(self): for chunk in self.data: self.bytes_read += len(chunk) if self.bytes_read > self.limit: msg = _("Request is too large.") raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) else: yield chunk def read(self, i=None): result = self.data.read(i) self.bytes_read += len(result) if self.bytes_read > self.limit: msg = _("Request is too large.") raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) return result class RequestBodySizeLimiter(wsgi.Middleware): """Add a 'manila.context' to WSGI environ.""" def __init__(self, *args, **kwargs): super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if req.content_length > CONF.osapi_max_request_body_size: msg = _("Request is too large.") raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) if req.content_length is None and req.is_body_readable: limiter = LimitingReader(req.body_file, CONF.osapi_max_request_body_size) req.body_file = limiter return self.application manila-2013.2.dev175.gbf1a399/manila/api/middleware/auth.py0000664000175000017500000001313012301410454023150 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ import os from oslo.config import cfg import webob.dec import webob.exc from manila.api.openstack import wsgi from manila import context from manila.openstack.common import jsonutils from manila.openstack.common import log as logging from manila import wsgi as base_wsgi use_forwarded_for_opt = cfg.BoolOpt( 'use_forwarded_for', default=False, help='Treat X-Forwarded-For as the canonical remote address. ' 'Only enable this if you have a sanitizing proxy.') CONF = cfg.CONF CONF.register_opt(use_forwarded_for_opt) LOG = logging.getLogger(__name__) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" pipeline = local_conf[CONF.auth_strategy] if not CONF.api_rate_limit: limit_name = CONF.auth_strategy + '_nolimit' pipeline = local_conf.get(limit_name, pipeline) pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app class InjectContext(base_wsgi.Middleware): """Add a 'manila.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): req.environ['manila.context'] = self.context return self.application class 
ManilaKeystoneContext(base_wsgi.Middleware): """Make a request context from keystone headers""" @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): user_id = req.headers.get('X_USER') user_id = req.headers.get('X_USER_ID', user_id) if user_id is None: LOG.debug("Neither X_USER_ID nor X_USER found in request") return webob.exc.HTTPUnauthorized() # get the roles roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] if 'X_TENANT_ID' in req.headers: # This is the new header since Keystone went to ID/Name project_id = req.headers['X_TENANT_ID'] else: # This is for legacy compatibility project_id = req.headers['X_TENANT'] # Get the auth token auth_token = req.headers.get('X_AUTH_TOKEN', req.headers.get('X_STORAGE_TOKEN')) # Build a context, including the auth_token... remote_address = req.remote_addr if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) service_catalog = None if req.headers.get('X_SERVICE_CATALOG') is not None: try: catalog_header = req.headers.get('X_SERVICE_CATALOG') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) ctx = context.RequestContext(user_id, project_id, roles=roles, auth_token=auth_token, remote_address=remote_address, service_catalog=service_catalog) req.environ['manila.context'] = ctx return self.application class NoAuthMiddleware(base_wsgi.Middleware): """Return a fake token if one isn't specified.""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if 'X-Auth-Token' not in req.headers: user_id = req.headers.get('X-Auth-User', 'admin') project_id = req.headers.get('X-Auth-Project-Id', 'admin') os_url = os.path.join(req.url, project_id) res = webob.Response() # NOTE(vish): This is expecting and returning Auth(1.1), whereas # keystone uses 2.0 auth. We should probably allow # 2.0 auth here as well. 
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) res.headers['X-Server-Management-Url'] = os_url res.content_type = 'text/plain' res.status = '204' return res token = req.headers['X-Auth-Token'] user_id, _sep, project_id = token.partition(':') project_id = project_id or user_id remote_address = getattr(req, 'remote_address', '127.0.0.1') if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, is_admin=True, remote_address=remote_address) req.environ['manila.context'] = ctx return self.application manila-2013.2.dev175.gbf1a399/manila/api/middleware/__init__.py0000664000175000017500000000124212301410454023747 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/api/contrib/0000775000175000017500000000000012301410516021161 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/contrib/extended_quotas.py0000664000175000017500000000201512301410454024726 0ustar chuckchuck00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import extensions class Extended_quotas(extensions.ExtensionDescriptor): """Adds ability for admins to delete quota and optionally force the update Quota command. """ name = "ExtendedQuotas" alias = "os-extended-quotas" namespace = ("http://docs.openstack.org/compute/ext/extended_quotas" "/api/v1.1") updated = "2013-06-09T00:00:00+00:00" manila-2013.2.dev175.gbf1a399/manila/api/contrib/quota_classes.py0000664000175000017500000000657512301410454024417 0ustar chuckchuck00000000000000# Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from manila.api import extensions from manila.api.openstack import wsgi from manila.api import xmlutil from manila import db from manila import exception from manila import quota QUOTAS = quota.QUOTAS authorize = extensions.extension_authorizer('share', 'quota_classes') class QuotaClassTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('quota_class_set', selector='quota_class_set') root.set('id') for resource in QUOTAS.resources: elem = xmlutil.SubTemplateElement(root, resource) elem.text = resource return xmlutil.MasterTemplate(root, 1) class QuotaClassSetsController(object): def _format_quota_set(self, quota_class, quota_set): """Convert the quota object to a result dict""" result = dict(id=str(quota_class)) for resource in QUOTAS.resources: result[resource] = quota_set[resource] return dict(quota_class_set=result) @wsgi.serializers(xml=QuotaClassTemplate) def show(self, req, id): context = req.environ['manila.context'] authorize(context) try: db.sqlalchemy.api.authorize_quota_class_context(context, id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() return self._format_quota_set(id, QUOTAS.get_class_quotas(context, id)) @wsgi.serializers(xml=QuotaClassTemplate) def update(self, req, id, body): context = req.environ['manila.context'] authorize(context) quota_class = id for key in body['quota_class_set'].keys(): if key in QUOTAS: value = int(body['quota_class_set'][key]) try: db.quota_class_update(context, quota_class, key, value) except exception.QuotaClassNotFound: db.quota_class_create(context, quota_class, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return {'quota_class_set': QUOTAS.get_class_quotas(context, quota_class)} class Quota_classes(extensions.ExtensionDescriptor): """Quota classes management support""" name = "QuotaClasses" alias = "os-quota-class-sets" namespace = ("http://docs.openstack.org/volume/ext/" "quota-classes-sets/api/v1.1") updated = 
"2012-03-12T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-class-sets', QuotaClassSetsController()) resources.append(res) return resources manila-2013.2.dev175.gbf1a399/manila/api/contrib/quotas.py0000664000175000017500000002410512301410454023052 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urlparse import webob from manila.api import extensions from manila.api.openstack import wsgi from manila.api import xmlutil from manila import db from manila.db.sqlalchemy import api as sqlalchemy_api from manila import exception from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import strutils from manila import quota QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) NON_QUOTA_KEYS = ['tenant_id', 'id', 'force'] authorize_update = extensions.extension_authorizer('compute', 'quotas:update') authorize_show = extensions.extension_authorizer('compute', 'quotas:show') authorize_delete = extensions.extension_authorizer('compute', 'quotas:delete') class QuotaTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('quota_set', selector='quota_set') root.set('id') for resource in QUOTAS.resources: elem = xmlutil.SubTemplateElement(root, resource) elem.text = resource return 
xmlutil.MasterTemplate(root, 1) class QuotaSetsController(object): def __init__(self, ext_mgr): self.ext_mgr = ext_mgr def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict""" result = dict(id=str(project_id)) for resource in QUOTAS.resources: result[resource] = quota_set[resource] return dict(quota_set=result) def _validate_quota_limit(self, limit, minimum, maximum, force_update): # NOTE: -1 is a flag value for unlimited if limit < -1: msg = _("Quota limit must be -1 or greater.") raise webob.exc.HTTPBadRequest(explanation=msg) if ((limit < minimum and not force_update) and (maximum != -1 or (maximum == -1 and limit != -1))): msg = _("Quota limit must greater than %s.") % minimum raise webob.exc.HTTPBadRequest(explanation=msg) if maximum != -1 and limit > maximum: msg = _("Quota limit must less than %s.") % maximum raise webob.exc.HTTPBadRequest(explanation=msg) def _get_quotas(self, context, id, user_id=None, usages=False): if user_id: values = QUOTAS.get_user_quotas(context, id, user_id, usages=usages) else: values = QUOTAS.get_project_quotas(context, id, usages=usages) if usages: return values else: return dict((k, v['limit']) for k, v in values.items()) @wsgi.serializers(xml=QuotaTemplate) def show(self, req, id): context = req.environ['manila.context'] authorize_show(context) params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = None if self.ext_mgr.is_loaded('os-user-quotas'): user_id = params.get('user_id', [None])[0] try: sqlalchemy_api.authorize_project_context(context, id) return self._format_quota_set(id, self._get_quotas(context, id, user_id=user_id)) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() @wsgi.serializers(xml=QuotaTemplate) def update(self, req, id, body): context = req.environ['manila.context'] authorize_update(context) project_id = id bad_keys = [] # By default, we can force update the quota if the extended # is not loaded force_update = True extended_loaded = 
False if self.ext_mgr.is_loaded('os-extended-quotas'): # force optional has been enabled, the default value of # force_update need to be changed to False extended_loaded = True force_update = False user_id = None if self.ext_mgr.is_loaded('os-user-quotas'): # Update user quotas only if the extended is loaded params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] try: settable_quotas = QUOTAS.get_settable_quotas(context, project_id, user_id=user_id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() for key, value in body['quota_set'].items(): if (key not in QUOTAS and key not in NON_QUOTA_KEYS): bad_keys.append(key) continue if key == 'force' and extended_loaded: # only check the force optional when the extended has # been loaded force_update = strutils.bool_from_string(value) elif key not in NON_QUOTA_KEYS and value: try: value = int(value) except (ValueError, TypeError): msg = _("Quota '%(value)s' for %(key)s should be " "integer.") % {'value': value, 'key': key} LOG.warn(msg) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug(_("force update quotas: %s") % force_update) if len(bad_keys) > 0: msg = _("Bad key(s) %s in quota_set") % ",".join(bad_keys) raise webob.exc.HTTPBadRequest(explanation=msg) try: quotas = self._get_quotas(context, id, user_id=user_id, usages=True) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() for key, value in body['quota_set'].items(): if key in NON_QUOTA_KEYS or (not value and value != 0): continue # validate whether already used and reserved exceeds the new # quota, this check will be ignored if admin want to force # update try: value = int(value) except (ValueError, TypeError): msg = _("Quota '%(value)s' for %(key)s should be " "integer.") % {'value': value, 'key': key} LOG.warn(msg) raise webob.exc.HTTPBadRequest(explanation=msg) if force_update is not True and value >= 0: quota_value = quotas.get(key) if quota_value and quota_value['limit'] >= 0: 
quota_used = (quota_value['in_use'] + quota_value['reserved']) LOG.debug(_("Quota %(key)s used: %(quota_used)s, " "value: %(value)s."), {'key': key, 'quota_used': quota_used, 'value': value}) if quota_used > value: msg = (_("Quota value %(value)s for %(key)s are " "greater than already used and reserved " "%(quota_used)s") % {'value': value, 'key': key, 'quota_used': quota_used}) raise webob.exc.HTTPBadRequest(explanation=msg) minimum = settable_quotas[key]['minimum'] maximum = settable_quotas[key]['maximum'] self._validate_quota_limit(value, minimum, maximum, force_update) try: db.quota_create(context, project_id, key, value, user_id=user_id) except exception.QuotaExists: db.quota_update(context, project_id, key, value, user_id=user_id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return {'quota_set': self._get_quotas(context, id, user_id=user_id)} @wsgi.serializers(xml=QuotaTemplate) def defaults(self, req, id): context = req.environ['manila.context'] authorize_show(context) return self._format_quota_set(id, QUOTAS.get_defaults(context)) def delete(self, req, id): if self.ext_mgr.is_loaded('os-extended-quotas'): context = req.environ['manila.context'] authorize_delete(context) params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] if user_id and not self.ext_mgr.is_loaded('os-user-quotas'): raise webob.exc.HTTPNotFound() try: sqlalchemy_api.authorize_project_context(context, id) if user_id: QUOTAS.destroy_all_by_project_and_user(context, id, user_id) else: QUOTAS.destroy_all_by_project(context, id) return webob.Response(status_int=202) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() raise webob.exc.HTTPNotFound() class Quotas(extensions.ExtensionDescriptor): """Quotas management support""" name = "Quotas" alias = "os-quota-sets" namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" updated = "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] 
res = extensions.ResourceExtension('os-quota-sets', QuotaSetsController(self.ext_mgr), member_actions={'defaults': 'GET'}) resources.append(res) return resources manila-2013.2.dev175.gbf1a399/manila/api/contrib/admin_actions.py0000664000175000017500000001030112301410454024337 0ustar chuckchuck00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from webob import exc from manila.api import extensions from manila.api.openstack import wsgi from manila import db from manila import exception from manila.openstack.common import log as logging from manila.openstack.common import strutils from manila import share LOG = logging.getLogger(__name__) class AdminController(wsgi.Controller): """Abstract base class for AdminControllers.""" collection = None valid_status = set([ 'creating', 'available', 'deleting', 'error', 'error_deleting', ]) def __init__(self, *args, **kwargs): super(AdminController, self).__init__(*args, **kwargs) self.resource_name = self.collection.rstrip('s') self.share_api = share.API() def _update(self, *args, **kwargs): raise NotImplementedError() def _get(self, *args, **kwargs): raise NotImplementedError() def _delete(self, *args, **kwargs): raise NotImplementedError() def validate_update(self, body): update = {} try: update['status'] = body['status'] except (TypeError, KeyError): raise exc.HTTPBadRequest(explanation="Must specify 'status'") if update['status'] not in self.valid_status: expl = "Invalid state. 
Valid states: " +\ ', '.join(self.valid_status) + '.' raise exc.HTTPBadRequest(explanation=expl) return update def authorize(self, context, action_name): action = '%s_admin_actions:%s' % (self.resource_name, action_name) extensions.extension_authorizer('share', action)(context) @wsgi.action('os-reset_status') def _reset_status(self, req, id, body): """Reset status on the resource.""" context = req.environ['manila.context'] self.authorize(context, 'reset_status') update = self.validate_update(body['os-reset_status']) msg = _("Updating %(resource)s '%(id)s' with '%(update)r'") LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) try: self._update(context, id, update) except exception.NotFound as e: raise exc.HTTPNotFound(e) return webob.Response(status_int=202) class ShareAdminController(AdminController): """AdminController for Shares.""" collection = 'shares' def _update(self, *args, **kwargs): db.share_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete(*args, **kwargs) class SnapshotAdminController(AdminController): """AdminController for Snapshots.""" collection = 'snapshots' def _update(self, *args, **kwargs): db.share_snapshot_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_api.get_snapshot(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete_snapshot(*args, **kwargs) class Admin_actions(extensions.ExtensionDescriptor): """Enable admin actions.""" name = "AdminActions" alias = "os-admin-actions" namespace = "http://docs.openstack.org/share/ext/admin-actions/api/v1.1" updated = "2012-08-25T00:00:00+00:00" def get_controller_extensions(self): exts = [] for class_ in (ShareAdminController, SnapshotAdminController): controller = class_() extension = extensions.ControllerExtension( self, class_.collection, controller) exts.append(extension) return exts 
manila-2013.2.dev175.gbf1a399/manila/api/contrib/__init__.py0000664000175000017500000000240012301410454023267 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contrib contains extensions that are shipped with manila. It can't be called 'extensions' because that causes namespacing problems. """ from manila.api import extensions from manila.openstack.common import log as logging from oslo.config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) def standard_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, CONF.osapi_share_ext_list) manila-2013.2.dev175.gbf1a399/manila/api/contrib/user_quotas.py0000664000175000017500000000176112301410454024113 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # Author: Andrei Ostapenko # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from manila.api import extensions class User_quotas(extensions.ExtensionDescriptor): """Project user quota support.""" name = "UserQuotas" alias = "os-user-quotas" namespace = ("http://docs.openstack.org/compute/ext/user_quotas" "/api/v1.1") updated = "2013-07-18T00:00:00+00:00" manila-2013.2.dev175.gbf1a399/manila/api/contrib/services.py0000664000175000017500000001066312301410454023365 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob.exc from manila.api import extensions from manila.api.openstack import wsgi from manila.api import xmlutil from manila import db from manila import exception from manila.openstack.common import log as logging from manila.openstack.common import timeutils from manila import utils LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('share', 'services') class ServicesIndexTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('services') elem = xmlutil.SubTemplateElement(root, 'service', selector='services') elem.set('binary') elem.set('host') elem.set('zone') elem.set('status') elem.set('state') elem.set('update_at') return xmlutil.MasterTemplate(root, 1) class ServicesUpdateTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('host') root.set('host') root.set('service') root.set('disabled') return xmlutil.MasterTemplate(root, 1) class ServiceController(object): @wsgi.serializers(xml=ServicesIndexTemplate) def index(self, req): """ Return a list of all running services. Filter by host & service name. 
""" context = req.environ['manila.context'] authorize(context) now = timeutils.utcnow() services = db.service_get_all(context) host = '' if 'host' in req.GET: host = req.GET['host'] service = '' if 'service' in req.GET: service = req.GET['service'] if host: services = [s for s in services if s['host'] == host] if service: services = [s for s in services if s['binary'] == service] svcs = [] for svc in services: delta = now - (svc['updated_at'] or svc['created_at']) alive = abs(utils.total_seconds(delta)) art = (alive and "up") or "down" active = 'enabled' if svc['disabled']: active = 'disabled' svcs.append({"binary": svc['binary'], 'host': svc['host'], 'zone': svc['availability_zone'], 'status': active, 'state': art, 'updated_at': svc['updated_at']}) return {'services': svcs} @wsgi.serializers(xml=ServicesUpdateTemplate) def update(self, req, id, body): """Enable/Disable scheduling for a service""" context = req.environ['manila.context'] authorize(context) if id == "enable": disabled = False elif id == "disable": disabled = True else: raise webob.exc.HTTPNotFound("Unknown action") try: host = body['host'] service = body['service'] except (TypeError, KeyError): raise webob.exc.HTTPBadRequest() try: svc = db.service_get_by_args(context, host, service) if not svc: raise webob.exc.HTTPNotFound('Unknown service') db.service_update(context, svc['id'], {'disabled': disabled}) except exception.ServiceNotFound: raise webob.exc.HTTPNotFound("service not found") return {'host': host, 'service': service, 'disabled': disabled} class Services(extensions.ExtensionDescriptor): """Services support""" name = "Services" alias = "os-services" namespace = "http://docs.openstack.org/volume/ext/services/api/v2" updated = "2012-10-28T00:00:00-00:00" def get_resources(self): resources = [] resource = extensions.ResourceExtension('os-services', ServiceController()) resources.append(resource) return resources 
manila-2013.2.dev175.gbf1a399/manila/api/contrib/share_actions.py0000664000175000017500000001361212301410454024361 0ustar chuckchuck00000000000000# Copyright 2013 NetApp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import webob from manila.api import extensions from manila.api.openstack import wsgi from manila.api import xmlutil from manila import exception from manila import share authorize = extensions.extension_authorizer('share', 'services') class ShareAccessTemplate(xmlutil.TemplateBuilder): """XML Template for share access management methods.""" def construct(self): root = xmlutil.TemplateElement('access', selector='access') root.set("share_id") root.set("deleted") root.set("created_at") root.set("updated_at") root.set("access_type") root.set("access_to") root.set("state") root.set("deleted_at") root.set("id") return xmlutil.MasterTemplate(root, 1) class ShareAccessListTemplate(xmlutil.TemplateBuilder): """XML Template for share access list.""" def construct(self): root = xmlutil.TemplateElement('access_list') elem = xmlutil.SubTemplateElement(root, 'share', selector='access_list') elem.set("state") elem.set("id") elem.set("access_type") elem.set("access_to") return xmlutil.MasterTemplate(root, 1) class ShareActionsController(wsgi.Controller): def __init__(self, *args, **kwargs): super(ShareActionsController, self).__init__(*args, **kwargs) self.share_api = share.API() @staticmethod def _validate_username(access): valid_useraname_re = 
'[\w\.\-_\`;\'\{\}\[\]]{4,32}$' username = access if not re.match(valid_useraname_re, username): exc_str = ('Invalid user or group name. Must be 4-32 chars long ' 'and consist of alfanum and ]{.-_\'`;}[') raise webob.exc.HTTPBadRequest(explanation=exc_str) @staticmethod def _validate_ip_range(ip_range): ip_range = ip_range.split('/') exc_str = ('Supported ip format examples:\n' '\t10.0.0.2, 10.0.0.0/24') if len(ip_range) > 2: raise webob.exc.HTTPBadRequest(explanation=exc_str) if len(ip_range) == 2: try: prefix = int(ip_range[1]) if prefix < 0 or prefix > 32: raise ValueError() except ValueError: msg = 'IP prefix should be in range from 0 to 32' raise webob.exc.HTTPBadRequest(explanation=msg) ip_range = ip_range[0].split('.') if len(ip_range) != 4: raise webob.exc.HTTPBadRequest(explanation=exc_str) for item in ip_range: try: if 0 <= int(item) <= 255: continue raise ValueError() except ValueError: raise webob.exc.HTTPBadRequest(explanation=exc_str) @wsgi.action('os-allow_access') @wsgi.serializers(xml=ShareAccessTemplate) def _allow_access(self, req, id, body): """Add share access rule.""" context = req.environ['manila.context'] share = self.share_api.get(context, id) access_type = body['os-allow_access']['access_type'] access_to = body['os-allow_access']['access_to'] if access_type == 'ip': self._validate_ip_range(access_to) elif access_type == 'sid': self._validate_username(access_to) else: exc_str = "Only 'ip' or 'sid' access types are supported" raise webob.exc.HTTPBadRequest(explanation=exc_str) try: access = self.share_api.allow_access( context, share, access_type, access_to) except exception.ShareAccessExists as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) return {'access': access} @wsgi.action('os-deny_access') @wsgi.serializers(xml=ShareAccessTemplate) def _deny_access(self, req, id, body): """Remove access rule.""" context = req.environ['manila.context'] access_id = body['os-deny_access']['access_id'] try: access = 
self.share_api.access_get(context, access_id) if access.share_id != id: raise exception.NotFound() share = self.share_api.get(context, id) except exception.NotFound, error: raise webob.exc.HTTPNotFound(explanation=unicode(error)) self.share_api.deny_access(context, share, access) return webob.Response(status_int=202) @wsgi.action('os-access_list') @wsgi.serializers(xml=ShareAccessListTemplate) def _access_list(self, req, id, body): """list access rules.""" context = req.environ['manila.context'] share = self.share_api.get(context, id) access_list = self.share_api.access_get_all(context, share) return {'access_list': access_list} # def create_resource(): # return wsgi.Resource(ShareActionsController()) class Share_actions(extensions.ExtensionDescriptor): """Enable share actions.""" name = 'ShareActions' alias = 'share-actions' namespace = '' updated = '2012-08-14T00:00:00+00:00' def get_controller_extensions(self): controller = ShareActionsController() extension = extensions.ControllerExtension(self, 'shares', controller) return [extension] manila-2013.2.dev175.gbf1a399/manila/api/extensions.py0000664000175000017500000003227112301410454022300 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import webob.dec import webob.exc import manila.api.openstack from manila.api.openstack import wsgi from manila.api import xmlutil from manila import exception from manila.openstack.common import exception as common_exception from manila.openstack.common import importutils from manila.openstack.common import log as logging import manila.policy from oslo.config import cfg LOG = logging.getLogger(__name__) CONF = cfg.CONF class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. """ # The name of the extension, e.g., 'Fox In Socks' name = None # The alias for the extension, e.g., 'FOXNSOX' alias = None # Description comes from the docstring for the class # The XML namespace for the extension, e.g., # 'http://www.fox.in.socks/api/ext/pie/v1.0' namespace = None # The timestamp when the extension was last updated, e.g., # '2011-01-22T13:25:27-06:00' updated = None def __init__(self, ext_mgr): """Register extension with the extension manager.""" ext_mgr.register(self) self.ext_mgr = ext_mgr def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_controller_extensions(self): """List of extensions.ControllerExtension extension objects. Controller extensions are used to extend existing controllers. 
""" controller_exts = [] return controller_exts @classmethod def nsmap(cls): """Synthesize a namespace map from extension.""" # Start with a base nsmap nsmap = ext_nsmap.copy() # Add the namespace for the extension nsmap[cls.alias] = cls.namespace return nsmap @classmethod def xmlname(cls, name): """Synthesize element and attribute names.""" return '{%s}%s' % (cls.namespace, name) def make_ext(elem): elem.set('name') elem.set('namespace') elem.set('alias') elem.set('updated') desc = xmlutil.SubTemplateElement(elem, 'description') desc.text = 'description' xmlutil.make_links(elem, 'links') ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class ExtensionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('extension', selector='extension') make_ext(root) return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) class ExtensionsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('extensions') elem = xmlutil.SubTemplateElement(root, 'extension', selector='extensions') make_ext(elem) return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager super(ExtensionsResource, self).__init__(None) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.name ext_data['alias'] = ext.alias ext_data['description'] = ext.__doc__ ext_data['namespace'] = ext.namespace ext_data['updated'] = ext.updated ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data @wsgi.serializers(xml=ExtensionsTemplate) def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.iteritems(): extensions.append(self._translate(ext)) return dict(extensions=extensions) @wsgi.serializers(xml=ExtensionTemplate) def show(self, req, id): try: # NOTE(dprince): the extensions alias is used as the 'id' for show ext = 
self.extension_manager.extensions[id] except KeyError: raise webob.exc.HTTPNotFound() return dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionManager(object): """Load extensions from the configured extension path. See manila/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. """ def __init__(self): LOG.audit(_('Initializing extension manager.')) self.cls_list = CONF.osapi_share_extension self.extensions = {} self._load_extensions() def is_loaded(self, alias): return alias in self.extensions def register(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.alias LOG.audit(_('Loaded extension: %s'), alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionsResource(self))) for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: # NOTE(dprince): Extension aren't required to have resource # extensions pass return resources def get_controller_extensions(self): """Returns a list of ControllerExtension objects.""" controller_exts = [] for ext in self.extensions.values(): try: get_ext_method = ext.get_controller_extensions except AttributeError: # NOTE(Vek): Extensions aren't required to have # controller extensions continue controller_exts.extend(get_ext_method()) return controller_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug(_('Ext name: %s'), extension.name) LOG.debug(_('Ext alias: %s'), extension.alias) LOG.debug(_('Ext description: %s'), ' '.join(extension.__doc__.strip().split())) LOG.debug(_('Ext namespace: %s'), 
extension.namespace) LOG.debug(_('Ext updated: %s'), extension.updated) except AttributeError as ex: LOG.exception(_("Exception loading extension: %s"), unicode(ex)) return False return True def load_extension(self, ext_factory): """Execute an extension factory. Loads an extension. The 'ext_factory' is the name of a callable that will be imported and called with one argument--the extension manager. The factory callable is expected to call the register() method at least once. """ LOG.debug(_("Loading extension %s"), ext_factory) # Load the factory factory = importutils.import_class(ext_factory) # Call it LOG.debug(_("Calling extension factory %s"), ext_factory) factory(self) def _load_extensions(self): """Load extensions specified on the command line.""" extensions = list(self.cls_list) # NOTE(thingee): Backwards compat for the old extension loader path. # We can drop this post-grizzly in the H release. old_contrib_path = ('manila.api.openstack.share.contrib.' 'standard_extensions') new_contrib_path = 'manila.api.contrib.standard_extensions' if old_contrib_path in extensions: LOG.warn(_('osapi_share_extension is set to deprecated path: %s'), old_contrib_path) LOG.warn(_('Please set your flag or manila.conf settings for ' 'osapi_share_extension to: %s'), new_contrib_path) extensions = [e.replace(old_contrib_path, new_contrib_path) for e in extensions] for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warn(_('Failed to load extension %(ext_factory)s: ' '%(exc)s') % locals()) class ControllerExtension(object): """Extend core controllers of manila OpenStack API. Provide a way to extend existing manila OpenStack API core controllers. 
""" def __init__(self, extension, collection, controller): self.extension = extension self.collection = collection self.controller = controller class ResourceExtension(object): """Add top level resources to the OpenStack API in manila.""" def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, custom_routes_fn=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.custom_routes_fn = custom_routes_fn def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py if ext != '.py' or root == '__init__': continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname)) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s" % classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warn(_('Failed to load extension %(classpath)s: ' '%(exc)s') % locals()) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... 
ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) except common_exception.NotFound: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warn(_('Failed to load extension %(ext_name)s: ' '%(exc)s') % locals()) # Update the list of directories we'll explore... dirnames[:] = subdirs def extension_authorizer(api_name, extension_name): def authorize(context, target=None): if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} action = '%s_extension:%s' % (api_name, extension_name) manila.policy.enforce(context, action, target) return authorize def soft_extension_authorizer(api_name, extension_name): hard_authorize = extension_authorizer(api_name, extension_name) def authorize(context): try: hard_authorize(context) return True except exception.NotAuthorized: return False return authorize manila-2013.2.dev175.gbf1a399/manila/api/urlmap.py0000664000175000017500000002453212301410454021402 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import paste.urlmap import re import urllib2 from manila.api.openstack import wsgi from manila.openstack.common import log as logging _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile( r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) LOG = logging.getLogger(__name__) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in urllib2.parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. 
:return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = parts.next()[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def content_type_params(self, best_content_type): """Find parameters in Accept header for given content type.""" for content_type, params in self._content_types: if best_content_type == content_type: return params return {} def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for 
path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API 
version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The MIME type for the response is determined in one of two ways: # 1) URL path suffix (eg /servers/detail.json) # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) mime_type, app, app_url = self._path_strategy(host, port, path_info) # Accept application/atom+xml for the index query of each API # version mount point as well as the root index if (app_url and app_url + '/' == path_info) or path_info == '/': supported_content_types.append('application/atom+xml') if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: 
environ['manila.best_content_type'] = mime_type return app(environ, start_response) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response) manila-2013.2.dev175.gbf1a399/manila/api/views/0000775000175000017500000000000012301410516020656 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/api/views/versions.py0000664000175000017500000000521112301410454023100 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy
import os


def get_view_builder(req):
    """Return a ViewBuilder rooted at the request's application URL."""
    return ViewBuilder(req.application_url)


class ViewBuilder(object):
    """Builds dictionaries describing the available API versions."""

    def __init__(self, base_url):
        """
        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_choices(self, VERSIONS, req):
        """Render every known version as a 'choice' entry for the caller."""
        choices = []
        for key in VERSIONS:
            data = VERSIONS[key]
            choices.append({
                "id": data['id'],
                "status": data['status'],
                "links": [{"rel": "self",
                           "href": self.generate_href(req.path), }, ],
                "media-types": data['media-types'],
            })
        return dict(choices=choices)

    def build_versions(self, versions):
        """Render all versions, sorted by key, with self-links attached."""
        entries = []
        for key in sorted(versions.keys()):
            data = versions[key]
            entries.append({
                "id": data['id'],
                "status": data['status'],
                "updated": data['updated'],
                "links": self._build_links(data),
            })
        return dict(versions=entries)

    def build_version(self, version):
        """Render a single version, prepending a self-link.

        The input is deep-copied so the caller's dict is never mutated.
        """
        rendered = copy.deepcopy(version)
        self_link = {
            "rel": "self",
            "href": self.base_url.rstrip('/') + '/',
        }
        rendered['links'].insert(0, self_link)
        return dict(version=rendered)

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        return [{'rel': 'self', 'href': self.generate_href(), }, ]

    def generate_href(self, path=None):
        """Create an url that refers to a specific version_number."""
        version_number = 'v1'
        if not path:
            return os.path.join(self.base_url, version_number) + '/'
        return os.path.join(self.base_url, version_number, path.strip('/'))
from manila.api import common
from manila.openstack.common import log as logging


class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = 'shares'

    def summary_list(self, request, shares):
        """Show a list of shares without many details."""
        return self._list_view(self.summary, request, shares)

    def detail_list(self, request, shares):
        """Detailed view of a list of shares."""
        return self._list_view(self.detail, request, shares)

    def summary(self, request, share):
        """Generic, non-detailed view of an share."""
        view = {
            'id': share.get('id'),
            'name': share.get('display_name'),
            'links': self._get_links(request, share['id'])
        }
        return {'share': view}

    def detail(self, request, share):
        """Detailed view of a single share."""
        # share_metadata arrives as a list of {'key': ..., 'value': ...}
        # rows; flatten it into a plain dict (empty when absent/falsy).
        raw_metadata = share.get('share_metadata') or []
        metadata = dict((row['key'], row['value']) for row in raw_metadata)
        view = {
            'id': share.get('id'),
            'size': share.get('size'),
            'availability_zone': share.get('availability_zone'),
            'created_at': share.get('created_at'),
            'status': share.get('status'),
            'name': share.get('display_name'),
            'description': share.get('display_description'),
            'snapshot_id': share.get('snapshot_id'),
            'share_network_id': share.get('share_network_id'),
            'share_proto': share.get('share_proto'),
            'export_location': share.get('export_location'),
            'metadata': metadata,
            'links': self._get_links(request, share['id'])
        }
        return {'share': view}

    def _list_view(self, func, request, shares):
        """Provide a view for a list of shares."""
        result = {'shares': [func(request, share)['share']
                             for share in shares]}
        links = self._get_collection_links(request,
                                           shares,
                                           self._collection_name)
        if links:
            result['shares_links'] = links
        return result
snapshot.get('status'), 'name': snapshot.get('display_name'), 'description': snapshot.get('display_description'), 'size': snapshot.get('size'), 'share_proto': snapshot.get('share_proto'), 'export_location': snapshot.get('export_location'), 'links': self._get_links(request, snapshot['id']) } } def _list_view(self, func, request, snapshots): """Provide a view for a list of share snapshots.""" snapshots_list = [func(request, snapshot)['snapshot'] for snapshot in snapshots] snapshots_links = self._get_collection_links(request, snapshots, self._collection_name) snapshots_dict = {self._collection_name: snapshots_list} if snapshots_links: snapshots_dict['share_snapshots_links'] = snapshots_links return snapshots_dict manila-2013.2.dev175.gbf1a399/manila/api/views/__init__.py0000664000175000017500000000124212301410454022767 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/api/views/limits.py0000664000175000017500000000605012301410454022533 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
import datetime

from manila.openstack.common import timeutils


class ViewBuilder(object):
    """OpenStack API base limits view builder."""

    def build(self, rate_limits, absolute_limits):
        """Assemble the top-level 'limits' response body.

        :param rate_limits: iterable of raw rate-limit dicts
                            (keys: URI, regex, verb, value, remaining,
                            unit, resetTime)
        :param absolute_limits: dict of quota name -> value
        :returns: {"limits": {"rate": [...], "absolute": {...}}}
        """
        rate_limits = self._build_rate_limits(rate_limits)
        absolute_limits = self._build_absolute_limits(absolute_limits)

        output = {
            "limits": {
                "rate": rate_limits,
                "absolute": absolute_limits,
            },
        }

        return output

    def _build_absolute_limits(self, absolute_limits):
        """Builder for absolute limits

        absolute_limits should be given as a dict of limits.
        For example: {"ram": 512, "gigabytes": 1024}.
        Unknown names and None values are silently dropped.
        """
        limit_names = {
            "gigabytes": ["maxTotalShareGigabytes"],
            "shares": ["maxTotalShares"],
            "snapshots": ["maxTotalSnapshots"]
        }
        limits = {}
        # NOTE: .items() (rather than the Python-2-only .iteritems()) keeps
        # this working on both Python 2 and Python 3; behavior is identical.
        for name, value in absolute_limits.items():
            if name in limit_names and value is not None:
                # Use a distinct variable for the public alias instead of
                # shadowing the outer loop variable 'name'.
                for alias in limit_names[name]:
                    limits[alias] = value
        return limits

    def _build_rate_limits(self, rate_limits):
        """Group raw rate limits by (uri, regex), collecting their entries."""
        limits = []
        for rate_limit in rate_limits:
            _rate_limit_key = None
            _rate_limit = self._build_rate_limit(rate_limit)

            # check for existing key
            for limit in limits:
                if (limit["uri"] == rate_limit["URI"] and
                        limit["regex"] == rate_limit["regex"]):
                    _rate_limit_key = limit
                    break

            # ensure we have a key if we didn't find one
            if not _rate_limit_key:
                _rate_limit_key = {
                    "uri": rate_limit["URI"],
                    "regex": rate_limit["regex"],
                    "limit": [],
                }
                limits.append(_rate_limit_key)

            _rate_limit_key["limit"].append(_rate_limit)

        return limits

    def _build_rate_limit(self, rate_limit):
        """Render a single rate-limit entry; resetTime is a UTC timestamp."""
        _get_utc = datetime.datetime.utcfromtimestamp
        next_avail = _get_utc(rate_limit["resetTime"])
        return {
            "verb": rate_limit["verb"],
            "value": rate_limit["value"],
            "remaining": int(rate_limit["remaining"]),
            "unit": rate_limit["unit"],
            "next-available": timeutils.isotime(at=next_avail),
        }
security_service.get('updated_at'), 'status': security_service.get('status'), 'description': security_service.get('description'), 'dns_ip': security_service.get('dns_ip'), 'server': security_service.get('server'), 'domain': security_service.get('domain'), 'sid': security_service.get('sid'), 'password': security_service.get('password'), 'type': security_service.get('type') } } def _list_view(self, func, request, security_services): """Provide a view for a list of security services.""" security_services_list = [func(request, service)['security_service'] for service in security_services] security_services_dict = dict(security_services=security_services_list) return security_services_dict manila-2013.2.dev175.gbf1a399/manila/api/views/share_networks.py0000664000175000017500000000411612301410454024271 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common


class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = 'share_networks'

    def build_share_network(self, share_network):
        """View of a share network."""
        return {'share_network': self._build_share_network_view(share_network)}

    def build_share_networks(self, share_networks):
        """View of a list of share networks."""
        views = [self._build_share_network_view(network)
                 for network in share_networks]
        return {'share_networks': views}

    def _build_share_network_view(self, share_network):
        """Project the exposed fields of one share network into a dict."""
        fields = ('id', 'project_id', 'created_at', 'updated_at',
                  'neutron_net_id', 'neutron_subnet_id', 'network_type',
                  'segmentation_id', 'cidr', 'ip_version', 'name',
                  'description', 'status')
        # Missing keys surface as None, matching dict.get() on each field.
        return dict((field, share_network.get(field)) for field in fields)
See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ import functools import unittest import uuid import mox import nose.plugins.skip from oslo.config import cfg import stubout from manila.common import config from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils from manila import service from manila import tests from manila.tests import conf_fixture test_opts = [ cfg.StrOpt('sqlite_clean_db', default='clean.sqlite', help='File name of clean sqlite db'), cfg.BoolOpt('fake_tests', default=True, help='should we use everything for testing'), ] CONF = cfg.CONF CONF.register_opts(test_opts) LOG = logging.getLogger(__name__) class skip_test(object): """Decorator that skips a test.""" # TODO(tr3buchet): remember forever what comstud did here def __init__(self, msg): self.message = msg def __call__(self, func): @functools.wraps(func) def _skipper(*args, **kw): """Wrapped skipper function.""" raise nose.SkipTest(self.message) return _skipper class skip_if(object): """Decorator that skips a test if condition is true.""" def __init__(self, condition, msg): self.condition = condition self.message = msg def __call__(self, func): @functools.wraps(func) def _skipper(*args, **kw): """Wrapped skipper function.""" if self.condition: raise nose.SkipTest(self.message) func(*args, **kw) return _skipper class skip_unless(object): """Decorator that skips a test if condition is not true.""" def __init__(self, condition, msg): self.condition = condition self.message = msg def __call__(self, func): @functools.wraps(func) def _skipper(*args, **kw): """Wrapped skipper function.""" if not self.condition: raise nose.SkipTest(self.message) func(*args, **kw) return _skipper def skip_if_fake(func): """Decorator that skips a test if 
running in fake mode.""" def _skipper(*args, **kw): """Wrapped skipper function.""" if CONF.fake_tests: raise unittest.SkipTest('Test cannot be run in fake mode') else: return func(*args, **kw) return _skipper class TestingException(Exception): pass class TestCase(unittest.TestCase): """Test case base class for all unit tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. self.start = timeutils.utcnow() tests.reset_db() # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators self.mox = mox.Mox() self.stubs = stubout.StubOutForTesting() self.injected = [] self._services = [] CONF.set_override('fatal_exception_format_errors', True) def tearDown(self): """Runs after each test method to tear down test environment.""" try: self.mox.UnsetStubs() self.stubs.UnsetAll() self.stubs.SmartUnsetAll() self.mox.VerifyAll() super(TestCase, self).tearDown() finally: # Reset any overridden flags CONF.reset() # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass # Kill any services for x in self._services: try: x.kill() except Exception: pass # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: del self.__dict__[key] def flags(self, **kw): """Override flag variables for a test.""" for k, v in kw.iteritems(): CONF.set_override(k, v) def start_service(self, name, host=None, **kwargs): host = host and host or uuid.uuid4().hex kwargs.setdefault('host', host) kwargs.setdefault('binary', 'manila-%s' % name) svc = service.Service.create(**kwargs) svc.start() self._services.append(svc) return svc # Useful 
assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. """ def raise_assertion(msg): d1str = str(d1) d2str = str(d2) base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' 'd2: %(d2str)s' % locals()) raise AssertionError(base_msg) d1keys = set(d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys raise_assertion('Keys in d1 and not d2: %(d1only)s. ' 'Keys in d2 and not d1: %(d2only)s' % locals()) for key in d1keys: d1value = d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= tolerance except (ValueError, TypeError): # If both values aren't convertable to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue elif approx_equal and within_tolerance: continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % locals()) def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) L2str = str(L2) base_msg = ('List of dictionaries do not match: %(msg)s ' 'L1: %(L1str)s L2: %(L2str)s' % locals()) raise AssertionError(base_msg) L1count = len(L1) L2count = len(L2) if L1count != L2count: raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' 'len(L2)=%(L2count)d' % locals()) for d1, d2 in zip(L1, L2): self.assertDictMatch(d1, d2, approx_equal=approx_equal, tolerance=tolerance) def assertSubDictMatch(self, 
sub_dict, super_dict): """Assert a sub_dict is subset of super_dict.""" self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) for k, sub_value in sub_dict.items(): super_value = super_dict[k] if isinstance(sub_value, dict): self.assertSubDictMatch(sub_value, super_value) elif 'DONTCARE' in (sub_value, super_value): continue else: self.assertEqual(sub_value, super_value) def assertIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. Assert 'a' in 'b'""" try: f = super(TestCase, self).assertIn except AttributeError: self.assertTrue(a in b, *args, **kwargs) else: f(a, b, *args, **kwargs) def assertNotIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. Assert 'a' NOT in 'b'""" try: f = super(TestCase, self).assertNotIn except AttributeError: self.assertFalse(a in b, *args, **kwargs) else: f(a, b, *args, **kwargs) def assertIsInstance(self, a, b, *args, **kwargs): """Python < v2.7 compatibility.""" try: f = super(TestCase, self).assertIsInstance except AttributeError: self.assertTrue(isinstance(a, b)) else: f(a, b, *args, **kwargs) def assertIsNone(self, a, *args, **kwargs): """Python < v2.7 compatibility.""" try: f = super(TestCase, self).assertIsNone except AttributeError: self.assertTrue(a is None) else: f(a, *args, **kwargs) manila-2013.2.dev175.gbf1a399/manila/volume/0000775000175000017500000000000012301410516020257 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/volume/__init__.py0000664000175000017500000000236712301410454022401 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
import oslo.config.cfg

import manila.openstack.common.importutils


# Configuration knob selecting the concrete volume API implementation.
_volume_opts = [
    oslo.config.cfg.StrOpt('volume_api_class',
                           default='manila.volume.cinder.API',
                           help='The full class name of the '
                                'volume API class to use'),
]

oslo.config.cfg.CONF.register_opts(_volume_opts)


def API():
    """Instantiate and return the configured volume API implementation.

    The class is resolved by dotted path from the 'volume_api_class'
    config option (default: manila.volume.cinder.API).
    """
    class_path = oslo.config.cfg.CONF.volume_api_class
    api_cls = manila.openstack.common.importutils.import_class(class_path)
    return api_cls()
""" import copy import sys from cinderclient import exceptions as cinder_exception from cinderclient import service_catalog from cinderclient.v1 import client as cinder_client from oslo.config import cfg from manila.db import base from manila import exception from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging cinder_opts = [ cfg.StrOpt('cinder_catalog_info', default='volume:cinder:publicURL', help='Info to match when looking for cinder in the service ' 'catalog. Format is : separated values of the form: ' '::'), cfg.StrOpt('os_region_name', help='region name of this node'), cfg.StrOpt('cinder_ca_certificates_file', help='Location of ca certificates file to use for cinder ' 'client requests.'), cfg.IntOpt('cinder_http_retries', default=3, help='Number of cinderclient retries on failed http calls'), cfg.BoolOpt('cinder_api_insecure', default=False, help='Allow to perform insecure SSL requests to cinder'), cfg.BoolOpt('cinder_cross_az_attach', default=True, help='Allow attach between instance and volume in different ' 'availability zones.'), cfg.StrOpt('cinder_admin_username', default='cinder', help='Cinder admin username'), cfg.StrOpt('cinder_admin_password', help='Cinder admin password'), cfg.StrOpt('cinder_admin_tenant_name', default='service', help='Cinder admin tenant name'), cfg.StrOpt('cinder_admin_auth_url', default='http://localhost:5000/v2.0', help='Identity service url') ] CONF = cfg.CONF CONF.register_opts(cinder_opts) LOG = logging.getLogger(__name__) def cinderclient(context): if context.is_admin and context.project_id is None: c = cinder_client.Client(CONF.cinder_admin_username, CONF.cinder_admin_password, CONF.cinder_admin_tenant_name, CONF.cinder_admin_auth_url) c.authenticate() return c compat_catalog = { 'access': {'serviceCatalog': context.service_catalog or []} } sc = service_catalog.ServiceCatalog(compat_catalog) info = CONF.cinder_catalog_info service_type, service_name, endpoint_type = 
info.split(':') # extract the region if set in configuration if CONF.os_region_name: attr = 'region' filter_value = CONF.os_region_name else: attr = None filter_value = None url = sc.url_for(attr=attr, filter_value=filter_value, service_type=service_type, service_name=service_name, endpoint_type=endpoint_type) LOG.debug(_('Cinderclient connection created using URL: %s') % url) c = cinder_client.Client(context.user_id, context.auth_token, project_id=context.project_id, auth_url=url, insecure=CONF.cinder_api_insecure, retries=CONF.cinder_http_retries, cacert=CONF.cinder_ca_certificates_file) # noauth extracts user_id:project_id from auth_token c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id, context.project_id) c.client.management_url = url return c def _untranslate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} d['id'] = vol.id d['status'] = vol.status d['size'] = vol.size d['availability_zone'] = vol.availability_zone d['created_at'] = vol.created_at d['attach_time'] = "" d['mountpoint'] = "" if vol.attachments: att = vol.attachments[0] d['attach_status'] = 'attached' d['instance_uuid'] = att['server_id'] d['mountpoint'] = att['device'] else: d['attach_status'] = 'detached' d['display_name'] = vol.display_name d['display_description'] = vol.display_description d['volume_type_id'] = vol.volume_type d['snapshot_id'] = vol.snapshot_id d['volume_metadata'] = {} for key, value in vol.metadata.items(): d['volume_metadata'][key] = value if hasattr(vol, 'volume_image_metadata'): d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata) return d def _untranslate_snapshot_summary_view(context, snapshot): """Maps keys for snapshots summary view.""" d = {} d['id'] = snapshot.id d['status'] = snapshot.status d['progress'] = snapshot.progress d['size'] = snapshot.size d['created_at'] = snapshot.created_at d['display_name'] = snapshot.display_name d['display_description'] = snapshot.display_description 
d['volume_id'] = snapshot.volume_id d['project_id'] = snapshot.project_id d['volume_size'] = snapshot.size return d def translate_volume_exception(method): """Transforms the exception for the volume but keeps its traceback intact. """ def wrapper(self, ctx, volume_id, *args, **kwargs): try: res = method(self, ctx, volume_id, *args, **kwargs) except cinder_exception.ClientException: exc_type, exc_value, exc_trace = sys.exc_info() if isinstance(exc_value, cinder_exception.NotFound): exc_value = exception.VolumeNotFound(volume_id=volume_id) elif isinstance(exc_value, cinder_exception.BadRequest): exc_value = exception.InvalidInput(reason=exc_value.message) raise exc_value, None, exc_trace return res return wrapper def translate_snapshot_exception(method): """Transforms the exception for the snapshot but keeps its traceback intact. """ def wrapper(self, ctx, snapshot_id, *args, **kwargs): try: res = method(self, ctx, snapshot_id, *args, **kwargs) except cinder_exception.ClientException: exc_type, exc_value, exc_trace = sys.exc_info() if isinstance(exc_value, cinder_exception.NotFound): exc_value = exception.\ VolumeSnapshotNotFound(snapshot_id=snapshot_id) raise exc_value, None, exc_trace return res return wrapper class API(base.Base): """API for interacting with the volume manager.""" @translate_volume_exception def get(self, context, volume_id): item = cinderclient(context).volumes.get(volume_id) return _untranslate_volume_summary_view(context, item) def get_all(self, context, search_opts={}): items = cinderclient(context).volumes.list(detailed=True, search_opts=search_opts) rval = [] for item in items: rval.append(_untranslate_volume_summary_view(context, item)) return rval def check_attached(self, context, volume): """Raise exception if volume in use.""" if volume['status'] != "in-use": msg = _("status must be 'in-use'") raise exception.InvalidVolume(reason=msg) def check_attach(self, context, volume, instance=None): if volume['status'] != "available": msg = 
_("status must be 'available'") raise exception.InvalidVolume(reason=msg) if volume['attach_status'] == "attached": msg = _("already attached") raise exception.InvalidVolume(reason=msg) if instance and not CONF.cinder_cross_az_attach: if instance['availability_zone'] != volume['availability_zone']: msg = _("Instance and volume not in same availability_zone") raise exception.InvalidVolume(reason=msg) def check_detach(self, context, volume): if volume['status'] == "available": msg = _("already detached") raise exception.InvalidVolume(reason=msg) @translate_volume_exception def reserve_volume(self, context, volume_id): cinderclient(context).volumes.reserve(volume_id) @translate_volume_exception def unreserve_volume(self, context, volume_id): cinderclient(context).volumes.unreserve(volume_id) @translate_volume_exception def begin_detaching(self, context, volume_id): cinderclient(context).volumes.begin_detaching(volume_id) @translate_volume_exception def roll_detaching(self, context, volume_id): cinderclient(context).volumes.roll_detaching(volume_id) @translate_volume_exception def attach(self, context, volume_id, instance_uuid, mountpoint): cinderclient(context).volumes.attach(volume_id, instance_uuid, mountpoint) @translate_volume_exception def detach(self, context, volume_id): cinderclient(context).volumes.detach(volume_id) @translate_volume_exception def initialize_connection(self, context, volume_id, connector): return cinderclient(context).volumes.initialize_connection(volume_id, connector) @translate_volume_exception def terminate_connection(self, context, volume_id, connector): return cinderclient(context).volumes.terminate_connection(volume_id, connector) def create(self, context, size, name, description, snapshot=None, image_id=None, volume_type=None, metadata=None, availability_zone=None): if snapshot is not None: snapshot_id = snapshot['id'] else: snapshot_id = None kwargs = dict(snapshot_id=snapshot_id, display_name=name, display_description=description, 
volume_type=volume_type, user_id=context.user_id, project_id=context.project_id, availability_zone=availability_zone, metadata=metadata, imageRef=image_id) try: item = cinderclient(context).volumes.create(size, **kwargs) return _untranslate_volume_summary_view(context, item) except cinder_exception.BadRequest as e: raise exception.InvalidInput(reason=e.message) @translate_volume_exception def delete(self, context, volume_id): cinderclient(context).volumes.delete(volume_id) @translate_volume_exception def update(self, context, volume_id, fields): raise NotImplementedError() def get_volume_encryption_metadata(self, context, volume_id): return cinderclient(context).volumes.get_encryption_metadata(volume_id) @translate_snapshot_exception def get_snapshot(self, context, snapshot_id): item = cinderclient(context).volume_snapshots.get(snapshot_id) return _untranslate_snapshot_summary_view(context, item) def get_all_snapshots(self, context, search_opts=None): items = cinderclient(context).volume_snapshots.list(detailed=True, search_opts=search_opts) rvals = [] for item in items: rvals.append(_untranslate_snapshot_summary_view(context, item)) return rvals @translate_volume_exception def create_snapshot(self, context, volume_id, name, description): item = cinderclient(context).volume_snapshots.create(volume_id, False, name, description) return _untranslate_snapshot_summary_view(context, item) @translate_volume_exception def create_snapshot_force(self, context, volume_id, name, description): item = cinderclient(context).volume_snapshots.create(volume_id, True, name, description) return _untranslate_snapshot_summary_view(context, item) @translate_snapshot_exception def delete_snapshot(self, context, snapshot_id): cinderclient(context).volume_snapshots.delete(snapshot_id) manila-2013.2.dev175.gbf1a399/manila/exception.py0000664000175000017500000003425512301410454021332 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States 
Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Manila base exception handling. Includes decorator for re-raising Manila-type exceptions. SHOULD include dedicated exception logging. """ from oslo.config import cfg import webob.exc from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='make exception message format errors fatal'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' message = _('%(description)s\nCommand: %(cmd)s\n' 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) class Error(Exception): pass class DBError(Error): """Wraps an implementation specific exception.""" def __init__(self, 
inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) def wrap_db_error(f): def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise InvalidUnicodeParameter() except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) _wrap.func_name = f.func_name return _wrap class ManilaException(Exception): """Base Manila Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception as e: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in kwargs.iteritems(): LOG.error("%s: %s" % (name, value)) if CONF.fatal_exception_format_errors: raise e else: # at least get the core message out if something happened message = self.message self.msg = message super(ManilaException, self).__init__(message) class NetworkException(ManilaException): message = _("Exception due to network failure") class NetworkAllocationException(NetworkException): message = _("Failure during network allocation") class NetworkBadConfigurationException(NetworkException): message = _("Bad network configuration: %(reason)s") class GlanceConnectionFailed(ManilaException): message = _("Connection to glance failed") + ": %(reason)s" class NotAuthorized(ManilaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s 
to be performed.") class ImageNotAuthorized(ManilaException): message = _("Not authorized for image %(image_id)s.") class Invalid(ManilaException): message = _("Unacceptable parameters.") code = 400 class SfJsonEncodeFailure(ManilaException): message = _("Failed to load data into json format") class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidResults(Invalid): message = _("The results are invalid.") class InvalidInput(Invalid): message = _("Invalid input received") + ": %(reason)s" class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidUnicodeParameter(Invalid): message = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid).") class NotFound(ManilaException): message = _("Resource could not be found.") code = 404 safe = True class InUse(ManilaException): message = _("Resource is in use.") class ShareNetworkNotFound(NotFound): message = _("Network %(share_network_id)s could not be found.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler Host Filter %(filter_name)s could not be found.") class SchedulerHostWeigherNotFound(NotFound): message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") 
class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") class InvalidReservationExpiration(Invalid): message = _("Invalid reservation expiration %(expire)s.") class InvalidQuotaValue(Invalid): msg_fmt = _("Change would make usage less than 0 for the following " "resources: %(unders)s") class QuotaNotFound(NotFound): msg_fmt = _("Quota could not be found") class QuotaExists(ManilaException): msg_fmt = _("Quota exists for project %(project_id)s, " "resource %(resource)s") class QuotaResourceUnknown(QuotaNotFound): msg_fmt = _("Unknown quota resources %(unknown)s.") class ProjectUserQuotaNotFound(QuotaNotFound): msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s " "could not be found.") class ProjectQuotaNotFound(QuotaNotFound): msg_fmt = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): msg_fmt = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): msg_fmt = _("Quota usage for project %(project_id)s could not be found.") class ReservationNotFound(QuotaNotFound): msg_fmt = _("Quota reservation %(uuid)s could not be found.") class OverQuota(ManilaException): msg_fmt = _("Quota exceeded for resources: %(overs)s") class MigrationNotFound(NotFound): message = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): message = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class ClassNotFound(NotFound): message = _("Class %(class_name)s could not be found: %(exception)s") class NotAllowed(ManilaException): message = _("Action not allowed.") #TODO(bcwaldon): EOL this exception! 
class Duplicate(ManilaException): pass class KeyPairExists(Duplicate): message = _("Key pair %(key_name)s already exists.") class MigrationError(ManilaException): message = _("Migration error") + ": %(reason)s" class MalformedRequestBody(ManilaException): message = _("Malformed message body: %(reason)s") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s") class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") class NoValidHost(ManilaException): message = _("No valid host was found. %(reason)s") class WillNotSchedule(ManilaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(ManilaException): message = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class ShareSizeExceedsAvailableQuota(QuotaError): message = _("Requested share or snapshot exceeds " "allowed Gigabytes quota") class ShareSizeExceedsQuota(QuotaError): message = _("Maximum share/snapshot size exceeded") class ShareLimitExceeded(QuotaError): message = _("Maximum number of shares allowed (%(allowed)d) exceeded") class SnapshotLimitExceeded(QuotaError): message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") class Duplicate3PARHost(ManilaException): message = _("3PAR Host already exists: %(err)s. 
%(info)s") class Invalid3PARDomain(ManilaException): message = _("Invalid 3PAR Domain: %(err)s") class SolidFireAPIException(ManilaException): message = _("Bad response from SolidFire API") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class UnknownCmd(Invalid): message = _("Unknown or unsupported command %(cmd)s") class MalformedResponse(Invalid): message = _("Malformed response to command %(cmd)s: %(reason)s") class BadHTTPResponseStatus(ManilaException): message = _("Bad HTTP response status %(status)s") class FailedCmdWithDump(ManilaException): message = _("Operation failed with status=%(status)s. Full dump: %(data)s") class ShareBackendAPIException(ManilaException): message = _("Bad or unexpected response from the storage share " "backend API: %(data)s") class NfsException(ManilaException): message = _("Unknown NFS exception") class NfsNoSharesMounted(NotFound): message = _("No mounted NFS shares found") class NfsNoSuitableShareFound(NotFound): message = _("There is no share which can host %(share_size)sG") class GlusterfsException(ManilaException): message = _("Unknown Gluster exception") class GlusterfsNoSharesMounted(NotFound): message = _("No mounted Gluster shares found") class GlusterfsNoSuitableShareFound(NotFound): message = _("There is no share which can host %(share_size)sG") class ImageCopyFailure(Invalid): message = _("Failed to copy image to share") class InvalidShare(ManilaException): message = _("Invalid share: %(reason)s") class PortLimitExceeded(QuotaError): message = _("Maximum number of ports exceeded") class ShareAccessNotFound(NotFound): message = _("Access_id %(access_id)s not found") class ShareAccessExists(Duplicate): message = _("Share access %(access_type)s:%(access)s exists") class InvalidShareAccess(ManilaException): message = _("Invalid access_rule: %(reason)s") class ShareIsBusy(ManilaException): message = _("Deleting $(share_name) share that used") class 
ShareBackendException(ManilaException): message = _("Share backend error: %(msg)s") class ShareSnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ShareSnapshotIsBusy(ManilaException): message = _("Deleting snapshot %(snapshot_name)s that has " "dependent shares.") class InvalidShareSnapshot(ManilaException): message = _("Invalid share snapshot: %(reason)s") class SwiftConnectionFailed(ManilaException): message = _("Connection to swift failed") + ": %(reason)s" class ShareMetadataNotFound(NotFound): message = _("Metadata item is not found") class InvalidShareMetadata(Invalid): message = _("Invalid metadata") class InvalidShareMetadataSize(Invalid): message = _("Invalid metadata size") class SecurityServiceNotFound(NotFound): message = _("Security service %(security_service_id)s could not be found.") class ShareNetworkSecurityServiceAssociationError(ManilaException): message = _("Failed to associate share network %(share_network_id)s" " and security service %(security_service_id)s: %(reason)s.") class ShareNetworkSecurityServiceDissociationError(ManilaException): message = _("Failed to dissociate share network %(share_network_id)s" " and security service %(security_service_id)s: %(reason)s.") class InvalidShareNetwork(ManilaException): message = _("Invalid share network: %(reason)s") class InvalidVolume(Invalid): message = _("Invalid volume.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class VolumeSnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class BridgeDoesNotExist(ManilaException): message = _("Bridge %(bridge)s does not exist.") manila-2013.2.dev175.gbf1a399/manila/utils.py0000664000175000017500000011661612301410454020476 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as 
represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import datetime import errno import functools import hashlib import inspect import os import paramiko import pyclbr import random import re import shlex import shutil import signal import socket import struct import sys import tempfile import time from xml.dom import minidom from xml.parsers import expat from xml import sax from xml.sax import expatreader from xml.sax import saxutils from oslo.config import cfg from eventlet import event from eventlet.green import subprocess from eventlet import greenthread from eventlet import pools from manila import exception from manila.openstack.common import excutils from manila.openstack.common import importutils from manila.openstack.common import lockutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils CONF = cfg.CONF LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" synchronized = lockutils.synchronized_with_prefix('manila-') def find_config(config_path): """Find a configuration file using the given hint. :param config_path: Full or relative path to the config. :returns: Full path of the config, if it exists. 
:raises: `manila.exception.ConfigNotFound` """ possible_locations = [ config_path, os.path.join(CONF.state_path, "etc", "manila", config_path), os.path.join(CONF.state_path, "etc", config_path), os.path.join(CONF.state_path, config_path), "/etc/manila/%s" % config_path, ] for path in possible_locations: if os.path.exists(path): return os.path.abspath(path) raise exception.ConfigNotFound(path=os.path.abspath(config_path)) def fetchfile(url, target): LOG.debug(_('Fetching %s') % url) execute('curl', '--fail', url, '-o', target) def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def execute(*cmd, **kwargs): """Helper method to execute command with optional retry. If you add a run_as_root=True command, don't forget to add the corresponding filter to etc/manila/rootwrap.d ! :param cmd: Passed to subprocess.Popen. :param process_input: Send to opened process. :param check_exit_code: Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise exception.ProcessExecutionError unless program exits with one of these code. :param delay_on_retry: True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :param attempts: How many times to retry cmd. :param run_as_root: True | False. Defaults to False. If set to True, the command is prefixed by the command specified in the root_helper FLAG. :raises exception.Error: on receiving unknown arguments :raises exception.ProcessExecutionError: :returns: a tuple, (stdout, stderr) from the spawned process, or None if the command fails. 
""" process_input = kwargs.pop('process_input', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code check_exit_code = [0] elif isinstance(check_exit_code, int): check_exit_code = [check_exit_code] delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) run_as_root = kwargs.pop('run_as_root', False) shell = kwargs.pop('shell', False) if len(kwargs): raise exception.Error(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) if run_as_root: if CONF.rootwrap_config is None or CONF.root_helper != 'sudo': LOG.deprecated(_('The root_helper option (which lets you specify ' 'a root wrapper different from manila-rootwrap, ' 'and defaults to using sudo) is now deprecated. ' 'You should use the rootwrap_config option ' 'instead.')) if (CONF.rootwrap_config is not None): cmd = ['sudo', 'manila-rootwrap', CONF.rootwrap_config] + list(cmd) else: cmd = shlex.split(CONF.root_helper) + list(cmd) cmd = map(str, cmd) while attempts > 0: attempts -= 1 try: LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) _PIPE = subprocess.PIPE # pylint: disable=E1101 obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, close_fds=True, preexec_fn=_subprocess_setup, shell=shell) result = None if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 if _returncode: LOG.debug(_('Result was %s') % _returncode) if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result raise exception.ProcessExecutionError( exit_code=_returncode, stdout=stdout, stderr=stderr, cmd=' '.join(cmd)) return result except exception.ProcessExecutionError: if not attempts: raise else: LOG.debug(_('%r failed. 
Retrying.'), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) def trycmd(*args, **kwargs): """ A wrapper around execute() to more easily handle warnings and errors. Returns an (out, err) tuple of strings containing the output of the command's stdout and stderr. If 'err' is not empty then the command can be considered to have failed. :discard_warnings True | False. Defaults to False. If set to True, then for succeeding commands, stderr is cleared """ discard_warnings = kwargs.pop('discard_warnings', False) try: out, err = execute(*args, **kwargs) failed = False except exception.ProcessExecutionError, exn: out, err = '', str(exn) LOG.debug(err) failed = True if not failed and discard_warnings and err: # Handle commands that output to stderr but otherwise succeed LOG.debug(err) err = '' return out, err def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): LOG.debug(_('Running cmd (SSH): %s'), cmd) if addl_env: raise exception.Error(_('Environment not supported over SSH')) if process_input: # This is (probably) fixable if we need it... raise exception.Error(_('process_input not supported over SSH')) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel #stdin.write('process_input would go here') #stdin.flush() # NOTE(justinsb): This seems suspicious... 
# ...other SSH clients have buffering issues with this approach stdout = stdout_stream.read() stderr = stderr_stream.read() stdin_stream.close() stdout_stream.close() stderr_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug(_('Result was %s') % exit_status) if check_exit_code and exit_status != 0: raise exception.ProcessExecutionError(exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=cmd) channel.close() return (stdout, stderr) def create_channel(client, width, height): """Invoke an interactive shell session on server.""" channel = client.invoke_shell() channel.resize_pty(width, height) return channel class SSHPool(pools.Pool): """A simple eventlet pool to hold ssh connections.""" def __init__(self, ip, port, conn_timeout, login, password=None, privatekey=None, *args, **kwargs): self.ip = ip self.port = port self.login = login self.password = password self.conn_timeout = conn_timeout if conn_timeout else None self.privatekey = privatekey super(SSHPool, self).__init__(*args, **kwargs) def create(self): try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if self.password: ssh.connect(self.ip, port=self.port, username=self.login, password=self.password, timeout=self.conn_timeout) elif self.privatekey: pkfile = os.path.expanduser(self.privatekey) privatekey = paramiko.RSAKey.from_private_key_file(pkfile) ssh.connect(self.ip, port=self.port, username=self.login, pkey=privatekey, timeout=self.conn_timeout) else: msg = _("Specify a password or private_key") raise exception.ManilaException(msg) # Paramiko by default sets the socket timeout to 0.1 seconds, # ignoring what we set thru the sshclient. This doesn't help for # keeping long lived connections. Hence we have to bypass it, by # overriding it after the transport is initialized. 
We are setting # the sockettimeout to None and setting a keepalive packet so that, # the server will keep the connection open. All that does is send # a keepalive packet every ssh_conn_timeout seconds. if self.conn_timeout: transport = ssh.get_transport() transport.sock.settimeout(None) transport.set_keepalive(self.conn_timeout) return ssh except Exception as e: msg = _("Error connecting via ssh: %s") % e LOG.error(msg) raise paramiko.SSHException(msg) def get(self): """ Return an item from the pool, when one is available. This may cause the calling greenthread to block. Check if a connection is active before returning it. For dead connections create and return a new connection. """ if self.free_items: conn = self.free_items.popleft() if conn: if conn.get_transport().is_active(): return conn else: conn.close() return self.create() if self.current_size < self.max_size: created = self.create() self.current_size += 1 return created return self.channel.get() def remove(self, ssh): """Close an ssh client and remove it from free_items.""" ssh.close() ssh = None if ssh in self.free_items: self.free_items.pop(ssh) if self.current_size > 0: self.current_size -= 1 def maniladir(): import manila return os.path.abspath(manila.__file__).split('manila/__init__.py')[0] def debug(arg): LOG.debug(_('debug in callback: %s'), arg) return arg def generate_uid(topic, size=8): characters = '01234567890abcdefghijklmnopqrstuvwxyz' choices = [random.choice(characters) for x in xrange(size)] return '%s-%s' % (topic, ''.join(choices)) # Default symbols to use for passwords. Avoids visually confusing characters. 
# ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz') # Removed: l # ~5 bits per symbol EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O def last_completed_audit_period(unit=None): """This method gives you the most recently *completed* audit period. arguments: units: string, one of 'hour', 'day', 'month', 'year' Periods normally begin at the beginning (UTC) of the period unit (So a 'day' period begins at midnight UTC, a 'month' unit on the 1st, a 'year' on Jan, 1) unit string may be appended with an optional offset like so: 'day@18' This will begin the period at 18:00 UTC. 'month@15' starts a monthly period on the 15th, and year@3 begins a yearly one on March 1st. returns: 2 tuple of datetimes (begin, end) The begin timestamp of this audit period is the same as the end of the previous.""" if not unit: unit = CONF.volume_usage_audit_period offset = 0 if '@' in unit: unit, offset = unit.split("@", 1) offset = int(offset) rightnow = timeutils.utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') if unit == 'month': if offset == 0: offset = 1 end = datetime.datetime(day=offset, month=rightnow.month, year=rightnow.year) if end >= rightnow: year = rightnow.year if 1 >= rightnow.month: year -= 1 month = 12 + (rightnow.month - 1) else: month = rightnow.month - 1 end = datetime.datetime(day=offset, month=month, year=year) year = end.year if 1 >= end.month: year -= 1 month = 12 + (end.month - 1) else: month = end.month - 1 begin = datetime.datetime(day=offset, month=month, year=year) elif unit == 'year': if offset == 0: offset = 1 end = datetime.datetime(day=1, month=offset, year=rightnow.year) if end >= rightnow: end = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 2) else: begin = 
datetime.datetime(day=1, month=offset, year=rightnow.year - 1) elif unit == 'day': end = datetime.datetime(hour=offset, day=rightnow.day, month=rightnow.month, year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) elif unit == 'hour': end = rightnow.replace(minute=offset, second=0, microsecond=0) if end >= rightnow: end = end - datetime.timedelta(hours=1) begin = end - datetime.timedelta(hours=1) return (begin, end) def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbol groups. At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) """ r = random.SystemRandom() # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group password = [r.choice(s) for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. r.shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend([r.choice(symbols) for _i in xrange(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group r.shuffle(password) return ''.join(password) def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): # Use the same implementation as the password generation. 
return generate_password(length, symbolgroups) def last_octet(address): return int(address.split('.')[-1]) def get_my_linklocal(interface): try: if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] if address[0] is not None: return address[0] else: raise exception.Error(_('Link Local address is not found.:%s') % if_str) except Exception as ex: raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" " :%(ex)s") % locals()) def parse_mailmap(mailmap='.mailmap'): mapping = {} if os.path.exists(mailmap): fp = open(mailmap, 'r') for l in fp: l = l.strip() if not l.startswith('#') and ' ' in l: canonical_email, alias = l.split(' ') mapping[alias.lower()] = canonical_email.lower() return mapping def str_dict_replace(s, mapping): for s1, s2 in mapping.iteritems(): s = s.replace(s1, s2) return s class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" def __init__(self, pivot, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None def __get_backend(self): if not self.__backend: backend_name = CONF[self.__pivot] if backend_name not in self.__backends: raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if isinstance(backend, tuple): name = backend[0] fromlist = backend[1] else: name = backend fromlist = backend self.__backend = __import__(name, None, None, fromlist) LOG.debug(_('backend %s'), self.__backend) return self.__backend def __getattr__(self, key): backend = self.__get_backend() return getattr(backend, key) class LoopingCallDone(Exception): """Exception to break out and stop a LoopingCall. The poll-function passed to LoopingCall can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. 
An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCall.wait() """ def __init__(self, retvalue=True): """:param retvalue: Value that LoopingCall.wait() should return.""" self.retvalue = retvalue class LoopingCall(object): def __init__(self, f=None, *args, **kw): self.args = args self.kw = kw self.f = f self._running = False def start(self, interval, initial_delay=None): self._running = True done = event.Event() def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: self.f(*self.args, **self.kw) if not self._running: break greenthread.sleep(interval) except LoopingCallDone, e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn(_inner) return self.done def stop(self): self._running = False def wait(self): return self.done.wait() class ProtectedExpatParser(expatreader.ExpatParser): """An expat parser which disables DTD's and entities by default.""" def __init__(self, forbid_dtd=True, forbid_entities=True, *args, **kwargs): # Python 2.x old style class expatreader.ExpatParser.__init__(self, *args, **kwargs) self.forbid_dtd = forbid_dtd self.forbid_entities = forbid_entities def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise ValueError("Inline DTD forbidden") def entity_decl(self, entityName, is_parameter_entity, value, base, systemId, publicId, notationName): raise ValueError(" forbidden") def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise ValueError(" forbidden") def reset(self): expatreader.ExpatParser.reset(self) if self.forbid_dtd: self._parser.StartDoctypeDeclHandler = self.start_doctype_decl if self.forbid_entities: self._parser.EntityDeclHandler = self.entity_decl self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl def 
safe_minidom_parse_string(xml_string): """Parse an XML string using minidom safely. """ try: return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except sax.SAXParseException as se: raise expat.ExpatError() def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML. """ return saxutils.escape(value, {'"': '"', "'": '''}) def utf8(value): """Try to turn a string into utf-8 if possible. Code is directly from the utf8 function in http://github.com/facebook/tornado/blob/master/tornado/escape.py """ if isinstance(value, unicode): return value.encode('utf-8') assert isinstance(value, str) return value def delete_if_exists(pathname): """delete a file, but ignore file not found error""" try: os.unlink(pathname) except OSError as e: if e.errno == errno.ENOENT: return else: raise def get_from_path(items, path): """Returns a list of items matching the specified path. Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the intermediate results are lists it will treat each list item individually. A 'None' in items or any child expressions will be ignored, this function will not throw because of None (anywhere) in items. The returned list will contain no None values. 
""" if path is None: raise exception.Error('Invalid mini_xpath') (first_token, sep, remainder) = path.partition('/') if first_token == '': raise exception.Error('Invalid mini_xpath') results = [] if items is None: return results if not isinstance(items, list): # Wrap single objects in a list items = [items] for item in items: if item is None: continue get_method = getattr(item, 'get', None) if get_method is None: continue child = get_method(first_token) if child is None: continue if isinstance(child, list): # Flatten intermediate lists for x in child: results.append(x) else: results.append(child) if not sep: # No more tokens return results else: return get_from_path(results, remainder) def flatten_dict(dict_, flattened=None): """Recursively flatten a nested dictionary.""" flattened = flattened or {} for key, value in dict_.iteritems(): if hasattr(value, 'iteritems'): flatten_dict(value, flattened) else: flattened[key] = value return flattened def partition_dict(dict_, keys): """Return two dicts, one with `keys` the other with everything else.""" intersection = {} difference = {} for key, value in dict_.iteritems(): if key in keys: intersection[key] = value else: difference[key] = value return intersection, difference def map_dict_keys(dict_, key_map): """Return a dict in which the dictionaries keys are mapped to new keys.""" mapped = {} for key, value in dict_.iteritems(): mapped_key = key_map[key] if key in key_map else key mapped[mapped_key] = value return mapped def subset_dict(dict_, keys): """Return a dict that only contains a subset of keys.""" subset = partition_dict(dict_, keys)[0] return subset def check_isinstance(obj, cls): """Checks that obj is of type cls, and lets PyLint infer types.""" if isinstance(obj, cls): return obj raise Exception(_('Expected object of type: %s') % (str(cls))) # TODO(justinsb): Can we make this better?? return cls() # Ugly PyLint hack def is_valid_boolstr(val): """Check if the provided string is a valid bool string or not. 
""" val = str(val).lower() return (val == 'true' or val == 'false' or val == 'yes' or val == 'no' or val == 'y' or val == 'n' or val == '1' or val == '0') def is_valid_ipv4(address): """valid the address strictly as per format xxx.xxx.xxx.xxx. where xxx is a value between 0 and 255. """ parts = address.split(".") if len(parts) != 4: return False for item in parts: try: if not 0 <= int(item) <= 255: return False except ValueError: return False return True def monkey_patch(): """ If the Flags.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'manila.api.ec2.cloud:' \ manila.openstack.common.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See manila.openstack.common.notifier.api.notify_decorator) name - name of the function function - object of the function """ # If CONF.monkey_patch is not True, this function do nothing. 
if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) for method, func in inspect.getmembers(clz, inspect.ismethod): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def convert_to_list_dict(lst, label): """Convert a value or list into a list of dicts""" if not lst: return None if not isinstance(lst, list): lst = [lst] return [{label: x} for x in lst] def timefunc(func): """Decorator that logs how long a particular function took to execute""" @functools.wraps(func) def inner(*args, **kwargs): start_time = time.time() try: return func(*args, **kwargs) finally: total_time = time.time() - start_time LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") % dict(name=func.__name__, total_time=total_time)) return inner def generate_glance_url(): """Generate the URL to glance.""" # TODO(jk0): This will eventually need to take SSL into consideration # when supported in glance. return "http://%s:%d" % (CONF.glance_host, CONF.glance_port) @contextlib.contextmanager def logging_error(message): """Catches exception, write message to the log, re-raise. This is a common refinement of save_and_reraise that writes a specific message to the log. 
""" try: yield except Exception as error: with excutils.save_and_reraise_exception(): LOG.exception(message) @contextlib.contextmanager def remove_path_on_error(path): """Protect code that wants to operate on PATH atomically. Any exception will cause PATH to be removed. """ try: yield except Exception: with excutils.save_and_reraise_exception(): delete_if_exists(path) def make_dev_path(dev, partition=None, base='/dev'): """Return a path to a particular device. >>> make_dev_path('xvdc') /dev/xvdc >>> make_dev_path('xvdc', 1) /dev/xvdc1 """ path = os.path.join(base, dev) if partition: path += str(partition) return path def total_seconds(td): """Local total_seconds implementation for compatibility with python 2.6""" if hasattr(td, 'total_seconds'): return td.total_seconds() else: return ((td.days * 86400 + td.seconds) * 10 ** 6 + td.microseconds) / 10.0 ** 6 def sanitize_hostname(hostname): """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" if isinstance(hostname, unicode): hostname = hostname.encode('latin-1', 'ignore') hostname = re.sub('[ _]', '-', hostname) hostname = re.sub('[^\w.-]+', '', hostname) hostname = hostname.lower() hostname = hostname.strip('.-') return hostname def read_cached_file(filename, cache_info, reload_func=None): """Read from a file if it has been modified. :param cache_info: dictionary to hold opaque cache. :param reload_func: optional function to be called with data when file is reloaded due to a modification. 
:returns: data from file """ mtime = os.path.getmtime(filename) if not cache_info or mtime != cache_info.get('mtime'): with open(filename) as fap: cache_info['data'] = fap.read() cache_info['mtime'] = mtime if reload_func: reload_func(cache_info['data']) return cache_info['data'] def file_open(*args, **kwargs): """Open file see built-in file() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return file(*args, **kwargs) def hash_file(file_like_object): """Generate a hash for the contents of a file.""" checksum = hashlib.sha1() any(map(checksum.update, iter(lambda: file_like_object.read(32768), ''))) return checksum.hexdigest() @contextlib.contextmanager def temporary_mutation(obj, **kwargs): """Temporarily set the attr on a particular object to a given value then revert when finished. One use of this is to temporarily set the read_deleted flag on a context object: with temporary_mutation(context, read_deleted="yes"): do_something_that_needed_deleted_objects() """ NOT_PRESENT = object() old_values = {} for attr, new_value in kwargs.items(): old_values[attr] = getattr(obj, attr, NOT_PRESENT) setattr(obj, attr, new_value) try: yield finally: for attr, old_value in old_values.items(): if old_value is NOT_PRESENT: del obj[attr] else: setattr(obj, attr, old_value) def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
elapsed = total_seconds(timeutils.utcnow() - last_heartbeat) return abs(elapsed) <= CONF.service_down_time def generate_mac_address(): """Generate an Ethernet MAC address.""" # NOTE(vish): We would prefer to use 0xfe here to ensure that linux # bridge mac addresses don't change, but it appears to # conflict with libvirt, so we use the next highest octet # that has the unicast and locally administered bits set # properly: 0xfa. # Discussion: https://bugs.launchpad.net/manila/+bug/921838 mac = [0xfa, 0x16, 0x3e, random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) def read_file_as_root(file_path): """Secure helper to read file as root.""" try: out, _err = execute('cat', file_path, run_as_root=True) return out except exception.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path) @contextlib.contextmanager def temporary_chown(path, owner_uid=None): """Temporarily chown a path. :params owner_uid: UID of temporary owner (defaults to current user) """ if owner_uid is None: owner_uid = os.getuid() orig_uid = os.stat(path).st_uid if orig_uid != owner_uid: execute('chown', owner_uid, path, run_as_root=True) try: yield finally: if orig_uid != owner_uid: execute('chown', orig_uid, path, run_as_root=True) @contextlib.contextmanager def tempdir(**kwargs): tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError, e: LOG.debug(_('Could not remove tmpdir: %s'), str(e)) def strcmp_const_time(s1, s2): """Constant-time string comparison. :params s1: the first string :params s2: the second string :return: True if the strings are equal. This function takes two strings and compares them. It is intended to be used when doing a comparison for authentication purposes to help guard against timing attacks. 
""" if len(s1) != len(s2): return False result = 0 for (a, b) in zip(s1, s2): result |= ord(a) ^ ord(b) return result == 0 def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass class UndoManager(object): """Provides a mechanism to facilitate rolling back a series of actions when an exception is raised. """ def __init__(self): self.undo_stack = [] def undo_with(self, undo_func): self.undo_stack.append(undo_func) def _rollback(self): for undo_func in reversed(self.undo_stack): undo_func() def rollback_and_reraise(self, msg=None, **kwargs): """Rollback a series of actions then re-raise the exception. .. note:: (sirp) This should only be called within an exception handler. """ with excutils.save_and_reraise_exception(): if msg: LOG.exception(msg, **kwargs) self._rollback() def ensure_tree(path): """Create a directory (and any ancestor directories required) :param path: Directory to create """ try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST: if not os.path.isdir(path): raise else: raise def to_bytes(text, default=0): """Try to turn a string into a number of bytes. Looks at the last characters of the text to determine what conversion is needed to turn the input text into a byte number. 
Supports: B/b, K/k, M/m, G/g, T/t (or the same with b/B on the end) """ BYTE_MULTIPLIERS = { '': 1, 't': 1024 ** 4, 'g': 1024 ** 3, 'm': 1024 ** 2, 'k': 1024, } # Take off everything not number 'like' (which should leave # only the byte 'identifier' left) mult_key_org = text.lstrip('-1234567890') mult_key = mult_key_org.lower() mult_key_len = len(mult_key) if mult_key.endswith("b"): mult_key = mult_key[0:-1] try: multiplier = BYTE_MULTIPLIERS[mult_key] if mult_key_len: # Empty cases shouldn't cause text[0:-0] text = text[0:-mult_key_len] return int(text) * multiplier except KeyError: msg = _('Unknown byte multiplier: %s') % mult_key_org raise TypeError(msg) except ValueError: return default def cidr_to_netmask(cidr): """ Convert cidr notation to the netmask string :param cidr: integer which represents cidr notation :rtype: string """ cidr = int(cidr) bits = 0 for i in xrange(32 - cidr, 32): bits |= (1 << i) return socket.inet_ntoa(struct.pack('>I', bits)) manila-2013.2.dev175.gbf1a399/manila/quota.py0000664000175000017500000013173412301410454020465 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Quotas for shares.""" import datetime from oslo.config import cfg from manila import db from manila import exception from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils LOG = logging.getLogger(__name__) quota_opts = [ cfg.IntOpt('quota_shares', default=10, help='number of shares allowed per project'), cfg.IntOpt('quota_snapshots', default=10, help='number of share snapshots allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='number of share gigabytes (snapshots are also included) ' 'allowed per project'), cfg.IntOpt('reservation_expire', default=86400, help='number of seconds until a reservation expires'), cfg.IntOpt('until_refresh', default=0, help='count of reservations until usage is refreshed'), cfg.IntOpt('max_age', default=0, help='number of seconds between subsequent usage refreshes'), cfg.StrOpt('quota_driver', default='manila.quota.DbQuotaDriver', help='default driver to use for quota checks'), ] CONF = cfg.CONF CONF.register_opts(quota_opts) class DbQuotaDriver(object): """ Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" return db.quota_get(context, project_id, user_id, resource) def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" return db.quota_get(context, project_id, resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return db.quota_class_get(context, quota_class, resource) def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. 
""" quotas = {} default_quotas = db.quota_class_get_default(context) for resource in resources.values(): quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """ Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) for resource in resources.values(): if defaults or resource.name in class_quotas: quotas[resource.name] = class_quotas.get(resource.name, resource.default) return quotas def _process_quotas(self, context, resources, project_id, quotas, quota_class=None, defaults=True, usages=None, remains=False): modified_quotas = {} # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = db.quota_class_get_all_by_name(context, quota_class) else: class_quotas = {} default_quotas = self.get_defaults(context, resources) for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in quotas: continue limit = quotas.get(resource.name, class_quotas.get( resource.name, default_quotas[resource.name])) modified_quotas[resource.name] = dict(limit=limit) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. 
if usages: usage = usages.get(resource.name, {}) modified_quotas[resource.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0), ) # Initialize remains quotas. if remains: modified_quotas[resource.name].update(remains=limit) if remains: all_quotas = db.quota_get_all(context, project_id) for quota in all_quotas: if quota.resource in modified_quotas: modified_quotas[quota.resource]['remains'] -= \ quota.hard_limit return modified_quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): """ Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ project_quotas = db.quota_get_all_by_project(context, project_id) project_usages = None if usages: project_usages = db.quota_usage_get_all_by_project(context, project_id) return self._process_quotas(context, resources, project_id, project_quotas, quota_class, defaults=defaults, usages=project_usages, remains=remains) def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): """ Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. 
:param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ user_quotas = db.quota_get_all_by_project_and_user(context, project_id, user_id) # Use the project quota for default user quota. proj_quotas = db.quota_get_all_by_project(context, project_id) for key, value in proj_quotas.iteritems(): if key not in user_quotas.keys(): user_quotas[key] = value user_usages = None if usages: user_usages = db.quota_usage_get_all_by_project_and_user(context, project_id, user_id) return self._process_quotas(context, resources, project_id, user_quotas, quota_class, defaults=defaults, usages=user_usages) def get_settable_quotas(self, context, resources, project_id, user_id=None): """ Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. 
""" settable_quotas = {} project_quotas = self.get_project_quotas(context, resources, project_id, remains=True) if user_id: user_quotas = self.get_user_quotas(context, resources, project_id, user_id) setted_quotas = db.quota_get_all_by_project_and_user(context, project_id, user_id) for key, value in user_quotas.items(): maximum = project_quotas[key]['remains'] +\ setted_quotas.get(key, 0) settable_quotas[key] = dict( minimum=value['in_use'] + value['reserved'], maximum=maximum ) else: for key, value in project_quotas.items(): minimum = max(int(value['limit'] - value['remains']), int(value['in_use'] + value['reserved'])) settable_quotas[key] = dict(minimum=minimum, maximum=-1) return settable_quotas def _get_quotas(self, context, resources, keys, has_sync, project_id=None, user_id=None): """ A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param has_sync: If True, indicates that the resource must have a sync attribute; if False, indicates that the resource must NOT have a sync attribute. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ # Filter resources if has_sync: sync_filt = lambda x: hasattr(x, 'sync') else: sync_filt = lambda x: not hasattr(x, 'sync') desired = set(keys) sub_resources = dict((k, v) for k, v in resources.items() if k in desired and sync_filt(v)) # Make sure we accounted for all of them... 
if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) if user_id: # Grab and return the quotas (without usages) quotas = self.get_user_quotas(context, sub_resources, project_id, user_id, context.quota_class, usages=False) else: # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False) return dict((k, v['limit']) for k, v in quotas.items()) def limit_check(self, context, resources, values, project_id=None, user_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. 
""" # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id) user_quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id, user_id=user_id) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if (quotas[key] >= 0 and quotas[key] < val) or (user_quotas[key] >= 0 and user_quotas[key] < val)] if overs: raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. :param expire: An optional parameter specifying an expiration time for the reservations. 
If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ # Set up the reservation expiration if expire is None: expire = CONF.reservation_expire if isinstance(expire, (int, long)): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): expire = timeutils.utcnow() + expire if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the project_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id) user_quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id, user_id=user_id) # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, # which means access to the session. Since the # session isn't available outside the DBAPI, we # have to do the work there. 
return db.quota_reserve(context, resources, quotas, user_quotas, deltas, expire, CONF.until_refresh, CONF.max_age, project_id=project_id, user_id=user_id) def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_commit(context, reservations, project_id=project_id, user_id=user_id) def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id) def usage_reset(self, context, resources): """ Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. 
Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ # We need an elevated context for the calls to # quota_usage_update() elevated = context.elevated() for resource in resources: try: # Reset the usage to -1, which will force it to be # refreshed db.quota_usage_update(elevated, context.project_id, context.user_id, resource, in_use=-1) except exception.QuotaUsageNotFound: # That means it'll be refreshed anyway pass def destroy_all_by_project(self, context, project_id): """ Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ db.quota_destroy_all_by_project(context, project_id) def destroy_all_by_project_and_user(self, context, project_id, user_id): """ Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ db.quota_destroy_all_by_project_and_user(context, project_id, user_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ db.reservation_expire(context) class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None): """ Initializes a Resource. :param name: The name of the resource, i.e., "shares". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. 
""" self.name = name self.flag = flag def quota(self, driver, context, **kwargs): """ Given a driver and context, obtain the quota for this resource. :param driver: A quota driver. :param context: The request context. :param project_id: The project to obtain the quota value for. If not provided, it is taken from the context. If it is given as None, no project-specific quota will be searched for. :param quota_class: The quota class corresponding to the project, or for which the quota is to be looked up. If not provided, it is taken from the context. If it is given as None, no quota class-specific quota will be searched for. Note that the quota class defaults to the value in the context, which may not correspond to the project if project_id is not the same as the one in the context. """ # Get the project ID project_id = kwargs.get('project_id', context.project_id) # Ditto for the quota class quota_class = kwargs.get('quota_class', context.quota_class) # Look up the quota for the project if project_id: try: return driver.get_by_project(context, project_id, self.name) except exception.ProjectQuotaNotFound: pass # Try for the quota class if quota_class: try: return driver.get_by_class(context, quota_class, self.name) except exception.QuotaClassNotFound: pass # OK, return the default return self.default @property def default(self): """Return the default value of the quota.""" return CONF[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" def __init__(self, name, sync, flag=None): """ Initializes a ReservableResource. Reservable resources are those resources which directly correspond to objects in the database, i.e., shares, gigabytes, etc. A ReservableResource must be constructed with a usage synchronization function, which will be called to determine the current counts of one or more resources. 
The usage synchronization function will be passed three arguments: an admin context, the project ID, and an opaque session object, which should in turn be passed to the underlying database function. Synchronization functions should return a dictionary mapping resource names to the current in_use count for those resources; more than one resource and resource count may be returned. Note that synchronization functions may be associated with more than one ReservableResource. :param name: The name of the resource, i.e., "shares". :param sync: A callable which returns a dictionary to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(ReservableResource, self).__init__(name, flag=flag) self.sync = sync class AbsoluteResource(BaseResource): """Describe a non-reservable resource.""" pass class CountableResource(AbsoluteResource): """ Describe a resource where the counts aren't based solely on the project ID. """ def __init__(self, name, count, flag=None): """ Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., shares, gigabytes, etc., but for which a count by project ID is inappropriate. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. Note that this counting is not performed in a transaction-safe manner. This resource class is a temporary measure to provide required functionality, until a better approach to solving this problem can be evolved. :param name: The name of the resource, i.e., "shares". :param count: A callable which returns the count of the resource. 
The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(CountableResource, self).__init__(name, flag=flag) self.count = count class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} self._driver_cls = quota_driver_class self.__driver = None @property def _driver(self): if self.__driver: return self.__driver if not self._driver_cls: self._driver_cls = CONF.quota_driver if isinstance(self._driver_cls, basestring): self._driver_cls = importutils.import_object(self._driver_cls) self.__driver = self._driver_cls return self.__driver def __contains__(self, resource): return resource in self._resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a list of resources.""" for resource in resources: self.register_resource(resource) def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" return self._driver.get_by_project_and_user(context, project_id, user_id, resource) def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" return self._driver.get_by_project(context, project_id, resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return self._driver.get_by_class(context, quota_class, resource) def get_defaults(self, context): """Retrieve the default quotas. :param context: The request context, for access checks. """ return self._driver.get_defaults(context, self._resources) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. 
:param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ return self._driver.get_class_quotas(context, self._resources, quota_class, defaults=defaults) def get_user_quotas(self, context, project_id, user_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._driver.get_user_quotas(context, self._resources, project_id, user_id, quota_class=quota_class, defaults=defaults, usages=usages) def get_project_quotas(self, context, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. 
""" return self._driver.get_project_quotas(context, self._resources, project_id, quota_class=quota_class, defaults=defaults, usages=usages, remains=remains) def get_settable_quotas(self, context, project_id, user_id=None): """ Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ return self._driver.get_settable_quotas(context, self._resources, project_id, user_id=user_id) def count(self, context, resource, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource: The name of the resource, as a string. """ # Get the resource res = self._resources.get(resource) if not res or not hasattr(res, 'count'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count(context, *args, **kwargs) def limit_check(self, context, project_id=None, user_id=None, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. 
:param context: The request context, for access checks. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ return self._driver.limit_check(context, self._resources, values, project_id=project_id, user_id=user_id) def reserve(self, context, expire=None, project_id=None, user_id=None, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. 
""" reservations = self._driver.reserve(context, self._resources, deltas, expire=expire, project_id=project_id, user_id=user_id) LOG.debug(_("Created reservations %s"), reservations) return reservations def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.commit(context, reservations, project_id=project_id, user_id=user_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_("Failed to commit reservations %s"), reservations) return LOG.debug(_("Committed reservations %s"), reservations) def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.rollback(context, reservations, project_id=project_id, user_id=user_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_("Failed to roll back reservations %s"), reservations) return LOG.debug(_("Rolled back reservations %s"), reservations) def usage_reset(self, context, resources): """ Reset the usage records for a particular user on a list of resources. 
This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ self._driver.usage_reset(context, resources) def destroy_all_by_project_and_user(self, context, project_id, user_id): """ Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ self._driver.destroy_all_by_project_and_user(context, project_id, user_id) def destroy_all_by_project(self, context, project_id): """ Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ self._driver.destroy_all_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. 
""" self._driver.expire(context) @property def resources(self): return sorted(self._resources.keys()) QUOTAS = QuotaEngine() resources = [ ReservableResource('shares', '_sync_shares', 'quota_shares'), ReservableResource('snapshots', '_sync_snapshots', 'quota_snapshots'), ReservableResource('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ] QUOTAS.register_resources(resources) manila-2013.2.dev175.gbf1a399/manila/scheduler/0000775000175000017500000000000012301410516020726 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/scheduler/weights/0000775000175000017500000000000012301410516022400 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/scheduler/weights/capacity.py0000664000175000017500000000402612301410454024552 0ustar chuckchuck00000000000000# Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Capacity Weigher. Weigh hosts by their available capacity. The default is to spread volumes across all hosts evenly. If you prefer stacking, you can set the 'capacity_weight_multiplier' option to a negative number and the weighing has the opposite effect of the default. """ import math from oslo.config import cfg from manila.openstack.common.scheduler import weights capacity_weight_opts = [ cfg.FloatOpt('capacity_weight_multiplier', default=1.0, help='Multiplier used for weighing volume capacity. 
' 'Negative numbers mean to stack vs spread.'), ] CONF = cfg.CONF CONF.register_opts(capacity_weight_opts) class CapacityWeigher(weights.BaseHostWeigher): def _weight_multiplier(self): """Override the weight multiplier.""" return CONF.capacity_weight_multiplier def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want spreading to be the default.""" reserved = float(host_state.reserved_percentage) / 100 free_space = host_state.free_capacity_gb if free_space == 'infinite' or free_space == 'unknown': #(zhiteng) 'infinite' and 'unknown' are treated the same # here, for sorting purpose. free = float('inf') else: free = math.floor(host_state.free_capacity_gb * (1 - reserved)) return free manila-2013.2.dev175.gbf1a399/manila/scheduler/weights/__init__.py0000664000175000017500000000117212301410454024513 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/scheduler/rpcapi.py0000664000175000017500000000424112301410454022560 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the scheduler manager RPC API. """ from manila.openstack.common import jsonutils import manila.openstack.common.rpc.proxy from oslo.config import cfg CONF = cfg.CONF class SchedulerAPI(manila.openstack.common.rpc.proxy.RpcProxy): '''Client side of the scheduler rpc API. API version history: 1.0 - Initial version. 1.1 - Add create_volume() method 1.2 - Add request_spec, filter_properties arguments to create_volume() 1.3 - Add create_share() method ''' RPC_API_VERSION = '1.0' def __init__(self): super(SchedulerAPI, self).__init__( topic=CONF.scheduler_topic, default_version=self.RPC_API_VERSION) def create_share(self, ctxt, topic, share_id, snapshot_id=None, request_spec=None, filter_properties=None): request_spec_p = jsonutils.to_primitive(request_spec) return self.cast(ctxt, self.make_msg( 'create_share', topic=topic, share_id=share_id, snapshot_id=snapshot_id, request_spec=request_spec_p, filter_properties=filter_properties), version='1.3') def update_service_capabilities(self, ctxt, service_name, host, capabilities): self.fanout_cast(ctxt, self.make_msg('update_service_capabilities', service_name=service_name, host=host, capabilities=capabilities)) manila-2013.2.dev175.gbf1a399/manila/scheduler/filters/0000775000175000017500000000000012301410516022376 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/scheduler/filters/capacity_filter.py0000664000175000017500000000421312301410454026113 0ustar chuckchuck00000000000000# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack, LLC. # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math from manila.openstack.common import log as logging from manila.openstack.common.scheduler import filters LOG = logging.getLogger(__name__) class CapacityFilter(filters.BaseHostFilter): """CapacityFilter filters based on volume host's capacity utilization.""" def host_passes(self, host_state, filter_properties): """Return True if host has sufficient capacity.""" volume_size = filter_properties.get('size') if host_state.free_capacity_gb is None: # Fail Safe LOG.error(_("Free capacity not set: " "volume node info collection broken.")) return False free_space = host_state.free_capacity_gb if free_space == 'infinite' or free_space == 'unknown': # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True reserved = float(host_state.reserved_percentage) / 100 free = math.floor(free_space * (1 - reserved)) if free < volume_size: LOG.warning(_("Insufficient free space for volume creation " "(requested / avail): " "%(requested)s/%(available)s") % {'requested': volume_size, 'available': free}) return free >= volume_size manila-2013.2.dev175.gbf1a399/manila/scheduler/filters/__init__.py0000664000175000017500000000117212301410454024511 0ustar chuckchuck00000000000000# Copyright (c) 2013 OpenStack, LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/scheduler/filters/retry_filter.py0000664000175000017500000000310112301410454025456 0ustar chuckchuck00000000000000# Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.openstack.common import log as logging from manila.openstack.common.scheduler import filters LOG = logging.getLogger(__name__) class RetryFilter(filters.BaseHostFilter): """Filter out nodes that have already been attempted for scheduling purposes """ def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" retry = filter_properties.get('retry', None) if not retry: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled") return True hosts = retry.get('hosts', []) host = host_state.host passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug(_("Host %(host)s %(pass_msg)s. 
Previously tried hosts: " "%(hosts)s") % locals()) # Host passes if it's not in the list of previously attempted hosts: return passes manila-2013.2.dev175.gbf1a399/manila/scheduler/host_manager.py0000664000175000017500000002471712301410454023763 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manage hosts in the current zone. """ import UserDict from oslo.config import cfg from manila import db from manila import exception from manila.openstack.common import log as logging from manila.openstack.common.scheduler import filters from manila.openstack.common.scheduler import weights from manila.openstack.common import timeutils from manila import utils host_manager_opts = [ cfg.ListOpt('scheduler_default_filters', default=[ 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter' ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_default_weighers', default=[ 'CapacityWeigher' ], help='Which weigher class names to use for weighing hosts.') ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) LOG = logging.getLogger(__name__) class ReadOnlyDict(UserDict.IterableUserDict): """A read-only dict.""" def __init__(self, source=None): self.data = {} self.update(source) def __setitem__(self, key, item): raise TypeError def __delitem__(self, key): raise TypeError def clear(self): raise TypeError def pop(self, key, 
*args): raise TypeError def popitem(self): raise TypeError def update(self, source=None): if source is None: return elif isinstance(source, UserDict.UserDict): self.data = source.data elif isinstance(source, type({})): self.data = source else: raise TypeError class HostState(object): """Mutable and immutable information tracked for a host.""" def __init__(self, host, capabilities=None, service=None): self.host = host self.update_capabilities(capabilities, service) self.share_backend_name = None self.vendor_name = None self.driver_version = 0 self.storage_protocol = None self.QoS_support = False # Mutable available resources. # These will change as resources are virtually "consumed". self.total_capacity_gb = 0 self.free_capacity_gb = None self.reserved_percentage = 0 self.updated = None def update_capabilities(self, capabilities=None, service=None): # Read-only capability dicts if capabilities is None: capabilities = {} self.capabilities = ReadOnlyDict(capabilities) if service is None: service = {} self.service = ReadOnlyDict(service) def update_from_share_capability(self, capability): """Update information about a host from its volume_node info.""" if capability: if self.updated and self.updated > capability['timestamp']: return self.share_backend = capability.get('share_backend_name', None) self.vendor_name = capability.get('vendor_name', None) self.driver_version = capability.get('driver_version', None) self.storage_protocol = capability.get('storage_protocol', None) self.QoS_support = capability.get('QoS_support', False) self.total_capacity_gb = capability['total_capacity_gb'] self.free_capacity_gb = capability['free_capacity_gb'] self.reserved_percentage = capability['reserved_percentage'] self.updated = capability['timestamp'] def consume_from_share(self, share): """Incrementally update host state from an share""" share_gb = share['size'] if self.free_capacity_gb == 'infinite': # There's virtually infinite space on back-end pass elif self.free_capacity_gb == 
'unknown': # Unable to determine the actual free space on back-end pass else: self.free_capacity_gb -= share_gb self.updated = timeutils.utcnow() class HostManager(object): """Base HostManager class.""" host_state_cls = HostState def __init__(self): self.service_states = {} # { : {: {cap k : v}}} self.host_state_map = {} self.filter_handler = filters.HostFilterHandler('manila.scheduler.' 'filters') self.filter_classes = self.filter_handler.get_all_classes() self.weight_handler = weights.HostWeightHandler('manila.scheduler.' 'weights') self.weight_classes = self.weight_handler.get_all_classes() def _choose_host_filters(self, filter_cls_names): """Since the caller may specify which filters to use we need to have an authoritative list of what is permissible. This function checks the filter names against a predefined set of acceptable filters. """ if filter_cls_names is None: filter_cls_names = CONF.scheduler_default_filters if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] good_filters = [] bad_filters = [] for filter_name in filter_cls_names: found_class = False for cls in self.filter_classes: if cls.__name__ == filter_name: found_class = True good_filters.append(cls) break if not found_class: bad_filters.append(filter_name) if bad_filters: msg = ", ".join(bad_filters) raise exception.SchedulerHostFilterNotFound(filter_name=msg) return good_filters def _choose_host_weighers(self, weight_cls_names): """Since the caller may specify which weighers to use, we need to have an authoritative list of what is permissible. This function checks the weigher names against a predefined set of acceptable weighers. 
""" if weight_cls_names is None: weight_cls_names = CONF.scheduler_default_weighers if not isinstance(weight_cls_names, (list, tuple)): weight_cls_names = [weight_cls_names] good_weighers = [] bad_weighers = [] for weigher_name in weight_cls_names: found_class = False for cls in self.weight_classes: if cls.__name__ == weigher_name: good_weighers.append(cls) found_class = True break if not found_class: bad_weighers.append(weigher_name) if bad_weighers: msg = ", ".join(bad_weighers) raise exception.SchedulerHostWeigherNotFound(weigher_name=msg) return good_weighers def get_filtered_hosts(self, hosts, filter_properties, filter_class_names=None): """Filter hosts and return only ones passing all filters""" filter_classes = self._choose_host_filters(filter_class_names) return self.filter_handler.get_filtered_objects(filter_classes, hosts, filter_properties) def get_weighed_hosts(self, hosts, weight_properties, weigher_class_names=None): """Weigh the hosts""" weigher_classes = self._choose_host_weighers(weigher_class_names) return self.weight_handler.get_weighed_objects(weigher_classes, hosts, weight_properties) def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" if service_name not in ('share'): LOG.debug(_('Ignoring %(service_name)s service update ' 'from %(host)s'), locals()) return LOG.debug(_("Received %(service_name)s service update from " "%(host)s.") % locals()) # Copy the capabilities, so we don't modify the original dict capab_copy = dict(capabilities) capab_copy["timestamp"] = timeutils.utcnow() # Reported time self.service_states[host] = capab_copy def get_all_host_states_share(self, context): """Returns a dict of all the hosts the HostManager knows about. Also, each of the consumable resources in HostState are pre-populated and adjusted based on data in the db. 
For example: {'192.168.1.100': HostState(), ...} """ # Get resource usage across the available share nodes: topic = CONF.share_topic share_services = db.service_get_all_by_topic(context, topic) for service in share_services: if not utils.service_is_up(service) or service['disabled']: LOG.warn(_("service is down or disabled.")) continue host = service['host'] capabilities = self.service_states.get(host, None) host_state = self.host_state_map.get(host) if host_state: # copy capabilities to host_state.capabilities host_state.update_capabilities(capabilities, dict(service.iteritems())) else: host_state = self.host_state_cls(host, capabilities=capabilities, service= dict(service.iteritems())) self.host_state_map[host] = host_state # update host_state host_state.update_from_share_capability(capabilities) return self.host_state_map.itervalues() manila-2013.2.dev175.gbf1a399/manila/scheduler/__init__.py0000664000175000017500000000201512301410454023036 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila.scheduler` -- Scheduler Nodes ===================================================== .. automodule:: manila.scheduler :platform: Unix :synopsis: Module that picks a volume node to create a volume. .. moduleauthor:: Sandy Walsh .. moduleauthor:: Ed Leafe .. 
moduleauthor:: Chris Behrens """ manila-2013.2.dev175.gbf1a399/manila/scheduler/chance.py0000664000175000017500000000522012301410454022521 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Chance (Random) Scheduler implementation """ import random from manila import exception from manila.scheduler import driver from oslo.config import cfg CONF = cfg.CONF class ChanceScheduler(driver.Scheduler): """Implements Scheduler as a random node selector.""" def _filter_hosts(self, request_spec, hosts, **kwargs): """Filter a list of hosts based on request_spec.""" filter_properties = kwargs.get('filter_properties', {}) ignore_hosts = filter_properties.get('ignore_hosts', []) hosts = [host for host in hosts if host not in ignore_hosts] return hosts def _schedule(self, context, topic, request_spec, **kwargs): """Picks a host that is up at random.""" elevated = context.elevated() hosts = self.hosts_up(elevated, topic) if not hosts: msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) hosts = self._filter_hosts(request_spec, hosts, **kwargs) if not hosts: msg = _("Could not find another host") raise exception.NoValidHost(reason=msg) return hosts[int(random.random() * len(hosts))] def 
schedule_create_share(self, context, request_spec, filter_properties): """Picks a host that is up at random.""" topic = CONF.share_topic host = self._schedule(context, topic, request_spec, filter_properties=filter_properties) share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = driver.share_update_db(context, share_id, host) self.share_rpcapi.create_share(context, updated_share, host, request_spec, filter_properties, snapshot_id) manila-2013.2.dev175.gbf1a399/manila/scheduler/scheduler_options.py0000664000175000017500000000670612301410454025043 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. 
""" import datetime import json import os from oslo.config import cfg from manila.openstack.common import log as logging from manila.openstack.common import timeutils scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = logging.getLogger(__name__) class SchedulerOptions(object): """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error, e: LOG.exception(_("Could not stat scheduler options file " "%(filename)s: '%(e)s'"), locals()) raise def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return json.load(handle) except ValueError, e: LOG.exception(_("Could not decode scheduler options: " "'%(e)s'") % locals()) return {} def _get_time_now(self): """Get current UTC. 
Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data manila-2013.2.dev175.gbf1a399/manila/scheduler/manager.py0000664000175000017500000001103712301410454022715 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler Service """ from oslo.config import cfg from manila import context from manila import db from manila import exception from manila import manager from manila.openstack.common import excutils from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common.notifier import api as notifier from manila.share import rpcapi as share_rpcapi LOG = logging.getLogger(__name__) scheduler_driver_opt = cfg.StrOpt('scheduler_driver', default='manila.scheduler.filter_scheduler.' 'FilterScheduler', help='Default scheduler driver to use') CONF = cfg.CONF CONF.register_opt(scheduler_driver_opt) class SchedulerManager(manager.Manager): """Chooses a host to create shares.""" RPC_API_VERSION = '1.3' def __init__(self, scheduler_driver=None, service_name=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = CONF.scheduler_driver self.driver = importutils.import_object(scheduler_driver) super(SchedulerManager, self).__init__(*args, **kwargs) def init_host(self): ctxt = context.get_admin_context() self.request_service_capabilities(ctxt) def get_host_list(self, context): """Get a list of hosts from the HostManager.""" return self.driver.get_host_list() def get_service_capabilities(self, context): """Get the normalized set of capabilities for this zone.""" return self.driver.get_service_capabilities() def update_service_capabilities(self, context, service_name=None, host=None, capabilities=None, **kwargs): """Process a capability update from a service node.""" if capabilities is None: capabilities = {} self.driver.update_service_capabilities(service_name, host, capabilities) def create_share(self, context, topic, share_id, snapshot_id=None, request_spec=None, filter_properties=None): try: self.driver.schedule_create_share(context, request_spec, filter_properties) except exception.NoValidHost as ex: self._set_share_error_state_and_notify('create_share', context, ex, request_spec) except Exception as ex: 
with excutils.save_and_reraise_exception(): self._set_share_error_state_and_notify('create_share', context, ex, request_spec) def _set_share_error_state_and_notify(self, method, context, ex, request_spec): LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals()) share_state = {'status': 'error'} properties = request_spec.get('share_properties', {}) share_id = request_spec.get('share_id', None) if share_id: db.share_update(context, share_id, share_state) payload = dict(request_spec=request_spec, share_properties=properties, share_id=share_id, state=share_state, method=method, reason=ex) notifier.notify(context, notifier.publisher_id("scheduler"), 'scheduler.' + method, notifier.ERROR, payload) def request_service_capabilities(self, context): share_rpcapi.ShareAPI().publish_service_capabilities(context) manila-2013.2.dev175.gbf1a399/manila/scheduler/simple.py0000664000175000017500000000753312301410454022602 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Simple Scheduler """ from oslo.config import cfg from manila import db from manila import exception from manila.scheduler import chance from manila.scheduler import driver from manila import utils simple_scheduler_opts = [ cfg.IntOpt("max_gigabytes", default=10000, help="maximum number of volume gigabytes to allow per host"), ] CONF = cfg.CONF CONF.register_opts(simple_scheduler_opts) class SimpleScheduler(chance.ChanceScheduler): """Implements Naive Scheduler that tries to find least loaded host.""" def schedule_create_share(self, context, request_spec, filter_properties): """Picks a host that is up and has the fewest shares.""" #TODO(rushiagr) - pick only hosts that run shares elevated = context.elevated() share_id = request_spec.get('share_id') snapshot_id = request_spec.get('snapshot_id') share_properties = request_spec.get('share_properties') share_size = share_properties.get('size') availability_zone = share_properties.get('availability_zone') zone, host = None, None if availability_zone: zone, _x, host = availability_zone.partition(':') if host and context.is_admin: service = db.service_get_by_args(elevated, host, CONF.share_topic) if not utils.service_is_up(service): raise exception.WillNotSchedule(host=host) updated_share = driver.share_update_db(context, share_id, host) self.share_rpcapi.create_share(context, updated_share, host, request_spec, None, snapshot_id=snapshot_id ) return None results = db.service_get_all_share_sorted(elevated) if zone: results = [(service, gigs) for (service, gigs) in results if service['availability_zone'] == zone] for result in results: (service, share_gigabytes) = result if share_gigabytes + share_size > CONF.max_gigabytes: msg = _("Not enough allocatable share gigabytes remaining") raise exception.NoValidHost(reason=msg) if utils.service_is_up(service) and not service['disabled']: updated_share = driver.share_update_db(context, share_id, service['host']) self.share_rpcapi.create_share(context, updated_share, 
service['host'], request_spec, None, snapshot_id=snapshot_id) return None msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) manila-2013.2.dev175.gbf1a399/manila/scheduler/filter_scheduler.py0000664000175000017500000002115712301410454024632 0ustar chuckchuck00000000000000# Copyright (c) 2011 Intel Corporation # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The FilterScheduler is for creating shares. You can customize this scheduler by specifying your own share Filters and Weighing Functions. """ import operator from manila import exception from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.scheduler import driver from manila.scheduler import scheduler_options from oslo.config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) class FilterScheduler(driver.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.cost_function_cache = None self.options = scheduler_options.SchedulerOptions() self.max_attempts = self._max_attempts() def schedule(self, context, topic, method, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ self._schedule(context, topic, *args, **kwargs) def _get_configuration_options(self): """Fetch options dictionary. 
Broken out for testing.""" return self.options.get_configuration() def _post_select_populate_filter_properties(self, filter_properties, host_state): """Add additional information to the filter properties after a host has been selected by the scheduling process. """ # Add a retry entry for the selected volume backend: self._add_retry_host(filter_properties, host_state.host) def _add_retry_host(self, filter_properties, host): """Add a retry entry for the selected volume backend. In the event that the request gets re-scheduled, this entry will signal that the given backend has already been tried. """ retry = filter_properties.get('retry', None) if not retry: return hosts = retry['hosts'] hosts.append(host) def _max_attempts(self): max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: msg = _("Invalid value for 'scheduler_max_attempts', " "must be >=1") raise exception.InvalidParameterValue(err=msg) return max_attempts def schedule_create_share(self, context, request_spec, filter_properties): weighed_host = self._schedule_share(context, request_spec, filter_properties) if not weighed_host: raise exception.NoValidHost(reason="") host = weighed_host.obj.host share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = driver.share_update_db(context, share_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share(context, updated_share, host, request_spec=request_spec, filter_properties=filter_properties, snapshot_id=snapshot_id) def _schedule_share(self, context, request_spec, filter_properties=None): """Returns a list of hosts that meet the required specs, ordered by their fitness. 
""" elevated = context.elevated() share_properties = request_spec['share_properties'] # Since Manila is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. resource_properties = share_properties.copy() share_type = request_spec.get("share_type", {}) resource_type = request_spec.get("share_type", {}) request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() if filter_properties is None: filter_properties = {} self._populate_retry_share(filter_properties, resource_properties) filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': resource_type }) self.populate_filter_properties_share(request_spec, filter_properties) # Find our local list of acceptable hosts by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. hosts = self.host_manager.get_all_host_states_share(elevated) # Filter local hosts based on requirements ... hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties) if not hosts: return None LOG.debug(_("Filtered share %(hosts)s") % locals()) # weighted_host = WeightedHost() ... the best # host for the job. weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) best_host = weighed_hosts[0] LOG.debug(_("Choosing for share: %(best_host)s") % locals()) #NOTE(rushiagr): updating the available space parameters at same place best_host.obj.consume_from_share(share_properties) return best_host def _populate_retry_share(self, filter_properties, properties): """Populate filter properties with history of retries for this request. If maximum retries is exceeded, raise NoValidHost. 
""" max_attempts = self.max_attempts retry = filter_properties.pop('retry', {}) if max_attempts == 1: # re-scheduling is disabled. return # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'hosts': [] # list of share service hosts tried } filter_properties['retry'] = retry share_id = properties.get('share_id') self._log_share_error(share_id, retry) if retry['num_attempts'] > max_attempts: msg = _("Exceeded max scheduling attempts %(max_attempts)d for " "share %(share_id)s") % locals() raise exception.NoValidHost(reason=msg) def _log_share_error(self, share_id, retry): """If the request contained an exception from a previous share create operation, log it to aid debugging. """ exc = retry.pop('exc', None) # string-ified exception from share if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts', None) if not hosts: return # no previously attempted hosts, skip last_host = hosts[-1] msg = _("Error scheduling %(share_id)s from last share-service: " "%(last_host)s : %(exc)s") % locals() LOG.error(msg) def populate_filter_properties_share(self, request_spec, filter_properties): """Stuff things into filter_properties. Can be overridden in a subclass to add more data. """ shr = request_spec['share_properties'] filter_properties['size'] = shr['size'] filter_properties['availability_zone'] = shr.get('availability_zone') filter_properties['user_id'] = shr.get('user_id') filter_properties['metadata'] = shr.get('metadata') manila-2013.2.dev175.gbf1a399/manila/scheduler/driver.py0000664000175000017500000000662612301410454022606 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler base class that all Schedulers should inherit from """ from oslo.config import cfg from manila import db from manila.openstack.common import importutils from manila.openstack.common import timeutils from manila.share import rpcapi as share_rpcapi from manila import utils scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='manila.scheduler.host_manager.HostManager', help='The scheduler host manager class to use'), cfg.IntOpt('scheduler_max_attempts', default=3, help='Maximum number of attempts to schedule a share'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) def share_update_db(context, share_id, host): '''Set the host and set the scheduled_at field of a share. :returns: A Share with the updated fields set properly. ''' now = timeutils.utcnow() values = {'host': host, 'scheduled_at': now} return db.share_update(context, share_id, values) class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) self.share_rpcapi = share_rpcapi.ShareAPI() def get_host_list(self): """Get a list of hosts from the HostManager.""" return self.host_manager.get_host_list() def get_service_capabilities(self): """Get the normalized set of capabilities for the services. 
""" return self.host_manager.get_service_capabilities() def update_service_capabilities(self, service_name, host, capabilities): """Process a capability update from a service node.""" self.host_manager.update_service_capabilities(service_name, host, capabilities) def hosts_up(self, context, topic): """Return the list of hosts that have a running service for topic.""" services = db.service_get_all_by_topic(context, topic) return [service['host'] for service in services if utils.service_is_up(service)] def schedule(self, context, topic, method, *_args, **_kwargs): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_create_share(self, context, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement schedule_create_share")) manila-2013.2.dev175.gbf1a399/manila/share/0000775000175000017500000000000012301410516020052 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/share/configuration.py0000664000175000017500000000503712301410454023301 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (c) 2012 Rackspace Hosting # Copyright (c) 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Configuration support for all drivers. 
This module allows support for setting configurations either from default or from a particular CONF group, to be able to set multiple configurations for a given set of values. For instance, two lvm configurations can be set by naming them in groups as [lvm1] volume_group=lvm-group-1 ... [lvm2] volume_group=lvm-group-2 ... And the configuration group name will be passed in so that all calls to configuration.volume_group within that instance will be mapped to the proper named group. This class also ensures the implementation's configuration is grafted into the option group. This is due to the way cfg works. All cfg options must be defined and registered in the group in which they are used. """ from oslo.config import cfg from manila.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class Configuration(object): def __init__(self, share_opts, config_group=None): """This takes care of grafting the implementation's config values into the config group.""" self.config_group = config_group # set the local conf so that __call__'s know what to use if self.config_group: self._ensure_config_values(share_opts) self.local_conf = CONF._get(self.config_group) else: self.local_conf = CONF def _ensure_config_values(self, share_opts): CONF.register_opts(share_opts, group=self.config_group) def append_config_values(self, share_opts): self._ensure_config_values(share_opts) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __getattr__(self, value): return getattr(self.local_conf, value) manila-2013.2.dev175.gbf1a399/manila/share/rpcapi.py0000664000175000017500000000663012301410454021710 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the share RPC API. """ from manila import exception from manila.openstack.common import rpc import manila.openstack.common.rpc.proxy from oslo.config import cfg CONF = cfg.CONF class ShareAPI(manila.openstack.common.rpc.proxy.RpcProxy): '''Client side of the share rpc API. API version history: 1.0 - Initial version. 1.1 - Add snapshot support. 1.2 - Add filter scheduler support ''' BASE_RPC_API_VERSION = '1.1' def __init__(self, topic=None): super(ShareAPI, self).__init__( topic=topic or CONF.share_topic, default_version=self.BASE_RPC_API_VERSION) def create_share(self, ctxt, share, host, request_spec, filter_properties, snapshot_id=None): self.cast(ctxt, self.make_msg('create_share', share_id=share['id'], request_spec=request_spec, filter_properties=filter_properties, snapshot_id=snapshot_id), topic=rpc.queue_get_for(ctxt, self.topic, host)) def delete_share(self, ctxt, share): self.cast(ctxt, self.make_msg('delete_share', share_id=share['id']), topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) def create_snapshot(self, ctxt, share, snapshot): self.cast(ctxt, self.make_msg('create_snapshot', share_id=share['id'], snapshot_id=snapshot['id']), topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) def delete_snapshot(self, ctxt, snapshot, host): self.cast(ctxt, self.make_msg('delete_snapshot', snapshot_id=snapshot['id']), topic=rpc.queue_get_for(ctxt, self.topic, host)) def allow_access(self, ctxt, share, access): self.cast(ctxt, self.make_msg('allow_access', access_id=access['id']), topic=rpc.queue_get_for(ctxt, self.topic, 
share['host'])) def deny_access(self, ctxt, share, access): self.cast(ctxt, self.make_msg('deny_access', access_id=access['id']), topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) def publish_service_capabilities(self, ctxt): self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'), version='1.0') manila-2013.2.dev175.gbf1a399/manila/share/drivers/0000775000175000017500000000000012301410516021530 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/share/drivers/netapp/0000775000175000017500000000000012301410516023017 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/share/drivers/netapp/__init__.py0000664000175000017500000000126012301410454025130 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Openstack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/share/drivers/netapp/api.py0000664000175000017500000001033212301410454024142 0ustar chuckchuck00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import suds from suds.sax import text from manila import exception from manila.openstack.common import log from oslo.config import cfg LOG = log.getLogger(__name__) CONF = cfg.CONF class NetAppApiClient(object): """Wrapper around DFM commands.""" REQUIRED_FLAGS = ['netapp_nas_wsdl_url', 'netapp_nas_login', 'netapp_nas_password', 'netapp_nas_server_hostname', 'netapp_nas_server_port'] def __init__(self, configuration): self.configuration = configuration self._client = None def do_setup(self): """Setup suds (web services) client.""" protocol = 'https' if self.configuration.netapp_nas_server_secure \ else 'http' soap_url = ('%s://%s:%s/apis/soap/v1' % (protocol, self.configuration.netapp_nas_server_hostname, self.configuration.netapp_nas_server_port)) self._client = \ suds.client.Client(self.configuration.netapp_nas_wsdl_url, username=self.configuration.netapp_nas_login, password=self.configuration.netapp_nas_password, location=soap_url) LOG.info('NetApp RPC client started') def send_request_to(self, target, request, xml_args=None, do_response_check=True): """ Sends RPC :request: to :target:. :param target: IP address, ID or network name of OnTap device :param request: API name :param xml_args: call arguments :param do_response_check: if set to True and RPC call has failed, raises exception. 
""" client = self._client srv = client.service rpc = client.factory.create('Request') rpc.Name = request rpc.Args = text.Raw(xml_args) response = srv.ApiProxy(Request=rpc, Target=target) if do_response_check: _check_response(rpc, response) return response def get_available_aggregates(self): """Returns list of aggregates known by DFM.""" srv = self._client.service resp = srv.AggregateListInfoIterStart() tag = resp.Tag try: avail_aggrs = srv.AggregateListInfoIterNext(Tag=tag, Maximum=resp.Records) finally: srv.AggregateListInfoIterEnd(tag) return avail_aggrs def get_host_ip_by(self, host_id): """Returns IP address of a host known by DFM.""" if (type(host_id) is str or type(host_id) is unicode) and \ len(host_id.split('.')) == 4: # already IP return host_id client = self._client srv = client.service filer_filter = client.factory.create('HostListInfoIterStart') filer_filter.ObjectNameOrId = host_id resp = srv.HostListInfoIterStart(HostListInfoIterStart=filer_filter) tag = resp.Tag try: filers = srv.HostListInfoIterNext(Tag=tag, Maximum=resp.Records) finally: srv.HostListInfoIterEnd(Tag=tag) ip = None for host in filers.Hosts.HostInfo: if int(host.HostId) == int(host_id): ip = host.HostAddress return ip @staticmethod def check_configuration(config_object): """Ensure that the flags we care about are set.""" for flag in NetAppApiClient.REQUIRED_FLAGS: if not getattr(config_object, flag, None): raise exception.Error(_('%s is not set') % flag) manila-2013.2.dev175.gbf1a399/manila/share/drivers/netapp/driver.py0000664000175000017500000005753212301410454024701 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp specific NAS storage driver. Supports NFS and CIFS protocols. This driver requires NetApp OnCommand 5.0 and one or more Data ONTAP 7-mode storage systems with installed CIFS and NFS licenses. """ from manila import exception from manila.openstack.common import log from manila.share import driver from manila.share.drivers.netapp.api import NetAppApiClient from oslo.config import cfg LOG = log.getLogger(__name__) NETAPP_NAS_OPTS = [ cfg.StrOpt('netapp_nas_wsdl_url', default=None, help='URL of the WSDL file for the DFM server'), cfg.StrOpt('netapp_nas_login', default=None, help='User name for the DFM server'), cfg.StrOpt('netapp_nas_password', default=None, help='Password for the DFM server'), cfg.StrOpt('netapp_nas_server_hostname', default=None, help='Hostname for the DFM server'), cfg.IntOpt('netapp_nas_server_port', default=8088, help='Port number for the DFM server'), cfg.BoolOpt('netapp_nas_server_secure', default=True, help='Use secure connection to server.'), ] CONF = cfg.CONF CONF.register_opts(NETAPP_NAS_OPTS) class NetAppShareDriver(driver.ShareDriver): """ NetApp specific NAS driver. Allows for NFS and CIFS NAS storage usage. 
""" def __init__(self, db, *args, **kwargs): super(NetAppShareDriver, self).__init__(*args, **kwargs) self.db = db self._helpers = None self._share_table = {} self.configuration.append_config_values(NETAPP_NAS_OPTS) self._client = NetAppApiClient(self.configuration) def allocate_container(self, context, share): """Allocate space for the share on aggregates.""" aggregate = self._find_best_aggregate() filer = aggregate.FilerId self._allocate_share_space(aggregate, share) self._remember_share(share['id'], filer) def allocate_container_from_snapshot(self, context, share, snapshot): """Creates a share from a snapshot.""" share_name = _get_valid_share_name(share['id']) parent_share_name = _get_valid_share_name(snapshot['share_id']) parent_snapshot_name = _get_valid_snapshot_name(snapshot['id']) filer = self._get_filer(snapshot['share_id']) xml_args = ('%s' '%s' '%s') % \ (share_name, parent_share_name, parent_snapshot_name) self._client.send_request_to(filer, 'volume-clone-create', xml_args) self._remember_share(share['id'], filer) def deallocate_container(self, context, share): """Free share space.""" target = self._get_filer(share['id']) if target: self._share_offline(target, share) self._delete_share(target, share) self._forget_share(share['id']) def create_share(self, context, share): """Creates NAS storage.""" helper = self._get_helper(share) filer = self._get_filer(share['id']) export_location = helper.create_share(filer, share) return export_location def create_snapshot(self, context, snapshot): """Creates a snapshot of a share.""" share_name = _get_valid_share_name(snapshot['share_id']) snapshot_name = _get_valid_snapshot_name(snapshot['id']) filer = self._get_filer(snapshot['share_id']) xml_args = ('%s' '%s') % (share_name, snapshot_name) self._client.send_request_to(filer, 'snapshot-create', xml_args) def delete_share(self, context, share): """Deletes NAS storage.""" helper = self._get_helper(share) target = helper.get_target(share) # share may be in error 
state, so there's no share and target if target: helper.delete_share(share) def delete_snapshot(self, context, snapshot): """Deletes a snapshot of a share.""" share_name = _get_valid_share_name(snapshot['share_id']) snapshot_name = _get_valid_snapshot_name(snapshot['id']) filer = self._get_filer(snapshot['share_id']) self._is_snapshot_busy(filer, share_name, snapshot_name) xml_args = ('%s' '%s') % (snapshot_name, share_name) self._client.send_request_to(filer, 'snapshot-delete', xml_args) def create_export(self, context, share): """Share already exported.""" pass def remove_export(self, context, share): """Share already removed.""" pass def ensure_share(self, context, share): """Remember previously created shares.""" helper = self._get_helper(share) filer = helper.get_target(share) self._remember_share(share['id'], filer) def allow_access(self, context, share, access): """Allows access to a given NAS storage for IPs in :access:""" helper = self._get_helper(share) return helper.allow_access(context, share, access) def deny_access(self, context, share, access): """Denies access to a given NAS storage for IPs in :access:""" helper = self._get_helper(share) return helper.deny_access(context, share, access) def do_setup(self, context): """Prepare once the driver. Called once by the manager after the driver is loaded. Validate the flags we care about and setup the suds (web services) client. 
""" self._client.do_setup() self._setup_helpers() def check_for_setup_error(self): """Raises error if prerequisites are not met.""" self._client.check_configuration(self.configuration) def _get_filer(self, share_id): """Returns filer name for the share_id.""" try: return self._share_table[share_id] except KeyError: return def _remember_share(self, share_id, filer): """Stores required share info in local state.""" self._share_table[share_id] = filer def _forget_share(self, share_id): """Remove share info about share.""" try: self._share_table.pop(share_id) except KeyError: pass def _share_offline(self, target, share): """Sends share offline. Required before deleting a share.""" share_name = _get_valid_share_name(share['id']) xml_args = ('%s') % share_name self._client.send_request_to(target, 'volume-offline', xml_args) def _delete_share(self, target, share): """Destroys share on a target OnTap device.""" share_name = _get_valid_share_name(share['id']) xml_args = ('true' '%s') % share_name self._client.send_request_to(target, 'volume-destroy', xml_args) def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" #TODO(rushiagr): better way to handle configuration instead of just # passing to the helper self._helpers = { 'CIFS': NetAppCIFSHelper(self._client, self.configuration), 'NFS': NetAppNFSHelper(self._client, self.configuration), } def _get_helper(self, share): """Returns driver which implements share protocol.""" share_proto = share['share_proto'] for proto in self._helpers.keys(): if share_proto.upper().startswith(proto): return self._helpers[proto] err_msg = _("Invalid NAS protocol supplied: %s. 
") % (share_proto) raise exception.Error(err_msg) def _find_best_aggregate(self): """Returns aggregate with the most free space left.""" aggrs = self._client.get_available_aggregates() if aggrs is None: raise exception.Error(_("No aggregates available")) best_aggregate = max(aggrs.Aggregates.AggregateInfo, key=lambda ai: ai.AggregateSize.SizeAvailable) return best_aggregate def _allocate_share_space(self, aggregate, share): """Create new share on aggregate.""" filer_id = aggregate.FilerId aggr_name = aggregate.AggregateName.split(':')[1] share_name = _get_valid_share_name(share['id']) args_xml = ('%s' '%dg' '%s') % (aggr_name, share['size'], share_name) self._client.send_request_to(filer_id, 'volume-create', args_xml) def _is_snapshot_busy(self, filer, share_name, snapshot_name): """Raises ShareSnapshotIsBusy if snapshot is busy.""" xml_args = ('%s') % share_name snapshots = self._client.send_request_to(filer, 'snapshot-list-info', xml_args, do_response_check=False) for snap in snapshots.Results.snapshots[0]['snapshot-info']: if snap['name'][0] == snapshot_name and snap['busy'][0] == 'true': raise exception.ShareSnapshotIsBusy( snapshot_name=snapshot_name) def get_share_stats(self, refresh=False): """Get share status. If 'refresh' is True, run update the stats first.""" if refresh: self._update_share_status() return self._stats def _update_share_status(self): """Retrieve status info from share volume group.""" LOG.debug(_("Updating share status")) data = {} # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. data["share_backend_name"] = 'NetApp_7_mode' data["vendor_name"] = 'NetApp' data["driver_version"] = '1.0' #TODO(rushiagr): Pick storage_protocol from the helper used. 
data["storage_protocol"] = 'NFS_CIFS' data['total_capacity_gb'] = 'infinite' data['free_capacity_gb'] = 'infinite' data['reserved_percentage'] = 0 data['QoS_support'] = False self._stats = data def get_network_allocations_number(self): """7mode driver does not need to create VIFS""" return 0 def setup_network(self, network_info): """Nothing to set up""" pass def _check_response(request, response): """Checks RPC responses from NetApp devices.""" if response.Status == 'failed': name = request.Name reason = response.Reason msg = _('API %(name)s failed: %(reason)s') raise exception.Error(msg % locals()) def _get_valid_share_name(share_id): """The name can contain letters, numbers, and the underscore character (_). The first character must be a letter or an underscore.""" return 'share_' + share_id.replace('-', '_') def _get_valid_snapshot_name(snapshot_id): """The name can contain letters, numbers, and the underscore character (_). The first character must be a letter or an underscore.""" return 'share_snapshot_' + snapshot_id.replace('-', '_') class NetAppNASHelperBase(object): """Interface for protocol-specific NAS drivers.""" def __init__(self, suds_client, config_object): self.configuration = config_object self._client = suds_client def create_share(self, target_id, share): """Creates NAS share.""" raise NotImplementedError() def delete_share(self, share): """Deletes NAS share.""" raise NotImplementedError() def allow_access(self, context, share, new_rules): """Allows new_rules to a given NAS storage for IPs in :new_rules.""" raise NotImplementedError() def deny_access(self, context, share, new_rules): """Denies new_rules to a given NAS storage for IPs in :new_rules:.""" raise NotImplementedError() def get_target(self, share): """Returns host where the share located..""" raise NotImplementedError() class NetAppNFSHelper(NetAppNASHelperBase): """Netapp specific NFS sharing driver.""" def __init__(self, suds_client, config_object): self.configuration = config_object 
super(NetAppNFSHelper, self).__init__(suds_client, config_object) def create_share(self, target_id, share): """Creates NFS share""" args_xml = ('' '' '%s' '' '' '' '' 'localhost' '' '' '' '' 'false' 'localhost' '' '' '' '' '' '') client = self._client valid_share_name = _get_valid_share_name(share['id']) export_pathname = '/vol/' + valid_share_name client.send_request_to(target_id, 'nfs-exportfs-append-rules-2', args_xml % export_pathname) export_ip = client.get_host_ip_by(target_id) export_location = ':'.join([export_ip, export_pathname]) return export_location def delete_share(self, share): """Deletes NFS share.""" target, export_path = self._get_export_path(share) xml_args = ('' '' '%s' '' '') % export_path self._client.send_request_to(target, 'nfs-exportfs-delete-rules', xml_args) def allow_access(self, context, share, access): """Allows access to a given NFS storage for IPs in :access:.""" if access['access_type'] != 'ip': raise exception.Error(('Invalid access type supplied. ' 'Only \'ip\' type is supported')) ips = access['access_to'] existing_rules = self._get_exisiting_rules(share) new_rules_xml = self._append_new_rules_to(existing_rules, ips) self._modify_rule(share, new_rules_xml) def deny_access(self, context, share, access): """Denies access to a given NFS storage for IPs in :access:.""" denied_ips = access['access_to'] existing_rules = self._get_exisiting_rules(share) if type(denied_ips) is not list: denied_ips = [denied_ips] for deny_rule in denied_ips: try: existing_rules.remove(deny_rule) except ValueError: pass new_rules_xml = self._append_new_rules_to([], existing_rules) self._modify_rule(share, new_rules_xml) def get_target(self, share): """Returns ID of target OnTap device based on export location.""" return self._get_export_path(share)[0] def _modify_rule(self, share, rw_rules): """Modifies access rule for a share.""" target, export_path = self._get_export_path(share) xml_args = ('true' '' '' '%s' '%s' '' '' '') % (export_path, 
''.join(rw_rules)) self._client.send_request_to(target, 'nfs-exportfs-append-rules-2', xml_args) def _get_exisiting_rules(self, share): """Returns available access rules for the share.""" target, export_path = self._get_export_path(share) xml_args = '%s' % export_path response = self._client.send_request_to(target, 'nfs-exportfs-list-rules-2', xml_args) rules = response.Results.rules[0] security_rule = rules['exports-rule-info-2'][0]['security-rules'][0] security_info = security_rule['security-rule-info'][0] root_rules = security_info['root'][0] allowed_hosts = root_rules['exports-hostname-info'] existing_rules = [] for allowed_host in allowed_hosts: if 'name' in allowed_host: existing_rules.append(allowed_host['name'][0]) return existing_rules @staticmethod def _append_new_rules_to(existing_rules, new_rules): """Adds new rules to existing.""" security_rule_xml = ('' '%s' '' '%s' '' '') hostname_info_xml = ('' '%s' '') allowed_hosts_xml = [] if type(new_rules) is not list: new_rules = [new_rules] all_rules = existing_rules + new_rules for ip in all_rules: allowed_hosts_xml.append(hostname_info_xml % ip) return security_rule_xml % (allowed_hosts_xml, allowed_hosts_xml) @staticmethod def _get_export_path(share): """Returns IP address and export location of a share.""" export_location = share['export_location'] if export_location is None: export_location = ':' return export_location.split(':') class NetAppCIFSHelper(NetAppNASHelperBase): """Netapp specific NFS sharing driver.""" CIFS_USER_GROUP = 'Administrators' def __init__(self, suds_client, config_object): self.configuration = config_object super(NetAppCIFSHelper, self).__init__(suds_client, config_object) def create_share(self, target_id, share): """Creates CIFS storage.""" cifs_status = self._get_cifs_status(target_id) if cifs_status == 'stopped': self._start_cifs_service(target_id) share_name = _get_valid_share_name(share['id']) self._set_qtree_security(target_id, share) self._add_share(target_id, share_name) 
self._restrict_access(target_id, 'everyone', share_name) ip_address = self._client.get_host_ip_by(target_id) cifs_location = self._set_export_location(ip_address, share_name) return cifs_location def delete_share(self, share): """Deletes CIFS storage.""" host_ip, share_name = self._get_export_location(share) xml_args = '%s' % share_name self._client.send_request_to(host_ip, 'cifs-share-delete', xml_args) def allow_access(self, context, share, access): """Allows access to a given CIFS storage for IPs in :access:.""" if access['access_type'] != 'passwd': ex_text = ('NetApp only supports "passwd" access type for CIFS.') raise exception.Error(ex_text) user = access['access_to'] target, share_name = self._get_export_location(share) if self._user_exists(target, user): self._allow_access_for(target, user, share_name) else: exc_text = ('User "%s" does not exist on %s OnTap.') % (user, target) raise exception.Error(exc_text) def deny_access(self, context, share, access): """Denies access to a given CIFS storage for IPs in access.""" host_ip, share_name = self._get_export_location(share) user = access['access_to'] self._restrict_access(host_ip, user, share_name) def get_target(self, share): """Returns OnTap target IP based on share export location.""" return self._get_export_location(share)[0] def _set_qtree_security(self, target, share): client = self._client share_name = '/vol/' + _get_valid_share_name(share['id']) xml_args = ('' 'qtree' 'security' '%s' 'mixed' '') % share_name client.send_request_to(target, 'system-cli', xml_args) def _restrict_access(self, target, user_name, share_name): xml_args = ('%s' '%s') % (user_name, share_name) self._client.send_request_to(target, 'cifs-share-ace-delete', xml_args) def _start_cifs_service(self, target_id): """Starts CIFS service on OnTap target.""" client = self._client return client.send_request_to(target_id, 'cifs-start', do_response_check=False) @staticmethod def _get_export_location(share): """Returns export location for a 
given CIFS share.""" export_location = share['export_location'] if export_location is None: export_location = '///' _, _, host_ip, share_name = export_location.split('/') return host_ip, share_name @staticmethod def _set_export_location(ip, share_name): """Returns export location of a share.""" return "//%s/%s" % (ip, share_name) def _get_cifs_status(self, target_id): """Returns status of a CIFS service on target OnTap.""" client = self._client response = client.send_request_to(target_id, 'cifs-status') return response.Status def _allow_access_for(self, target, username, share_name): """Allows access to the CIFS share for a given user.""" xml_args = ('rwx' '%s' '%s') % (share_name, username) self._client.send_request_to(target, 'cifs-share-ace-set', xml_args) def _user_exists(self, target, user): """Returns True if user already exists on a target OnTap.""" xml_args = ('%s') % user resp = self._client.send_request_to(target, 'useradmin-user-list', xml_args, do_response_check=False) return (resp.Status == 'passed') def _add_share(self, target_id, share_name): """Creates CIFS share on target OnTap host.""" client = self._client xml_args = ('/vol/%s' '%s') % (share_name, share_name) client.send_request_to(target_id, 'cifs-share-add', xml_args) manila-2013.2.dev175.gbf1a399/manila/share/drivers/generic.py0000664000175000017500000014267712301410454023540 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Generic Driver for shares. """ import ConfigParser import netaddr import os import re import shutil import socket import threading import time from manila import compute from manila import context from manila import exception from manila.network.linux import ip_lib from manila.network.neutron import api as neutron from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.share import driver from manila import utils from manila import volume from oslo.config import cfg LOG = logging.getLogger(__name__) share_opts = [ cfg.StrOpt('service_image_name', default='manila-service-image', help="Name of image in glance, that will be used to create " "service instance"), cfg.StrOpt('smb_template_config_path', default='$state_path/smb.conf', help="Path to smb config"), cfg.StrOpt('service_instance_name_template', default='manila_service_instance-%s', help="Name of service instance"), cfg.StrOpt('service_instance_user', help="User in service instance"), cfg.StrOpt('service_instance_password', default=None, help="Password to service instance user"), cfg.StrOpt('volume_name_template', default='manila-share-%s', help="Volume name template"), cfg.StrOpt('manila_service_keypair_name', default='manila-service', help="Name of keypair that will be created and used " "for service instance"), cfg.StrOpt('path_to_public_key', default='/home/stack/.ssh/id_rsa.pub', help="Path to hosts public key"), cfg.StrOpt('path_to_private_key', default='/home/stack/.ssh/id_rsa', help="Path to hosts private key"), cfg.StrOpt('volume_snapshot_name_template', default='manila-snapshot-%s', help="Volume snapshot name template"), cfg.IntOpt('max_time_to_build_instance', default=300, help="Maximum time to wait for creating service instance"), cfg.StrOpt('share_mount_path', default='/shares', help="Parent path in service instance where shares " "will be mounted"), cfg.IntOpt('max_time_to_create_volume', default=180, help="Maximum time to wait for 
creating cinder volume"), cfg.IntOpt('max_time_to_attach', default=120, help="Maximum time to wait for attaching cinder volume"), cfg.IntOpt('service_instance_flavor_id', default=100, help="ID of flavor, that will be used for service instance " "creation"), cfg.StrOpt('service_instance_smb_config_path', default='$share_mount_path/smb.conf', help="Path to smb config in service instance"), cfg.StrOpt('service_network_name', default='manila_service_network', help="Name of manila service network"), cfg.StrOpt('service_network_cidr', default='10.254.0.0/16', help="CIDR of manila service network"), cfg.StrOpt('interface_driver', default='manila.network.linux.interface.OVSInterfaceDriver', help="Vif driver"), cfg.ListOpt('share_helpers', default=[ 'CIFS=manila.share.drivers.generic.CIFSHelper', 'NFS=manila.share.drivers.generic.NFSHelper', ], help='Specify list of share export helpers.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) def synchronized(f): """Decorates function with unique locks for each share network.""" def wrapped_func(self, *args, **kwargs): for arg in args: share_network_id = getattr(arg, 'share_network_id', None) if isinstance(arg, dict): share_network_id = arg.get('share_network_id', None) if share_network_id: break else: raise exception.\ ManilaException(_('Could not get share network id')) with self.share_networks_locks.setdefault(share_network_id, threading.RLock()): return f(self, *args, **kwargs) return wrapped_func def _ssh_exec(server, command): """Executes ssh commands and checks/restores ssh connection.""" if not server['ssh'].get_transport().is_active(): server['ssh_pool'].remove(server['ssh']) server['ssh'] = server['ssh_pool'].create() return utils.ssh_execute(server['ssh'], ' '.join(command)) class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver): """Executes commands relating to Shares.""" def __init__(self, db, *args, **kwargs): """Do initialization.""" super(GenericShareDriver, self).__init__(*args, **kwargs) 
self.admin_context = context.get_admin_context() self.db = db self.configuration.append_config_values(share_opts) self._helpers = None def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if not self.configuration.service_instance_user: raise exception.ManilaException(_('Service instance user is not ' 'specified')) def do_setup(self, context): """Any initialization the generic driver does while starting.""" super(GenericShareDriver, self).do_setup(context) self.compute_api = compute.API() self.volume_api = volume.API() self.neutron_api = neutron.API() self.share_networks_locks = {} self.share_networks_servers = {} attempts = 5 while attempts: try: self.service_tenant_id = self.neutron_api.admin_tenant_id break except exception.NetworkException: LOG.debug(_('Connection to neutron failed')) attempts -= 1 time.sleep(3) else: raise exception.\ ManilaException(_('Can not receive service tenant id')) self.service_network_id = self._get_service_network() self.vif_driver = importutils.\ import_class(self.configuration.interface_driver)() self._setup_connectivity_with_service_instances() self._setup_helpers() def _get_service_network(self): """Finds existing or creates new service network.""" service_network_name = self.configuration.service_network_name networks = [network for network in self.neutron_api. 
get_all_tenant_networks(self.service_tenant_id) if network['name'] == service_network_name] if len(networks) > 1: raise exception.ManilaException(_('Ambiguous service networks')) elif not networks: return self.neutron_api.network_create(self.service_tenant_id, service_network_name)['id'] else: return networks[0]['id'] def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.share_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper(self._execute, self.configuration, self.share_networks_locks) def create_share(self, context, share): """Creates share.""" if share['share_network_id'] is None: raise exception.\ ManilaException(_('Share Network is not specified')) server = self._get_service_instance(self.admin_context, share) volume = self._allocate_container(context, share) volume = self._attach_volume(context, share, server, volume) self._format_device(server, volume) self._mount_device(context, share, server, volume) location = self._get_helper(share).create_export(server, share['name']) return location def _format_device(self, server, volume): """Formats device attached to the service vm.""" command = ['sudo', 'mkfs.ext4', volume['mountpoint']] _ssh_exec(server, command) def _mount_device(self, context, share, server, volume): """Mounts attached and formatted block device to the directory.""" mount_path = self._get_mount_path(share) command = ['sudo', 'mkdir', '-p', mount_path, ';'] command.extend(['sudo', 'mount', volume['mountpoint'], mount_path]) try: _ssh_exec(server, command) except exception.ProcessExecutionError as e: if 'already mounted' not in e.stderr: raise LOG.debug(_('Share %s is already mounted') % share['name']) command = ['sudo', 'chmod', '777', mount_path] _ssh_exec(server, command) def _unmount_device(self, context, share, server): """Unmounts device from directory on service 
vm.""" mount_path = self._get_mount_path(share) command = ['sudo', 'umount', mount_path, ';'] command.extend(['sudo', 'rmdir', mount_path]) try: _ssh_exec(server, command) except exception.ProcessExecutionError as e: if 'not found' in e.stderr: LOG.debug(_('%s is not mounted') % share['name']) def _get_mount_path(self, share): """ Returns the path, that will be used for mount device in service vm. """ return os.path.join(self.configuration.share_mount_path, share['name']) @synchronized def _attach_volume(self, context, share, server, volume): """Attaches cinder volume to service vm.""" if volume['status'] == 'in-use': attached_volumes = [vol.id for vol in self.compute_api.instance_volumes_list(self.admin_context, server['id'])] if volume['id'] in attached_volumes: return volume else: raise exception.ManilaException(_('Volume %s is already ' 'attached to another instance') % volume['id']) device_path = self._get_device_path(self.admin_context, server) self.compute_api.instance_volume_attach(self.admin_context, server['id'], volume['id'], device_path) t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] == 'in-use': break elif volume['status'] != 'attaching': raise exception.ManilaException(_('Failed to attach volume %s') % volume['id']) time.sleep(1) else: raise exception.ManilaException(_('Volume have not been attached ' 'in %ss. Giving up') % self.configuration.max_time_to_attach) return volume def _get_volume(self, context, share_id): """Finds volume, associated to the specific share.""" volume_name = self.configuration.volume_name_template % share_id search_opts = {'display_name': volume_name} if context.is_admin: search_opts['all_tenants'] = True volumes_list = self.volume_api.get_all(context, search_opts) volume = None if len(volumes_list) == 1: volume = volumes_list[0] elif len(volumes_list) > 1: raise exception.ManilaException(_('Error. 
Ambiguous volumes')) return volume def _get_volume_snapshot(self, context, snapshot_id): """Finds volume snaphots, associated to the specific share snaphots.""" volume_snapshot_name = self.configuration.\ volume_snapshot_name_template % snapshot_id volume_snapshot_list = self.volume_api.get_all_snapshots(context, {'display_name': volume_snapshot_name}) volume_snapshot = None if len(volume_snapshot_list) == 1: volume_snapshot = volume_snapshot_list[0] elif len(volume_snapshot_list) > 1: raise exception.\ ManilaException(_('Error. Ambiguous volume snaphots')) return volume_snapshot @synchronized def _detach_volume(self, context, share, server): """Detaches cinder volume from service vm.""" attached_volumes = [vol.id for vol in self.compute_api.instance_volumes_list(self.admin_context, server['id'])] volume = self._get_volume(context, share['id']) if volume and volume['id'] in attached_volumes: self.compute_api.instance_volume_detach(self.admin_context, server['id'], volume['id']) t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] in ('available', 'error'): break time.sleep(1) else: raise exception.ManilaException(_('Volume have not been ' 'detached in %ss. Giving up') % self.configuration.max_time_to_attach) def _get_device_path(self, context, server): """Returns device path, that will be used for cinder volume attaching. 
""" volumes = self.compute_api.instance_volumes_list(context, server['id']) used_literals = set(volume.device[-1] for volume in volumes if '/dev/vd' in volume.device) lit = 'b' while lit in used_literals: lit = chr(ord(lit) + 1) device_name = '/dev/vd' + lit return device_name def _get_service_instance_name(self, share): """Returns service vms name.""" return self.configuration.service_instance_name_template % \ share['share_network_id'] def _get_server_ip(self, server): """Returns service vms ip address.""" net = server['networks'] try: net_ips = net[self.configuration.service_network_name] return net_ips[0] except KeyError: msg = _('Service vm is not attached to %s network') except IndexError: msg = _('Service vm has no ips on %s network') msg = msg % self.configuration.service_network_name LOG.error(msg) raise exception.ManilaException(msg) def _ensure_or_delete_server(self, context, server, update=False): """Ensures that server exists and active, otherwise deletes it.""" if update: try: server.update(self.compute_api.server_get(context, server['id'])) except exception.InstanceNotFound as e: LOG.debug(e) return False if server['status'] == 'ACTIVE': if self._check_server_availability(server): return True self._delete_server(context, server) return False def _delete_server(self, context, server): """Deletes the server.""" self.compute_api.server_delete(context, server['id']) t = time.time() while time.time() - t < self.configuration.\ max_time_to_build_instance: try: server = self.compute_api.server_get(context, server['id']) except exception.InstanceNotFound: LOG.debug(_('Service instance was deleted succesfully')) break time.sleep(1) else: raise exception.ManilaException(_('Instance have not been deleted ' 'in %ss. 
Giving up') % self.configuration.max_time_to_build_instance) @synchronized def _get_service_instance(self, context, share, create=True): """Finds or creates and setups service vm.""" server = self.share_networks_servers.get(share['share_network_id'], {}) old_server_ip = server.get('ip', None) if server and self._ensure_or_delete_server(context, server, update=True): return server else: server = {} service_instance_name = self._get_service_instance_name(share) search_opts = {'name': service_instance_name} servers = self.compute_api.server_list(context, search_opts, True) if len(servers) == 1: server = servers[0] server['ip'] = self._get_server_ip(server) old_server_ip = server['ip'] if not self._ensure_or_delete_server(context, server): server.clear() elif len(servers) > 1: raise exception.\ ManilaException(_('Ambiguous service instances')) if not server and create: server = self._create_service_instance(context, service_instance_name, share, old_server_ip) if server: server['share_network_id'] = share['share_network_id'] server['ip'] = self._get_server_ip(server) server['ssh_pool'] = self._get_ssh_pool(server) server['ssh'] = server['ssh_pool'].create() for helper in self._helpers.values(): helper.init_helper(server) self.share_networks_servers[share['share_network_id']] = server return server def _get_ssh_pool(self, server): """Returns ssh connection pool for service vm.""" ssh_pool = utils.SSHPool(server['ip'], 22, None, self.configuration.service_instance_user, password=self.configuration.service_instance_password, privatekey=self.configuration.path_to_private_key, max_size=1) return ssh_pool def _get_key(self, context): """Returns name of key, that will be injected to service vm.""" if not self.configuration.path_to_public_key or \ not self.configuration.path_to_private_key: return path_to_public_key = \ os.path.expanduser(self.configuration.path_to_public_key) path_to_private_key = \ os.path.expanduser(self.configuration.path_to_private_key) if not 
os.path.exists(path_to_public_key) or \ not os.path.exists(path_to_private_key): return keypair_name = self.configuration.manila_service_keypair_name keypairs = [k for k in self.compute_api.keypair_list(context) if k.name == keypair_name] if len(keypairs) > 1: raise exception.ManilaException(_('Ambiguous keypairs')) public_key, _ = self._execute('cat', path_to_public_key, run_as_root=True) if not keypairs: keypair = self.compute_api.keypair_import(context, keypair_name, public_key) else: keypair = keypairs[0] if keypair.public_key != public_key: LOG.debug('Public key differs from existing keypair. ' 'Creating new keypair') self.compute_api.keypair_delete(context, keypair.id) keypair = self.compute_api.keypair_import(context, keypair_name, public_key) return keypair.name def _get_service_image(self, context): """Returns ID of service image, that will be used for service vm creating. """ images = [image.id for image in self.compute_api.image_list(context) if image.name == self.configuration.service_image_name] if len(images) == 1: return images[0] elif not images: raise exception.\ ManilaException(_('No appropriate image was found')) else: raise exception.ManilaException(_('Ambiguous image name')) def _create_service_instance(self, context, instance_name, share, old_server_ip): """Creates service vm and sets up networking for it.""" service_image_id = self._get_service_image(context) key_name = self._get_key(context) if not self.configuration.service_instance_password and not key_name: raise exception.ManilaException(_('Neither service instance' 'password nor key are available')) port = self._setup_network_for_instance(context, share, old_server_ip) try: self._setup_connectivity_with_service_instances() except Exception as e: LOG.debug(e) self.neutron_api.delete_port(port['id']) raise service_instance = self.compute_api.server_create(context, instance_name, service_image_id, self.configuration.service_instance_flavor_id, key_name, None, None, nics=[{'port-id': 
port['id']}]) t = time.time() while time.time() - t < self.configuration.max_time_to_build_instance: if service_instance['status'] == 'ACTIVE': break if service_instance['status'] == 'ERROR': raise exception.ManilaException(_('Failed to build service ' 'instance')) time.sleep(1) try: service_instance = self.compute_api.server_get(context, service_instance['id']) except exception.InstanceNotFound as e: LOG.debug(e) else: raise exception.ManilaException(_('Instance have not been spawned ' 'in %ss. Giving up') % self.configuration.max_time_to_build_instance) service_instance['ip'] = self._get_server_ip(service_instance) if not self._check_server_availability(service_instance): raise exception.ManilaException(_('SSH connection have not been ' 'established in %ss. Giving up') % self.configuration.max_time_to_build_instance) return service_instance def _check_server_availability(self, server): t = time.time() while time.time() - t < self.configuration.max_time_to_build_instance: LOG.debug('Checking service vm availablity') try: socket.socket().connect((server['ip'], 22)) LOG.debug(_('Service vm is available via ssh.')) return True except socket.error as e: LOG.debug(e) LOG.debug(_('Server is not available through ssh. 
Waiting...')) time.sleep(5) return False def _setup_network_for_instance(self, context, share, old_server_ip): """Setups network for service vm.""" service_network = self.neutron_api.get_network(self.service_network_id) all_service_subnets = [self.neutron_api.get_subnet(subnet_id) for subnet_id in service_network['subnets']] service_subnets = [subnet for subnet in all_service_subnets if subnet['name'] == share['share_network_id']] if len(service_subnets) > 1: raise exception.ManilaException(_('Ambiguous subnets')) elif not service_subnets: service_subnet = \ self.neutron_api.subnet_create(self.service_tenant_id, self.service_network_id, share['share_network_id'], self._get_cidr_for_subnet(all_service_subnets)) else: service_subnet = service_subnets[0] share_network = self.db.share_network_get(context, share['share_network_id']) private_router = self._get_private_router(share_network) try: self.neutron_api.router_add_interface(private_router['id'], service_subnet['id']) except exception.NetworkException as e: if 'already has' not in e.msg: raise LOG.debug(_('Subnet %(subnet_id)s is already attached to the ' 'router %(router_id)s') % {'subnet_id': service_subnet['id'], 'router_id': private_router['id']}) return self.neutron_api.create_port(self.service_tenant_id, self.service_network_id, subnet_id=service_subnet['id'], fixed_ip=old_server_ip, device_owner='manila') def _get_private_router(self, share_network): """Returns router attached to private subnet gateway.""" private_subnet = self.neutron_api.\ get_subnet(share_network['neutron_subnet_id']) if not private_subnet['gateway_ip']: raise exception.ManilaException(_('Subnet must have gateway')) private_network_ports = [p for p in self.neutron_api.list_ports( network_id=share_network['neutron_net_id'])] for p in private_network_ports: fixed_ip = p['fixed_ips'][0] if fixed_ip['subnet_id'] == private_subnet['id'] and \ fixed_ip['ip_address'] == private_subnet['gateway_ip']: private_subnet_gateway_port = p break else: 
raise exception.ManilaException(_('Subnet gateway is not attached ' 'the router')) private_subnet_router = self.neutron_api.show_router( private_subnet_gateway_port['device_id']) return private_subnet_router def _setup_connectivity_with_service_instances(self): """Setups connectivity with service instances by creating port in service network, creating and setting up required network devices. """ port = self._setup_service_port() interface_name = self.vif_driver.get_device_name(port) self.vif_driver.plug(port['id'], interface_name, port['mac_address']) ip_cidrs = [] for fixed_ip in port['fixed_ips']: subnet = self.neutron_api.get_subnet(fixed_ip['subnet_id']) net = netaddr.IPNetwork(subnet['cidr']) ip_cidr = '%s/%s' % (fixed_ip['ip_address'], net.prefixlen) ip_cidrs.append(ip_cidr) self.vif_driver.init_l3(interface_name, ip_cidrs) # ensure that interface is first in the list device = ip_lib.IPDevice(interface_name) device.route.pullup_route(interface_name) # here we are checking for garbage devices from removed service port self._clean_garbage(device) def _clean_garbage(self, device): """Finds and removes network device, that was associated with deleted service port. """ list_dev = [] for dev in ip_lib.IPWrapper().get_devices(): if dev.name != device.name and dev.name[:3] == device.name[:3]: cidr_set = set() for a in dev.addr.list(): if a['ip_version'] == 4: cidr_set.add(str(netaddr.IPNetwork(a['cidr']).cidr)) list_dev.append((dev.name, cidr_set)) device_cidr_set = set(str(netaddr.IPNetwork(a['cidr']).cidr) for a in device.addr.list() if a['ip_version'] == 4) for dev_name, cidr_set in list_dev: if device_cidr_set & cidr_set: self.vif_driver.unplug(dev_name) def _setup_service_port(self): """Find or creates neutron port, that will be used for connectivity with service instances. """ ports = [port for port in self.neutron_api. list_ports(device_id='manila-share')] if len(ports) > 1: raise exception.\ ManilaException(_('Error. 
Ambiguous service ports')) elif not ports: services = self.db.service_get_all_by_topic(self.admin_context, 'manila-share') host = services[0]['host'] if services else None if host is None: raise exception.ManilaException('Unable to get host') port = self.neutron_api.create_port(self.service_tenant_id, self.service_network_id, device_id='manila-share', device_owner='manila:generic_driver', host_id=host) else: port = ports[0] network = self.neutron_api.get_network(self.service_network_id) subnets = set(network['subnets']) port_fixed_ips = [] for fixed_ip in port['fixed_ips']: port_fixed_ips.append({'subnet_id': fixed_ip['subnet_id'], 'ip_address': fixed_ip['ip_address']}) if fixed_ip['subnet_id'] in subnets: subnets.remove(fixed_ip['subnet_id']) # If there are subnets here that means that # we need to add those to the port and call update. if subnets: port_fixed_ips.extend([dict(subnet_id=s) for s in subnets]) port = self.neutron_api.update_port_fixed_ips( port['id'], {'fixed_ips': port_fixed_ips}) return port def _get_cidr_for_subnet(self, subnets): """Returns not used cidr for service subnet creating.""" used_cidrs = set(subnet['cidr'] for subnet in subnets) serv_cidr = netaddr.IPNetwork(self.configuration.service_network_cidr) for subnet in serv_cidr.subnet(29): cidr = str(subnet.cidr) if cidr not in used_cidrs: return cidr else: raise exception.ManilaException(_('No available cidrs')) def _allocate_container(self, context, share, snapshot=None): """Creates cinder volume, associated to share by name.""" volume_snapshot = None if snapshot: volume_snapshot = self._get_volume_snapshot(context, snapshot['id']) volume = self.volume_api.create(context, share['size'], self.configuration.volume_name_template % share['id'], '', snapshot=volume_snapshot) t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: if volume['status'] == 'available': break if volume['status'] == 'error': raise exception.ManilaException(_('Failed to create volume')) 
time.sleep(1) volume = self.volume_api.get(context, volume['id']) else: raise exception.ManilaException(_('Volume have not been created ' 'in %ss. Giving up') % self.configuration.max_time_to_create_volume) return volume def _deallocate_container(self, context, share): """Deletes cinder volume.""" volume = self._get_volume(context, share['id']) if volume: self.volume_api.delete(context, volume['id']) t = time.time() while time.time() - t < self.configuration.\ max_time_to_create_volume: try: volume = self.volume_api.get(context, volume['id']) except exception.VolumeNotFound: LOG.debug(_('Volume was deleted succesfully')) break time.sleep(1) else: raise exception.ManilaException(_('Volume have not been ' 'deleted in %ss. Giving up') % self.configuration.max_time_to_create_volume) def get_share_stats(self, refresh=False): """Get share status. If 'refresh' is True, run update the stats first.""" if refresh: self._update_share_status() return self._stats def _update_share_status(self): """Retrieve status info from share volume group.""" LOG.debug(_("Updating share status")) data = {} # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. 
data["share_backend_name"] = 'Cinder Volumes' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = 'NFS_CIFS' data['total_capacity_gb'] = 'infinite' data['free_capacity_gb'] = 'infinite' data['reserved_percentage'] = \ self.configuration.reserved_share_percentage data['QoS_support'] = False self._stats = data def create_share_from_snapshot(self, context, share, snapshot): """Is called to create share from snapshot.""" server = self._get_service_instance(self.admin_context, share) volume = self._allocate_container(context, share, snapshot) volume = self._attach_volume(context, share, server, volume) self._mount_device(context, share, server, volume) location = self._get_helper(share).create_export(server, share['name']) return location def delete_share(self, context, share): """Deletes share.""" if not share['share_network_id']: return server = self._get_service_instance(self.admin_context, share, create=False) if server: self._get_helper(share).remove_export(server, share['name']) self._unmount_device(context, share, server) self._detach_volume(context, share, server) self._deallocate_container(context, share) def create_snapshot(self, context, snapshot): """Creates a snapshot.""" volume = self._get_volume(context, snapshot['share_id']) volume_snapshot_name = self.configuration.\ volume_snapshot_name_template % snapshot['id'] volume_snapshot = self.volume_api.create_snapshot_force(context, volume['id'], volume_snapshot_name, '') t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: if volume_snapshot['status'] == 'available': break if volume_snapshot['status'] == 'error': raise exception.ManilaException(_('Failed to create volume ' 'snapshot')) time.sleep(1) volume_snapshot = self.volume_api.get_snapshot(context, volume_snapshot['id']) else: raise exception.ManilaException(_('Volume snapshot have not been ' 'created in %ss. 
Giving up') % self.configuration.max_time_to_create_volume) def delete_snapshot(self, context, snapshot): """Deletes a snapshot.""" volume_snapshot = self._get_volume_snapshot(context, snapshot['id']) if volume_snapshot is None: return self.volume_api.delete_snapshot(context, volume_snapshot['id']) t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: try: snapshot = self.volume_api.get_snapshot(context, volume_snapshot['id']) except exception.VolumeSnapshotNotFound: LOG.debug(_('Volume snapshot was deleted succesfully')) break time.sleep(1) else: raise exception.ManilaException(_('Volume snapshot have not been ' 'deleted in %ss. Giving up') % self.configuration.max_time_to_create_volume) def ensure_share(self, context, share): """Ensure that storage are mounted and exported.""" server = self._get_service_instance(context, share) volume = self._get_volume(context, share['id']) volume = self._attach_volume(context, share, server, volume) self._mount_device(context, share, server, volume) self._get_helper(share).create_export(server, share['name']) def allow_access(self, context, share, access): """Allow access to the share.""" server = self._get_service_instance(self.admin_context, share, create=False) if not server: raise exception.ManilaException('Server not found. 
Try to ' 'restart manila share service') self._get_helper(share).allow_access(server, share['name'], access['access_type'], access['access_to']) def deny_access(self, context, share, access): """Deny access to the share.""" if not share['share_network_id']: return server = self._get_service_instance(self.admin_context, share, create=False) if server: self._get_helper(share).deny_access(server, share['name'], access['access_type'], access['access_to']) def _get_helper(self, share): if share['share_proto'].startswith('NFS'): return self._helpers['NFS'] elif share['share_proto'].startswith('CIFS'): return self._helpers['CIFS'] else: raise exception.InvalidShare(reason='Wrong share type') def get_network_allocations_number(self): return 0 def setup_network(self, network_info): pass class NASHelperBase(object): """Interface to work with share.""" def __init__(self, execute, config_object, locks): self.configuration = config_object self._execute = execute self.share_networks_locks = locks def init_helper(self, server): pass def create_export(self, server, share_name, recreate=False): """Create new export, delete old one if exists.""" raise NotImplementedError() def remove_export(self, server, share_name): """Remove export.""" raise NotImplementedError() def allow_access(self, server, share_name, access_type, access): """Allow access to the host.""" raise NotImplementedError() def deny_access(self, local_path, share_name, access_type, access, force=False): """Deny access to the host.""" raise NotImplementedError() class NFSHelper(NASHelperBase): """Interface to work with share.""" def create_export(self, server, share_name, recreate=False): """Create new export, delete old one if exists.""" return ':'.join([server['ip'], os.path.join(self.configuration.share_mount_path, share_name)]) def init_helper(self, server): try: _ssh_exec(server, ['sudo', 'exportfs']) except exception.ProcessExecutionError as e: if 'command not found' in e.stderr: raise exception.ManilaException( 
_('NFS server is not installed on %s') % server['id']) LOG.error(e.stderr) def remove_export(self, server, share_name): """Remove export.""" pass def allow_access(self, server, share_name, access_type, access): """Allow access to the host""" local_path = os.path.join(self.configuration.share_mount_path, share_name) if access_type != 'ip': reason = 'only ip access type allowed' raise exception.InvalidShareAccess(reason) #check if presents in export out, _ = _ssh_exec(server, ['sudo', 'exportfs']) out = re.search(re.escape(local_path) + '[\s\n]*' + re.escape(access), out) if out is not None: raise exception.ShareAccessExists(access_type=access_type, access=access) _ssh_exec(server, ['sudo', 'exportfs', '-o', 'rw,no_subtree_check', ':'.join([access, local_path])]) def deny_access(self, server, share_name, access_type, access, force=False): """Deny access to the host.""" local_path = os.path.join(self.configuration.share_mount_path, share_name) _ssh_exec(server, ['sudo', 'exportfs', '-u', ':'.join([access, local_path])]) class CIFSHelper(NASHelperBase): """Class provides functionality to operate with cifs shares""" def __init__(self, *args): """Store executor and configuration path.""" super(CIFSHelper, self).__init__(*args) self.config_path = self.configuration.service_instance_smb_config_path self.smb_template_config = self.configuration.smb_template_config_path self.test_config = "%s_" % (self.smb_template_config,) self.local_configs = {} def _create_local_config(self, share_network_id): path, ext = os.path.splitext(self.smb_template_config) local_config = '%s-%s%s' % (path, share_network_id, ext) self.local_configs[share_network_id] = local_config shutil.copy(self.smb_template_config, local_config) return local_config def _get_local_config(self, share_network_id): local_config = self.local_configs.get(share_network_id, None) if local_config is None: local_config = self._create_local_config(share_network_id) return local_config def init_helper(self, server): 
self._recreate_template_config() local_config = self._create_local_config(server['share_network_id']) config_dir = os.path.dirname(self.config_path) try: _ssh_exec(server, ['sudo', 'mkdir', config_dir]) except exception.ProcessExecutionError as e: if 'File exists' not in e.stderr: raise LOG.debug(_('Directory %s already exists') % config_dir) _ssh_exec(server, ['sudo', 'chown', self.configuration.service_instance_user, config_dir]) _ssh_exec(server, ['touch', self.config_path]) try: _ssh_exec(server, ['sudo', 'stop', 'smbd']) except exception.ProcessExecutionError as e: if 'Unknown instance' not in e.stderr: raise LOG.debug(_('Samba service is not running')) self._write_remote_config(local_config, server) _ssh_exec(server, ['sudo', 'smbd', '-s', self.config_path]) self._restart_service(server) def create_export(self, server, share_name, recreate=False): """Create new export, delete old one if exists.""" local_path = os.path.join(self.configuration.share_mount_path, share_name) config = self._get_local_config(server['share_network_id']) parser = ConfigParser.ConfigParser() parser.read(config) #delete old one if parser.has_section(share_name): if recreate: parser.remove_section(share_name) else: raise exception.Error('Section exists') #Create new one parser.add_section(share_name) parser.set(share_name, 'path', local_path) parser.set(share_name, 'browseable', 'yes') parser.set(share_name, 'guest ok', 'yes') parser.set(share_name, 'read only', 'no') parser.set(share_name, 'writable', 'yes') parser.set(share_name, 'create mask', '0755') parser.set(share_name, 'hosts deny', '0.0.0.0/0') # denying all ips parser.set(share_name, 'hosts allow', '127.0.0.1') self._update_config(parser, config) self._write_remote_config(config, server) self._restart_service(server) return '//%s/%s' % (server['ip'], share_name) def remove_export(self, server, share_name): """Remove export.""" config = self._get_local_config(server['share_network_id']) parser = ConfigParser.ConfigParser() 
parser.read(config) #delete old one if parser.has_section(share_name): parser.remove_section(share_name) self._update_config(parser, config) self._write_remote_config(config, server) _ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share', share_name]) @synchronized def _write_remote_config(self, config, server): with open(config, 'r') as f: cfg = "'" + f.read() + "'" _ssh_exec(server, ['echo %s > %s' % (cfg, self.config_path)]) def allow_access(self, server, share_name, access_type, access): """Allow access to the host.""" if access_type != 'ip': reason = 'only ip access type allowed' raise exception.InvalidShareAccess(reason) config = self._get_local_config(server['share_network_id']) parser = ConfigParser.ConfigParser() parser.read(config) hosts = parser.get(share_name, 'hosts allow') if access in hosts.split(): raise exception.ShareAccessExists(access_type=access_type, access=access) hosts += ' %s' % (access,) parser.set(share_name, 'hosts allow', hosts) self._update_config(parser, config) self._write_remote_config(config, server) self._restart_service(server) def deny_access(self, server, share_name, access_type, access, force=False): """Deny access to the host.""" config = self._get_local_config(server['share_network_id']) parser = ConfigParser.ConfigParser() try: parser.read(config) hosts = parser.get(share_name, 'hosts allow') hosts = hosts.replace(' %s' % (access,), '', 1) parser.set(share_name, 'hosts allow', hosts) self._update_config(parser, config) except ConfigParser.NoSectionError: if not force: raise self._write_remote_config(config, server) self._restart_service(server) def _recreate_template_config(self): """Create new SAMBA configuration file.""" if os.path.exists(self.smb_template_config): os.unlink(self.smb_template_config) parser = ConfigParser.ConfigParser() parser.add_section('global') parser.set('global', 'security', 'user') parser.set('global', 'server string', '%h server (Samba, Openstack)') self._update_config(parser, 
self.smb_template_config) def _restart_service(self, server): _ssh_exec(server, 'sudo pkill -HUP smbd'.split()) def _update_config(self, parser, config): """Check if new configuration is correct and save it.""" #Check that configuration is correct with open(self.test_config, 'w') as fp: parser.write(fp) self._execute('testparm', '-s', self.test_config, check_exit_code=True) #save it with open(config, 'w') as fp: parser.write(fp) manila-2013.2.dev175.gbf1a399/manila/share/drivers/__init__.py0000664000175000017500000000151012301410454023637 0ustar chuckchuck00000000000000# Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila.share.driver` -- Manila Share Drivers ===================================================== .. automodule:: manila.share.driver :platform: Unix :synopsis: Module containing all the Manila Share drivers. """ manila-2013.2.dev175.gbf1a399/manila/share/drivers/glusterfs.py0000664000175000017500000002526512301410454024133 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS Driver for shares. """ import errno import os import pdb from pipes import quote as shellquote import re import xml.etree.cElementTree as etree from manila import exception from manila.openstack.common import log as logging from manila.share import driver from manila import utils from oslo.config import cfg LOG = logging.getLogger(__name__) GlusterfsManilaShare_opts = [ cfg.StrOpt('glusterfs_volumes_config', default='/etc/manila/glusterfs_volumes', help='File with the list of Gluster volumes that can' 'be used to create shares'), cfg.StrOpt('glusterfs_mount_point_base', default='$state_path/mnt', help='Base dir containing mount points for Gluster volumes.'), ] CONF = cfg.CONF CONF.register_opts(GlusterfsManilaShare_opts) _nfs_export_dir = 'nfs.export-dir' class GlusterAddress(object): scheme = re.compile('\A(?:(?P[^:@/]+)@)?' 
'(?P[^:@/]+):' '/(?P.+)') def __init__(self, address): m = self.scheme.search(address) if not m: raise exception.GlusterfsException('invalid gluster address ' + address) self.remote_user = m.group('user') self.host = m.group('host') self.volume = m.group('vol') self.qualified = address self.export = ':/'.join([self.host, self.volume]) def make_gluster_args(self, *args): args = ('gluster',) + args kw = {} if self.remote_user: args = ('ssh', '@'.join([self.remote_user, self.host]), ' '.join(shellquote(a) for a in args)) else: kw['run_as_root'] = True return args, kw class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver): """ Glusterfs Specific driver """ def __init__(self, db, *args, **kwargs): super(GlusterfsShareDriver, self).__init__(*args, **kwargs) self.db = db self._helpers = None self.gluster_address = None self.configuration.append_config_values(GlusterfsManilaShare_opts) def do_setup(self, context): """Native mount the Gluster volume.""" super(GlusterfsShareDriver, self).do_setup(context) self.gluster_address = GlusterAddress( self._read_gluster_vol_from_config() ) try: self._execute('mount.glusterfs', check_exit_code=False) except OSError as exc: if exc.errno == errno.ENOENT: raise exception.GlusterfsException( _('mount.glusterfs is not installed')) else: raise self._ensure_gluster_vol_mounted() def check_for_setup_error(self): """Is called after do_setup method. Nothing to do.""" pass def _get_mount_point_for_gluster_vol(self): """Return mount point for gluster volume.""" return os.path.join(self.configuration.glusterfs_mount_point_base, self.gluster_address.volume) def _do_mount(self, cmd, ensure): """Finalize mount command. 
:param cmd: command to do the actual mount :param ensure: boolean to allow remounting a volume with a warning :param glusterfs_export: gluster volume that is mounted """ try: self._execute(*cmd, run_as_root=True) except exception.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.stderr: LOG.warn(_("%s is already mounted"), self.gluster_address.export) else: raise exception.GlusterfsException( 'Unable to mount Gluster volume' ) def _mount_gluster_vol(self, mount_path, ensure=False): """Mount Gluster volume at the specified mount path.""" self._execute('mkdir', '-p', mount_path) command = ['mount', '-t', 'glusterfs', self.gluster_address.export, mount_path] self._do_mount(command, ensure) def _read_gluster_vol_from_config(self): config_file = self.configuration.glusterfs_volumes_config if not os.access(config_file, os.R_OK): msg = (_("Gluster config file at %(config)s doesn't exist") % {'config': config}) LOG.error(msg) raise exception.GlusterfsException(msg) with open(config_file) as f: return f.readline().strip() def _get_export_dir_list(self): try: args, kw = self.gluster_address.make_gluster_args( '--xml', 'volume', 'info', self.gluster_address.volume ) out, err = self._execute(*args, **kw) except exception.ProcessExecutionError as exc: LOG.error(_("Error retrieving volume info: %s") % exc.stderr) raise if not out: raise exception.GlusterfsException( 'Empty answer from gluster command' ) vix = etree.fromstring(out) if int(vix.find('./volInfo/volumes/count').text) != 1: raise exception.InvalidShare('Volume name ambiguity') export_dir = None for o, v in \ ((e.find(a).text for a in ('name', 'value')) for e in vix.findall(".//option")): if o == _nfs_export_dir: export_dir = v break if export_dir: return export_dir.split(',') else: return [] def _ensure_gluster_vol_mounted(self): """Ensure that a Gluster volume is native-mounted on Manila host. 
""" mount_path = self._get_mount_point_for_gluster_vol() try: self._mount_gluster_vol(mount_path, ensure=True) except exception.GlusterfsException: LOG.error('Could not mount the Gluster volume %s', self.gluster_address.volume) raise def _get_local_share_path(self, share): """Determine the locally mounted path of the share (in Manila host). """ local_vol_path = self._get_mount_point_for_gluster_vol() if not os.access(local_vol_path, os.R_OK): raise exception.GlusterfsException('share path %s does not exist' % local_vol_path) return os.path.join(local_vol_path, share['name']) def create_share(self, ctx, share): """Create a directory that'd serve as a share in a Gluster volume.""" local_share_path = self._get_local_share_path(share) cmd = ['mkdir', local_share_path] try: self._execute(*cmd, run_as_root=True) except exception.ProcessExecutionError: LOG.error('Unable to create share %s', share['name']) raise export_location = os.path.join(self.gluster_address.qualified, share['name']) return export_location def delete_share(self, context, share): """Remove a directory that served as a share in a Gluster volume.""" local_share_path = self._get_local_share_path(share) cmd = ['rm', '-rf', local_share_path] try: self._execute(*cmd, run_as_root=True) except exception.ProcessExecutionError: LOG.error('Unable to delete share %s', share['name']) raise def create_snapshot(self, context, snapshot): """TBD: Is called to create snapshot.""" raise NotImplementedError() def create_share_from_snapshot(self, context, share, snapshot): """Is called to create share from snapshot.""" raise NotImplementedError() def delete_snapshot(self, context, snapshot): """TBD: Is called to remove snapshot.""" raise NotImplementedError() def ensure_share(self, context, share): """Might not be needed?""" pass def _manage_access(self, context, share, access, cbk): """Manage share access by adjusting the export list with cbk cbk is a callable of args (dl, acc), where dl is a list of strings and acc is a 
string. It should return True (or a value of Boolean reduct True) if it leaves dl intact, and False (or a value of Boolean reduct False) if it makes a change on dl cbk will be called with dl being list of currently exported dirs and acc being a textual specification derived from access. """ if access['access_type'] != 'ip': raise exception.InvalidShareAccess('only ip access type allowed') export_dir_list = self._get_export_dir_list() access_spec = "/%s(%s)" % (share['name'], access['access_to']) if cbk(export_dir_list, access_spec): return export_dir_new = ",".join(export_dir_list) try: args, kw = self.gluster_address.make_gluster_args( 'volume', 'set', self.gluster_address.volume, _nfs_export_dir, export_dir_new) self._execute(*args, **kw) except exception.ProcessExecutionError as exc: LOG.error(_("Error in gluster volume set: %s") % exc.stderr) raise def allow_access(self, context, share, access): """NFS export a dir to a volume""" self._manage_access(context, share, access, lambda dl, acc: True if acc in dl else dl.append(acc)) def deny_access(self, context, share, access): """Deny access to the share.""" self._manage_access(context, share, access, lambda dl, acc: True if acc not in dl else dl.remove(acc)) def get_network_allocations_number(self): """GlusterFS driver does not need to create VIFS""" return 0 def setup_network(self, network_info): """Nothing to set up""" pass manila-2013.2.dev175.gbf1a399/manila/share/drivers/lvm.py0000664000175000017500000006071112301410454022706 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LVM Driver for shares. """ import ConfigParser import math import os import re from manila import exception from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.share import driver from manila import utils from oslo.config import cfg LOG = logging.getLogger(__name__) share_opts = [ cfg.StrOpt('share_export_root', default='$state_path/mnt', help='Base folder where exported shares are located'), cfg.StrOpt('share_export_ip', default=None, help='IP to be added to export string'), cfg.StrOpt('smb_config_path', default='$state_path/smb.conf', help="Path to smb config"), cfg.IntOpt('share_lvm_mirrors', default=0, help='If set, create lvms with multiple mirrors. 
Note that ' 'this requires lvm_mirrors + 2 pvs with available space'), cfg.StrOpt('share_volume_group', default='stack-shares', help='Name for the VG that will contain exported shares'), cfg.ListOpt('share_lvm_helpers', default=[ 'CIFS=manila.share.drivers.lvm.CIFSNetConfHelper', 'NFS=manila.share.drivers.lvm.NFSHelper', ], help='Specify list of share export helpers.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) class LVMShareDriver(driver.ExecuteMixin, driver.ShareDriver): """Executes commands relating to Shares.""" def __init__(self, db, *args, **kwargs): """Do initialization.""" super(LVMShareDriver, self).__init__(*args, **kwargs) self.db = db self.configuration.append_config_values(share_opts) self._helpers = None def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" out, err = self._execute('vgs', '--noheadings', '-o', 'name', run_as_root=True) volume_groups = out.split() if self.configuration.share_volume_group not in volume_groups: msg = (_("share volume group %s doesn't exist") % self.configuration.share_volume_group) raise exception.InvalidParameterValue(err=msg) if not self.configuration.share_export_ip: msg = (_("share_export_ip doesn't specified")) raise exception.InvalidParameterValue(err=msg) def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(LVMShareDriver, self).do_setup(context) self._setup_helpers() for helper in self._helpers.values(): helper.init() def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.share_lvm_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) #TODO(rushiagr): better way to handle configuration # instead of just passing to the helper self._helpers[share_proto.upper()] = helper(self._execute, self.configuration) def _local_path(self, share): # NOTE(vish): stops deprecation warning escaped_group = \ 
self.configuration.share_volume_group.replace('-', '--') escaped_name = share['name'].replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def _allocate_container(self, share): sizestr = '%sG' % share['size'] cmd = ['lvcreate', '-L', sizestr, '-n', share['name'], self.configuration.share_volume_group] if self.configuration.share_lvm_mirrors: cmd += ['-m', self.configuration.share_lvm_mirrors, '--nosync'] terras = int(sizestr[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD cmd += ['-R', str(rsize)] self._try_execute(*cmd, run_as_root=True) device_name = self._local_path(share) self._execute('mkfs.ext4', device_name, run_as_root=True) def _deallocate_container(self, share_name): """Deletes a logical volume for share.""" # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority try: self._try_execute('lvremove', '-f', "%s/%s" % (self.configuration.share_volume_group, share_name), run_as_root=True) except exception.ProcessExecutionError as exc: if "not found" not in exc.stderr: LOG.error(_("Error deleting volume: %s") % exc.stderr) raise LOG.error(_("Volume not found: %s") % exc.stderr) def get_share_stats(self, refresh=False): """Get share status. If 'refresh' is True, run update the stats first.""" if refresh: self._update_share_status() return self._stats def _update_share_status(self): """Retrieve status info from share volume group.""" LOG.debug(_("Updating share status")) data = {} # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. data["share_backend_name"] = 'LVM' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' #TODO(rushiagr): Pick storage_protocol from the helper used. 
data["storage_protocol"] = 'NFS_CIFS' data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 data['reserved_percentage'] = \ self.configuration.reserved_share_percentage data['QoS_support'] = False try: out, err = self._execute('vgs', '--noheadings', '--nosuffix', '--unit=G', '-o', 'name,size,free', self.configuration.share_volume_group, run_as_root=True) except exception.ProcessExecutionError as exc: LOG.error(_("Error retrieving volume status: %s") % exc.stderr) out = False if out: share = out.split() data['total_capacity_gb'] = float(share[1]) data['free_capacity_gb'] = float(share[2]) self._stats = data def create_share(self, context, share): self._allocate_container(share) #create file system device_name = self._local_path(share) mount_path = self._get_mount_path(share) location = self._get_helper(share).create_export(mount_path, share['name']) self._mount_device(share, device_name) #TODO(rushiagr): what is the provider_location? realy needed? return location def create_share_from_snapshot(self, context, share, snapshot): """Is called to create share from snapshot.""" self._allocate_container(share) device_name = self._local_path(snapshot) self._copy_volume(device_name, self._local_path(share), snapshot['share_size']) mount_path = self._get_mount_path(share) location = self._get_helper(share).create_export(mount_path, share['name']) self._mount_device(share, device_name) #TODO(rushiagr): what is the provider_location? realy needed? 
return location def delete_share(self, context, share): self._remove_export(context, share) self._delete_share(context, share) self._deallocate_container(share['name']) def _remove_export(self, ctx, share): """Removes an access rules for a share.""" mount_path = self._get_mount_path(share) if os.path.exists(mount_path): #umount, may be busy try: self._execute('umount', '-f', mount_path, run_as_root=True) except exception.ProcessExecutionError, exc: if 'device is busy' in str(exc): raise exception.ShareIsBusy(share_name=share['name']) else: LOG.info('Unable to umount: %s', exc) #remove dir try: os.rmdir(mount_path) except OSError: LOG.info('Unable to delete %s', mount_path) def create_snapshot(self, context, snapshot): """Creates a snapshot.""" orig_lv_name = "%s/%s" % (self.configuration.share_volume_group, snapshot['share_name']) self._try_execute('lvcreate', '-L', '%sG' % snapshot['share_size'], '--name', snapshot['name'], '--snapshot', orig_lv_name, run_as_root=True) def ensure_share(self, ctx, share): """Ensure that storage are mounted and exported.""" device_name = self._local_path(share) location = self._mount_device(share, device_name) self._get_helper(share).create_export(location, share['name'], recreate=True) def _delete_share(self, ctx, share): """Delete a share.""" try: location = self._get_mount_path(share) self._get_helper(share).remove_export(location, share['name']) except exception.ProcessExecutionError: LOG.info("Can't remove share %r" % share['id']) except exception.InvalidShare, exc: LOG.info(exc.message) def delete_snapshot(self, context, snapshot): """Deletes a snapshot.""" self._deallocate_container(snapshot['name']) def allow_access(self, ctx, share, access): """Allow access to the share.""" location = self._get_mount_path(share) self._get_helper(share).allow_access(location, share['name'], access['access_type'], access['access_to']) def deny_access(self, ctx, share, access): """Allow access to the share.""" location = 
self._get_mount_path(share) self._get_helper(share).deny_access(location, share['name'], access['access_type'], access['access_to']) def _get_helper(self, share): if share['share_proto'].startswith('NFS'): return self._helpers['NFS'] elif share['share_proto'].startswith('CIFS'): return self._helpers['CIFS'] else: raise exception.InvalidShare(reason='Wrong share type') def _mount_device(self, share, device_name): """Mount LVM share and ignore if already mounted.""" mount_path = self._get_mount_path(share) self._execute('mkdir', '-p', mount_path) try: self._execute('mount', device_name, mount_path, run_as_root=True, check_exit_code=True) self._execute('chmod', '777', mount_path, run_as_root=True, check_exit_code=True) except exception.ProcessExecutionError as exc: if 'already mounted' in exc.stderr: LOG.warn(_("%s is already mounted"), device_name) else: raise return mount_path def _get_mount_path(self, share): """Returns path where share is mounted.""" return os.path.join(self.configuration.share_export_root, share['name']) def _copy_volume(self, srcstr, deststr, size_in_g): # Use O_DIRECT to avoid thrashing the system buffer cache extra_flags = ['iflag=direct', 'oflag=direct'] # Check whether O_DIRECT is supported try: self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr, *extra_flags, run_as_root=True) except exception.ProcessExecutionError: extra_flags = [] # Perform the copy self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % (size_in_g * 1024), 'bs=1M', *extra_flags, run_as_root=True) def get_network_allocations_number(self): """LVM driver does not need to create VIFS""" return 0 def setup_network(self, network_info): """Nothing to set up""" pass class NASHelperBase(object): """Interface to work with share.""" def __init__(self, execute, config_object): self.configuration = config_object self._execute = execute def init(self): pass def create_export(self, local_path, share_name, recreate=False): """Create new export, delete old 
one if exists.""" raise NotImplementedError() def remove_export(self, local_path, share_name): """Remove export.""" raise NotImplementedError() def allow_access(self, local_path, share_name, access_type, access): """Allow access to the host.""" raise NotImplementedError() def deny_access(self, local_path, share_name, access_type, access, force=False): """Deny access to the host.""" raise NotImplementedError() class NFSHelper(NASHelperBase): """Interface to work with share.""" def __init__(self, execute, config_object): super(NFSHelper, self).__init__(execute, config_object) try: self._execute('exportfs', check_exit_code=True, run_as_root=True) except exception.ProcessExecutionError: raise exception.Error('NFS server not found') def create_export(self, local_path, share_name, recreate=False): """Create new export, delete old one if exists.""" return ':'.join([self.configuration.share_export_ip, local_path]) def remove_export(self, local_path, share_name): """Remove export.""" pass def allow_access(self, local_path, share_name, access_type, access): """Allow access to the host""" if access_type != 'ip': reason = 'only ip access type allowed' raise exception.InvalidShareAccess(reason) #check if presents in export out, _ = self._execute('exportfs', run_as_root=True) out = re.search(re.escape(local_path) + '[\s\n]*' + re.escape(access), out) if out is not None: raise exception.ShareAccessExists(access_type=access_type, access=access) self._execute('exportfs', '-o', 'rw,no_subtree_check', ':'.join([access, local_path]), run_as_root=True, check_exit_code=True) def deny_access(self, local_path, share_name, access_type, access, force=False): """Deny access to the host.""" self._execute('exportfs', '-u', ':'.join([access, local_path]), run_as_root=True, check_exit_code=False) class CIFSHelper(NASHelperBase): """Class provides functionality to operate with cifs shares""" def __init__(self, execute, config_object): """Store executor and configuration path.""" super(CIFSHelper, 
self).__init__(execute, config_object) self.config = self.configuration.smb_config_path self.test_config = "%s_" % (self.config,) def init(self): """Initialize environment.""" self._recreate_config() self._ensure_daemon_started() def create_export(self, local_path, share_name, recreate=False): """Create new export, delete old one if exists.""" parser = ConfigParser.ConfigParser() parser.read(self.config) #delete old one if parser.has_section(share_name): if recreate: parser.remove_section(share_name) else: raise exception.Error('Section exists') #Create new one parser.add_section(share_name) parser.set(share_name, 'path', local_path) parser.set(share_name, 'browseable', 'yes') parser.set(share_name, 'guest ok', 'yes') parser.set(share_name, 'read only', 'no') parser.set(share_name, 'writable', 'yes') parser.set(share_name, 'create mask', '0755') parser.set(share_name, 'hosts deny', '0.0.0.0/0') # denying all ips parser.set(share_name, 'hosts allow', '127.0.0.1') #NOTE(rushiagr): ensure that local_path dir is existing if not os.path.exists(local_path): os.makedirs(local_path) self._execute('chown', 'nobody', '-R', local_path, run_as_root=True) self._update_config(parser) return '//%s/%s' % (self.configuration.share_export_ip, share_name) def remove_export(self, local_path, share_name): """Remove export.""" parser = ConfigParser.ConfigParser() parser.read(self.config) #delete old one if parser.has_section(share_name): parser.remove_section(share_name) self._update_config(parser) self._execute('smbcontrol', 'all', 'close-share', share_name, run_as_root=True) def allow_access(self, local_path, share_name, access_type, access): """Allow access to the host.""" if access_type != 'ip': reason = 'only ip access type allowed' raise exception.InvalidShareAccess(reason) parser = ConfigParser.ConfigParser() parser.read(self.config) hosts = parser.get(share_name, 'hosts allow') if access in hosts.split(): raise exception.ShareAccessExists(access_type=access_type, access=access) 
hosts += ' %s' % (access,) parser.set(share_name, 'hosts allow', hosts) self._update_config(parser) def deny_access(self, local_path, share_name, access_type, access, force=False): """Deny access to the host.""" parser = ConfigParser.ConfigParser() try: parser.read(self.config) hosts = parser.get(share_name, 'hosts allow') hosts = hosts.replace(' %s' % (access,), '', 1) parser.set(share_name, 'hosts allow', hosts) self._update_config(parser) except ConfigParser.NoSectionError: if not force: raise def _ensure_daemon_started(self): """ FYI: smbd starts at least two processes. """ out, _ = self._execute(*'ps -C smbd -o args='.split(), check_exit_code=False) processes = [process.strip() for process in out.split('\n') if process.strip()] cmd = 'smbd -s %s -D' % (self.config,) running = False for process in processes: if not process.endswith(cmd): #alternatively exit raise exception.Error('smbd already started with wrong config') running = True if not running: self._execute(*cmd.split(), run_as_root=True) def _recreate_config(self): """create new SAMBA configuration file.""" if os.path.exists(self.config): os.unlink(self.config) parser = ConfigParser.ConfigParser() parser.add_section('global') parser.set('global', 'security', 'user') parser.set('global', 'server string', '%h server (Samba, Openstack)') self._update_config(parser, restart=False) def _update_config(self, parser, restart=True): """Check if new configuration is correct and save it.""" #Check that configuration is correct with open(self.test_config, 'w') as fp: parser.write(fp) self._execute('testparm', '-s', self.test_config, check_exit_code=True) #save it with open(self.config, 'w') as fp: parser.write(fp) #restart daemon if necessary if restart: self._execute(*'pkill -HUP smbd'.split(), run_as_root=True) class CIFSNetConfHelper(NASHelperBase): """Manage shares in samba server by net conf tool. Class provides functionality to operate with CIFS shares. 
Samba server should be configured to use registry as configuration backend to allow dynamically share managements. There are two ways to done that, one of them is to add specific parameter in the global configuration section at smb.conf: [global] include = registry For more inforation see smb.conf(5). """ def create_export(self, local_path, share_name, recreate=False): """Create share at samba server.""" create_cmd = ('net', 'conf', 'addshare', share_name, local_path, 'writeable=y', 'guest_ok=y') try: self._execute(*create_cmd, run_as_root=True) except exception.ProcessExecutionError as e: if 'already exists' in e.stderr: if recreate: self._execute('net', 'conf', 'delshare', share_name, run_as_root=True) self._execute(*create_cmd, run_as_root=True) else: msg = _('Share section %r already defined.') % (share_name) raise exception.ShareBackendException(msg=msg) else: raise parameters = { 'browseable': 'yes', 'create mask': '0755', 'hosts deny': '0.0.0.0/0', # deny all 'hosts allow': '127.0.0.1', } for name, value in parameters.items(): self._execute('net', 'conf', 'setparm', share_name, name, value, run_as_root=True) return '//%s/%s' % (self.configuration.share_export_ip, share_name) def remove_export(self, local_path, share_name): """Remove share definition from samba server.""" try: self._execute('net', 'conf', 'delshare', share_name, run_as_root=True) except exception.ProcessExecutionError as e: if 'SBC_ERR_NO_SUCH_SERVICE' not in e.stderr: raise self._execute('smbcontrol', 'all', 'close-share', share_name, run_as_root=True) def allow_access(self, local_path, share_name, access_type, access): """Add to allow hosts additional access rule.""" if access_type != 'ip': reason = _('only ip access type allowed') raise exception.InvalidShareAccess(reason=reason) hosts = self._get_allow_hosts(share_name) if access in hosts: raise exception.ShareAccessExists(access_type=access_type, access=access) hosts.append(access) self._set_allow_hosts(hosts, share_name) def 
deny_access(self, local_path, share_name, access_type, access, force=False): """Remove from allow hosts permit rule.""" try: hosts = self._get_allow_hosts(share_name) hosts.remove(access) self._set_allow_hosts(hosts, share_name) except exception.ProcessExecutionError as e: if not ('does not exist' in e.stdout and force): raise def _get_allow_hosts(self, share_name): (out, _) = self._execute('net', 'conf', 'getparm', share_name, 'hosts allow', run_as_root=True) return out.split() def _set_allow_hosts(self, hosts, share_name): value = ' '.join(hosts) self._execute('net', 'conf', 'setparm', share_name, 'hosts allow', value, run_as_root=True) manila-2013.2.dev175.gbf1a399/manila/share/__init__.py0000664000175000017500000000207412301410454022167 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from manila.share import ' elsewhere. 
from manila.common import config import manila.openstack.common.importutils as import_utils CONF = config.CONF API = import_utils.import_class(CONF.share_api_class) manila-2013.2.dev175.gbf1a399/manila/share/api.py0000664000175000017500000004403512301410454021204 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to shares. 
""" from manila.db import base from manila import exception from manila.openstack.common import excutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils from manila import policy from manila import quota from manila.scheduler import rpcapi as scheduler_rpcapi from manila.share import rpcapi as share_rpcapi from oslo.config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) GB = 1048576 * 1024 QUOTAS = quota.QUOTAS class API(base.Base): """API for interacting with the share manager.""" def __init__(self, db_driver=None): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.share_rpcapi = share_rpcapi.ShareAPI() super(API, self).__init__(db_driver) def create(self, context, share_proto, size, name, description, snapshot=None, availability_zone=None, metadata=None, share_network_id=None): """Create new share.""" policy.check_policy(context, 'share', 'create') self._check_metadata_properties(context, metadata) if snapshot is not None: if snapshot['status'] != 'available': msg = _('status must be available') raise exception.InvalidShareSnapshot(reason=msg) if not size: size = snapshot['size'] snapshot_id = snapshot['id'] else: snapshot_id = None def as_int(s): try: return int(s) except (ValueError, TypeError): return s # tolerate size as stringified int size = as_int(size) if not isinstance(size, int) or size <= 0: msg = (_("Share size '%s' must be an integer and greater than 0") % size) raise exception.InvalidInput(reason=msg) if snapshot and size < snapshot['size']: msg = (_("Share size '%s' must be equal or greater " "than snapshot size") % size) raise exception.InvalidInput(reason=msg) #TODO(rushiagr): Find a suitable place to keep all the allowed # share types so that it becomes easier to add one if share_proto.lower() not in ['nfs', 'cifs']: msg = (_("Invalid share type provided: %s") % share_proto) raise exception.InvalidInput(reason=msg) try: reservations = QUOTAS.reserve(context, shares=1, 
gigabytes=size) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'gigabytes' in overs: msg = _("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG share (%(d_consumed)dG of %(d_quota)dG " "already consumed)") LOG.warn(msg % {'s_pid': context.project_id, 's_size': size, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.ShareSizeExceedsAvailableQuota() elif 'shares' in overs: msg = _("Quota exceeded for %(s_pid)s, tried to create " "share (%(d_consumed)d shares " "already consumed)") LOG.warn(msg % {'s_pid': context.project_id, 'd_consumed': _consumed('shares')}) raise exception.ShareLimitExceeded(allowed=quotas['shares']) if availability_zone is None: availability_zone = CONF.storage_availability_zone options = {'size': size, 'user_id': context.user_id, 'project_id': context.project_id, 'snapshot_id': snapshot_id, 'share_network_id': share_network_id, 'availability_zone': availability_zone, 'metadata': metadata, 'status': "creating", 'scheduled_at': timeutils.utcnow(), 'display_name': name, 'display_description': description, 'share_proto': share_proto, } try: share = self.db.share_create(context, options) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: self.db.share_delete(context, share['id']) finally: QUOTAS.rollback(context, reservations) request_spec = {'share_properties': options, 'share_proto': share_proto, 'share_id': share['id'], 'snapshot_id': share['snapshot_id'], } filter_properties = {} self.scheduler_rpcapi.create_share( context, CONF.share_topic, share['id'], snapshot_id, request_spec=request_spec, filter_properties=filter_properties) return share @policy.wrap_check_policy('share') def delete(self, context, share): """Delete share.""" if context.is_admin and context.project_id != share['project_id']: 
project_id = share['project_id'] else: project_id = context.project_id share_id = share['id'] if not share['host']: try: reservations = QUOTAS.reserve(context, project_id=project_id, shares=-1, gigabytes=-share['size']) except Exception: reservations = None LOG.exception(_("Failed to update quota for deleting share")) self.db.share_delete(context.elevated(), share_id) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) return if share['status'] not in ["available", "error"]: msg = _("Share status must be available or error") raise exception.InvalidShare(reason=msg) snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) if len(snapshots): msg = _("Share still has %d dependent snapshots") % len(snapshots) raise exception.InvalidShare(reason=msg) now = timeutils.utcnow() share = self.db.share_update(context, share_id, {'status': 'deleting', 'terminated_at': now}) self.share_rpcapi.delete_share(context, share) def create_snapshot(self, context, share, name, description, force=False): policy.check_policy(context, 'share', 'create_snapshot', share) if ((not force) and (share['status'] != "available")): msg = _("must be available") raise exception.InvalidShare(reason=msg) size = share['size'] try: reservations = QUOTAS.reserve(context, snapshots=1, gigabytes=size) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'gigabytes' in overs: msg = _("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed)") LOG.warn(msg % {'s_pid': context.project_id, 's_size': size, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.ShareSizeExceedsAvailableQuota() elif 'snapshots' in overs: msg = _("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed)") 
LOG.warn(msg % {'s_pid': context.project_id, 'd_consumed': _consumed('snapshots')}) raise exception.SnapshotLimitExceeded( allowed=quotas['snapshots']) options = {'share_id': share['id'], 'size': share['size'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': "creating", 'progress': '0%', 'share_size': share['size'], 'display_name': name, 'display_description': description, 'share_proto': share['share_proto'], 'export_location': share['export_location']} try: snapshot = self.db.share_snapshot_create(context, options) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: self.db.snapshot_delete(context, share['id']) finally: QUOTAS.rollback(context, reservations) self.share_rpcapi.create_snapshot(context, share, snapshot) return snapshot @policy.wrap_check_policy('share') def delete_snapshot(self, context, snapshot, force=False): if not force and snapshot['status'] not in ["available", "error"]: msg = _("Share Snapshot status must be available or ") raise exception.InvalidShareSnapshot(reason=msg) self.db.share_snapshot_update(context, snapshot['id'], {'status': 'deleting'}) share = self.db.share_get(context, snapshot['share_id']) self.share_rpcapi.delete_snapshot(context, snapshot, share['host']) @policy.wrap_check_policy('share') def update(self, context, share, fields): return self.db.share_update(context, share['id'], fields) @policy.wrap_check_policy('share') def snapshot_update(self, context, snapshot, fields): return self.db.share_snapshot_update(context, snapshot['id'], fields) def get(self, context, share_id): rv = self.db.share_get(context, share_id) policy.check_policy(context, 'share', 'get', rv) return rv def get_all(self, context, search_opts={}): policy.check_policy(context, 'share', 'get_all') search_opts = search_opts or {} if (context.is_admin and 'all_tenants' in search_opts): # Need to remove all_tenants to pass the filtering below. 
del search_opts['all_tenants'] shares = self.db.share_get_all(context) else: shares = self.db.share_get_all_by_project(context, context.project_id) if search_opts: LOG.debug(_("Searching by: %s") % str(search_opts)) results = [] not_found = object() for share in shares: for opt, value in search_opts.iteritems(): if share.get(opt, not_found) != value: break else: results.append(share) shares = results return shares def get_snapshot(self, context, snapshot_id): policy.check_policy(context, 'share', 'get_snapshot') rv = self.db.share_snapshot_get(context, snapshot_id) return dict(rv.iteritems()) def get_all_snapshots(self, context, search_opts=None): policy.check_policy(context, 'share', 'get_all_snapshots') search_opts = search_opts or {} if (context.is_admin and 'all_tenants' in search_opts): # Need to remove all_tenants to pass the filtering below. del search_opts['all_tenants'] snapshots = self.db.share_snapshot_get_all(context) else: snapshots = self.db.share_snapshot_get_all_by_project( context, context.project_id) if search_opts: LOG.debug(_("Searching by: %s") % str(search_opts)) results = [] not_found = object() for snapshot in snapshots: for opt, value in search_opts.iteritems(): if snapshot.get(opt, not_found) != value: break else: results.append(snapshot) snapshots = results return snapshots def allow_access(self, ctx, share, access_type, access_to): """Allow access to share.""" if not share['host']: msg = _("Share host is None") raise exception.InvalidShare(reason=msg) if share['status'] not in ["available"]: msg = _("Share status must be available") raise exception.InvalidShare(reason=msg) policy.check_policy(ctx, 'share', 'allow_access') values = {'share_id': share['id'], 'access_type': access_type, 'access_to': access_to} access = [a for a in self.db.share_access_get_all_by_type_and_access( ctx, share['id'], access_type, access_to) if a['state'] != 'error'] if access: raise exception.ShareAccessExists(access_type=access_type, access=access_to) access = 
self.db.share_access_create(ctx, values) self.share_rpcapi.allow_access(ctx, share, access) return access def deny_access(self, ctx, share, access): """Deny access to share.""" policy.check_policy(ctx, 'share', 'deny_access') #First check state of the target share if not share['host']: msg = _("Share host is None") raise exception.InvalidShare(reason=msg) if share['status'] not in ["available"]: msg = _("Share status must be available") raise exception.InvalidShare(reason=msg) #Then check state of the access rule if access['state'] == access.STATE_ERROR: self.db.share_access_delete(ctx, access["id"]) elif access['state'] == access.STATE_ACTIVE: self.db.share_access_update(ctx, access["id"], {'state': access.STATE_DELETING}) self.share_rpcapi.deny_access(ctx, share, access) else: msg = _("Access policy should be active or in error state") raise exception.InvalidShareAccess(reason=msg) #update share state and send message to manager def access_get_all(self, context, share): """Returns all access rules for share.""" policy.check_policy(context, 'share', 'access_get_all') rules = self.db.share_access_get_all_for_share(context, share['id']) return [{'id': rule.id, 'access_type': rule.access_type, 'access_to': rule.access_to, 'state': rule.state} for rule in rules] def access_get(self, context, access_id): """Returns access rule with the id.""" policy.check_policy(context, 'share', 'access_get') rule = self.db.share_access_get(context, access_id) return rule @policy.wrap_check_policy('share') def get_share_metadata(self, context, share): """Get all metadata associated with a share.""" rv = self.db.share_metadata_get(context, share['id']) return dict(rv.iteritems()) @policy.wrap_check_policy('share') def delete_share_metadata(self, context, share, key): """Delete the given metadata item from a share.""" self.db.share_metadata_delete(context, share['id'], key) def _check_metadata_properties(self, context, metadata=None): if not metadata: metadata = {} for k, v in 
metadata.iteritems(): if len(k) == 0: msg = _("Metadata property key is blank") LOG.warn(msg) raise exception.InvalidShareMetadata(message=msg) if len(k) > 255: msg = _("Metadata property key is greater than 255 characters") LOG.warn(msg) raise exception.InvalidShareMetadataSize(message=msg) if len(v) > 1023: msg = _("Metadata property value is " "greater than 1023 characters") LOG.warn(msg) raise exception.InvalidShareMetadataSize(message=msg) @policy.wrap_check_policy('share') def update_share_metadata(self, context, share, metadata, delete=False): """Updates or creates share metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ orig_meta = self.get_share_metadata(context, share) if delete: _metadata = metadata else: _metadata = orig_meta.copy() _metadata.update(metadata) self._check_metadata_properties(context, _metadata) self.db.share_metadata_update(context, share['id'], _metadata, delete) return _metadata manila-2013.2.dev175.gbf1a399/manila/share/manager.py0000664000175000017500000002733312301410454022047 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NAS share manager managers creating shares and access rights. **Related Flags** :share_driver: Used by :class:`ShareManager`. Defaults to :class:`manila.share.drivers.lvm.LVMShareDriver`. 
""" from manila.common import constants from manila import context from manila import exception from manila import manager from manila import network from manila.openstack.common import excutils from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils from manila import quota from manila.share.configuration import Configuration from oslo.config import cfg LOG = logging.getLogger(__name__) share_manager_opts = [ cfg.StrOpt('share_driver', default='manila.share.drivers.lvm.LVMShareDriver', help='Driver to use for share creation'), ] CONF = cfg.CONF CONF.register_opts(share_manager_opts) QUOTAS = quota.QUOTAS class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" RPC_API_VERSION = '1.1' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" self.configuration = Configuration(share_manager_opts, config_group=service_name) super(ShareManager, self).__init__(service_name='share', *args, **kwargs) if not share_driver: share_driver = self.configuration.share_driver self.driver = importutils.import_object( share_driver, self.db, configuration=self.configuration) self.network_api = network.API() def init_host(self): """Initialization for a standalone service.""" ctxt = context.get_admin_context() self.driver.do_setup(ctxt) self.driver.check_for_setup_error() shares = self.db.share_get_all_by_host(ctxt, self.host) LOG.debug(_("Re-exporting %s shares"), len(shares)) for share in shares: if share['status'] in ['available', 'in-use']: self.driver.ensure_share(ctxt, share) rules = self.db.share_access_get_all_for_share(ctxt, share['id']) for access_ref in rules: if access_ref['state'] == access_ref.STATE_ACTIVE: try: self.driver.allow_access(ctxt, share, access_ref) except exception.ShareAccessExists: pass else: LOG.info(_("share %s: skipping export"), share['name']) 
self.publish_service_capabilities(ctxt) def _setup_share_network(self, context, network_ref): allocation_number = self.driver.get_network_allocations_number() if allocation_number: network_info = self.network_api.allocate_network( context, network_ref, count=allocation_number) try: self.driver.setup_network(network_info) return network_info except exception.ManilaException as e: with excutils.save_and_reraise_exception(): self.db.share_network_update(context, network_ref['id'], {'status': 'error'}) def create_share(self, context, share_id, request_spec=None, filter_properties=None, snapshot_id=None): """Creates a share.""" context = context.elevated() if filter_properties is None: filter_properties = {} share_ref = self.db.share_get(context, share_id) if snapshot_id is not None: snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) else: snapshot_ref = None network_id = share_ref.get('share_network_id', None) if network_id: network_ref = self.db.share_network_get( context, share_ref['share_network_id']) if network_ref['status'] != constants.STATUS_ACTIVE: if network_ref['status'] in [constants.STATUS_INACTIVE, constants.STATUS_NEW]: network_ref = self._setup_share_network(context, network_ref) else: msg = _("Network status should be ACTIVE, INACTIVE or NEW") LOG.error(msg) raise exception.InvalidShareNetwork(reason=msg) else: network_ref = {} share_ref['network_info'] = network_ref try: if snapshot_ref: export_location = self.driver.create_share_from_snapshot( context, share_ref, snapshot_ref) else: export_location = self.driver.create_share(context, share_ref) self.db.share_update(context, share_id, {'export_location': export_location}) except Exception: with excutils.save_and_reraise_exception(): self.db.share_update(context, share_id, {'status': 'error'}) else: self.db.share_update(context, share_id, {'status': 'available', 'launched_at': timeutils.utcnow()}) def delete_share(self, context, share_id): """Delete a share.""" context = context.elevated() 
share_ref = self.db.share_get(context, share_id) if context.project_id != share_ref['project_id']: project_id = share_ref['project_id'] else: project_id = context.project_id rules = self.db.share_access_get_all_for_share(context, share_id) try: for access_ref in rules: self._deny_access(context, access_ref, share_ref) self.driver.delete_share(context, share_ref) except Exception: with excutils.save_and_reraise_exception(): self.db.share_update(context, share_id, {'status': 'error_deleting'}) try: reservations = QUOTAS.reserve(context, project_id=project_id, shares=-1, gigabytes=-share_ref['size']) except Exception: reservations = None LOG.exception(_("Failed to update usages deleting share")) self.db.share_delete(context, share_id) LOG.info(_("share %s: deleted successfully"), share_ref['name']) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) def create_snapshot(self, context, share_id, snapshot_id): """Create snapshot for share.""" snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) try: snap_name = snapshot_ref['name'] model_update = self.driver.create_snapshot(context, snapshot_ref) if model_update: self.db.share_snapshot_update(context, snapshot_ref['id'], model_update) except Exception: with excutils.save_and_reraise_exception(): self.db.share_snapshot_update(context, snapshot_ref['id'], {'status': 'error'}) self.db.share_snapshot_update(context, snapshot_ref['id'], {'status': 'available', 'progress': '100%'}) return snapshot_id def delete_snapshot(self, context, snapshot_id): """Delete share snapshot.""" context = context.elevated() snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) if context.project_id != snapshot_ref['project_id']: project_id = snapshot_ref['project_id'] else: project_id = context.project_id try: self.driver.delete_snapshot(context, snapshot_ref) except exception.ShareSnapshotIsBusy: self.db.share_snapshot_update(context, snapshot_ref['id'], {'status': 'available'}) except Exception: 
with excutils.save_and_reraise_exception(): self.db.share_snapshot_update(context, snapshot_ref['id'], {'status': 'error_deleting'}) else: self.db.share_snapshot_destroy(context, snapshot_id) try: reservations = QUOTAS.reserve(context, project_id=project_id, snapshots=-1, gigabytes=-snapshot_ref['size']) except Exception: reservations = None LOG.exception(_("Failed to update usages deleting snapshot")) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) def allow_access(self, context, access_id): """Allow access to some share.""" try: access_ref = self.db.share_access_get(context, access_id) share_ref = self.db.share_get(context, access_ref['share_id']) if access_ref['state'] == access_ref.STATE_NEW: self.driver.allow_access(context, share_ref, access_ref) self.db.share_access_update( context, access_id, {'state': access_ref.STATE_ACTIVE}) except Exception: with excutils.save_and_reraise_exception(): self.db.share_access_update( context, access_id, {'state': access_ref.STATE_ERROR}) def deny_access(self, context, access_id): """Deny access to some share.""" access_ref = self.db.share_access_get(context, access_id) share_ref = self.db.share_get(context, access_ref['share_id']) self._deny_access(context, access_ref, share_ref) def _deny_access(self, context, access_ref, share_ref): access_id = access_ref['id'] try: self.driver.deny_access(context, share_ref, access_ref) except Exception: with excutils.save_and_reraise_exception(): self.db.share_access_update( context, access_id, {'state': access_ref.STATE_ERROR}) self.db.share_access_delete(context, access_id) @manager.periodic_task def _report_driver_status(self, context): LOG.info(_('Updating share status')) share_stats = self.driver.get_share_stats(refresh=True) if share_stats: self.update_service_capabilities(share_stats) def publish_service_capabilities(self, context): """Collect driver status and then publish it.""" self._report_driver_status(context) 
self._publish_service_capabilities(context) manila-2013.2.dev175.gbf1a399/manila/share/driver.py0000664000175000017500000001237312301410454021726 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Drivers for shares. """ import ConfigParser import os import re import time from manila import exception from manila.openstack.common import log as logging from manila.share.configuration import Configuration from manila import utils from oslo.config import cfg LOG = logging.getLogger(__name__) share_opts = [ #NOTE(rushiagr): Reasonable to define this option at only one place. 
cfg.IntOpt('num_shell_tries', default=3, help='number of times to attempt to run flakey shell commands'), cfg.IntOpt('reserved_share_percentage', default=0, help='The percentage of backend capacity reserved'), cfg.StrOpt('share_backend_name', default=None, help='The backend name for a given driver implementation'), ] CONF = cfg.CONF CONF.register_opts(share_opts) class ExecuteMixin(object): """Provides an executable functionality to a driver class.""" def __init__(self, *args, **kwargs): self.db = None self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(share_opts) self.set_execute(kwargs.pop('execute', utils.execute)) def set_execute(self, execute): self._execute = execute def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. tries = 0 while True: try: self._execute(*command, **kwargs) return True except exception.ProcessExecutionError: tries += 1 if tries >= self.configuration.num_shell_tries: raise LOG.exception(_("Recovering from a failed execute. 
" "Try number %s"), tries) time.sleep(tries ** 2) class ShareDriver(object): """Class defines interface of NAS driver.""" def __init__(self, *args, **kwargs): super(ShareDriver, self).__init__() self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(share_opts) def create_share(self, context, share): """Is called to create share.""" raise NotImplementedError() def create_share_from_snapshot(self, context, share, snapshot): """Is called to create share from snapshot.""" raise NotImplementedError() def create_snapshot(self, context, snapshot): """Is called to create snapshot.""" raise NotImplementedError() def delete_share(self, context, share): """Is called to remove share.""" raise NotImplementedError() def delete_snapshot(self, context, snapshot): """Is called to remove snapshot.""" raise NotImplementedError() def ensure_share(self, context, share): """Invoked to sure that share is exported.""" raise NotImplementedError() def allow_access(self, context, share, access): """Allow access to the share.""" raise NotImplementedError() def deny_access(self, context, share, access): """Deny access to the share.""" raise NotImplementedError() def check_for_setup_error(self): """Check for setup error.""" pass def do_setup(self, context): """Any initialization the share driver does while starting.""" pass def get_share_stats(self, refresh=False): """Get share status. 
If 'refresh' is True, run update the stats first.""" if refresh: self._update_share_status() return self._stats def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs""" pass def setup_network(self, network_info): """Set up and configures VIFs with given network parameters""" pass def _update_share_status(self): """Retrieve status info from share group.""" LOG.debug(_("Updating share status")) data = {} backend_name = self.configuration.safe_get('share_backend_name') data["share_backend_name"] = backend_name or 'Generic_NFS' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = None data['total_capacity_gb'] = 'infinite' data['free_capacity_gb'] = 'infinite' data['reserved_percentage'] = 0 data['QoS_support'] = False self._stats = data manila-2013.2.dev175.gbf1a399/manila/openstack/0000775000175000017500000000000012301410516020737 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/__init__.py0000664000175000017500000000121612301410454023051 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
manila-2013.2.dev175.gbf1a399/manila/openstack/common/0000775000175000017500000000000012301410516022227 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/uuidutils.py0000664000175000017500000000212212301410454024626 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ UUID related utilities and helper functions. """ import uuid def generate_uuid(): return str(uuid.uuid4()) def is_uuid_like(val): """Returns validation of a value as a UUID. For our purposes, a UUID is a canonical form string: aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa """ try: return str(uuid.UUID(val)) == val except (TypeError, ValueError, AttributeError): return False manila-2013.2.dev175.gbf1a399/manila/openstack/common/exception.py0000664000175000017500000000657112301410454024611 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Exceptions common to OpenStack projects """ import logging from manila.openstack.common.gettextutils import _ _FATAL_EXCEPTION_FORMAT_ERRORS = False class Error(Exception): def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code super(ApiError, self).__init__('%s: %s' % (code, message)) class NotFound(Error): pass class UnknownScheme(Error): msg = "Unknown scheme '%s' found in URI" def __init__(self, scheme): msg = self.__class__.msg % scheme super(UnknownScheme, self).__init__(msg) class BadStoreUri(Error): msg = "The Store URI %s was malformed. Reason: %s" def __init__(self, uri, reason): msg = self.__class__.msg % (uri, reason) super(BadStoreUri, self).__init__(msg) class Duplicate(Error): pass class NotAuthorized(Error): pass class NotEmpty(Error): pass class Invalid(Error): pass class BadInputError(Exception): """Error resulting from a client sending bad input to a server""" pass class MissingArgumentError(Error): pass class DatabaseMigrationError(Error): pass class ClientConnectionError(Exception): """Error resulting from a client connecting to a server""" pass def wrap_exception(f): def _wrap(*args, **kw): try: return f(*args, **kw) except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() logging.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise _wrap.func_name = f.func_name return _wrap class OpenstackException(Exception): """ Base Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = "An unknown exception occurred" def __init__(self, **kwargs): try: self._error_string = self.message % kwargs except Exception as e: if _FATAL_EXCEPTION_FORMAT_ERRORS: raise e else: # at least get the core message out if something happened self._error_string = self.message def __str__(self): return self._error_string class MalformedRequestBody(OpenstackException): message = "Malformed message body: %(reason)s" class InvalidContentType(OpenstackException): message = "Invalid content type %(content_type)s" manila-2013.2.dev175.gbf1a399/manila/openstack/common/README0000664000175000017500000000070512301410454023112 0ustar chuckchuck00000000000000openstack-common ---------------- A number of modules from openstack-common are imported into this project. These modules are "incubating" in openstack-common and are kept in sync with the help of openstack-common's update.py script. See: http://wiki.openstack.org/CommonLibrary#Incubation The copy of the code should never be directly modified here. Please always update openstack-common first and then run the script to copy the changes across. manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/0000775000175000017500000000000012301410516023013 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/dispatcher.py0000664000175000017500000001330012301410454025511 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Code for rpc message dispatching. Messages that come in have a version number associated with them. RPC API version numbers are in the form: Major.Minor For a given message with version X.Y, the receiver must be marked as able to handle messages of version A.B, where: A = X B >= Y The Major version number would be incremented for an almost completely new API. The Minor version number would be incremented for backwards compatible changes to an existing API. A backwards compatible change could be something like adding a new method, adding an argument to an existing method (but not requiring it), or changing the type for an existing argument (but still handling the old type as well). The conversion over to a versioned API must be done on both the client side and server side of the API at the same time. However, as the code stands today, there can be both versioned and unversioned APIs implemented in the same code base. EXAMPLES ======== Nova was the first project to use versioned rpc APIs. Consider the compute rpc API as an example. The client side is in nova/compute/rpcapi.py and the server side is in nova/compute/manager.py. Example 1) Adding a new method. ------------------------------- Adding a new method is a backwards compatible change. It should be added to nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should have a specific version specified to indicate the minimum API version that must be implemented for the method to be supported. For example:: def get_host_uptime(self, ctxt, host): topic = _compute_topic(self.topic, ctxt, host, None) return self.call(ctxt, self.make_msg('get_host_uptime'), topic, version='1.1') In this case, version '1.1' is the first version that supported the get_host_uptime() method. Example 2) Adding a new parameter. 
---------------------------------- Adding a new parameter to an rpc method can be made backwards compatible. The RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. The implementation of the method must not expect the parameter to be present.:: def some_remote_method(self, arg1, arg2, newarg=None): # The code needs to deal with newarg=None for cases # where an older client sends a message without it. pass On the client side, the same changes should be made as in example 1. The minimum version that supports the new parameter should be specified. """ from manila.openstack.common.rpc import common as rpc_common class RpcDispatcher(object): """Dispatch rpc messages according to the requested API version. This class can be used as the top level 'manager' for a service. It contains a list of underlying managers that have an API_VERSION attribute. """ def __init__(self, callbacks): """Initialize the rpc dispatcher. :param callbacks: List of proxy objects that are an instance of a class with rpc methods exposed. Each proxy object should have an RPC_API_VERSION attribute. """ self.callbacks = callbacks super(RpcDispatcher, self).__init__() def dispatch(self, ctxt, version, method, namespace, **kwargs): """Dispatch a message based on a requested version. :param ctxt: The request context :param version: The requested API version from the incoming message :param method: The method requested to be called by the incoming message. :param namespace: The namespace for the requested method. If None, the dispatcher will look for a method on a callback object with no namespace set. :param kwargs: A dict of keyword arguments to be passed to the method. :returns: Whatever is returned by the underlying method that gets called. 
""" if not version: version = '1.0' had_compatible = False for proxyobj in self.callbacks: # Check for namespace compatibility try: cb_namespace = proxyobj.RPC_API_NAMESPACE except AttributeError: cb_namespace = None if namespace != cb_namespace: continue # Check for version compatibility try: rpc_api_version = proxyobj.RPC_API_VERSION except AttributeError: rpc_api_version = '1.0' is_compatible = rpc_common.version_is_compatible(rpc_api_version, version) had_compatible = had_compatible or is_compatible if not hasattr(proxyobj, method): continue if is_compatible: return getattr(proxyobj, method)(ctxt, **kwargs) if had_compatible: raise AttributeError("No such RPC function '%s'" % method) else: raise rpc_common.UnsupportedRpcVersion(version=version) manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/impl_zmq.py0000664000175000017500000006532112301410454025225 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import pprint import re import socket import sys import types import uuid import eventlet import greenlet from oslo.config import cfg from manila.openstack.common import excutils from manila.openstack.common.gettextutils import _ from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import processutils as utils from manila.openstack.common.rpc import common as rpc_common zmq = importutils.try_import('eventlet.green.zmq') # for convenience, are not modified. pformat = pprint.pformat Timeout = eventlet.timeout.Timeout LOG = rpc_common.LOG RemoteError = rpc_common.RemoteError RPCException = rpc_common.RPCException zmq_opts = [ cfg.StrOpt('rpc_zmq_bind_address', default='*', help='ZeroMQ bind address. Should be a wildcard (*), ' 'an ethernet interface, or IP. ' 'The "host" option should point or resolve to this ' 'address.'), # The module.Class to use for matchmaking. cfg.StrOpt( 'rpc_zmq_matchmaker', default=('manila.openstack.common.rpc.' 'matchmaker.MatchMakerLocalhost'), help='MatchMaker driver', ), # The following port is unassigned by IANA as of 2012-05-21 cfg.IntOpt('rpc_zmq_port', default=9501, help='ZeroMQ receiver listening port'), cfg.IntOpt('rpc_zmq_contexts', default=1, help='Number of ZeroMQ contexts, defaults to 1'), cfg.IntOpt('rpc_zmq_topic_backlog', default=None, help='Maximum number of ingress messages to locally buffer ' 'per topic. Default is unlimited.'), cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', help='Directory for holding IPC sockets'), cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), help='Name of this node. Must be a valid hostname, FQDN, or ' 'IP address. Must match "host" option, if running Nova.') ] CONF = cfg.CONF CONF.register_opts(zmq_opts) ZMQ_CTX = None # ZeroMQ Context, must be global. matchmaker = None # memoized matchmaker object def _serialize(data): """ Serialization wrapper We prefer using JSON, but it cannot encode all types. 
Error if a developer passes us bad data. """ try: return jsonutils.dumps(data, ensure_ascii=True) except TypeError: with excutils.save_and_reraise_exception(): LOG.error(_("JSON serialization failed.")) def _deserialize(data): """ Deserialization wrapper """ LOG.debug(_("Deserializing: %s"), data) return jsonutils.loads(data) class ZmqSocket(object): """ A tiny wrapper around ZeroMQ to simplify the send/recv protocol and connection management. Can be used as a Context (supports the 'with' statement). """ def __init__(self, addr, zmq_type, bind=True, subscribe=None): self.sock = _get_ctxt().socket(zmq_type) self.addr = addr self.type = zmq_type self.subscriptions = [] # Support failures on sending/receiving on wrong socket type. self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) self.can_sub = zmq_type in (zmq.SUB, ) # Support list, str, & None for subscribe arg (cast to list) do_sub = { list: subscribe, str: [subscribe], type(None): [] }[type(subscribe)] for f in do_sub: self.subscribe(f) str_data = {'addr': addr, 'type': self.socket_s(), 'subscribe': subscribe, 'bind': bind} LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) LOG.debug(_("-> bind: %(bind)s"), str_data) try: if bind: self.sock.bind(addr) else: self.sock.connect(addr) except Exception: raise RPCException(_("Could not open socket.")) def socket_s(self): """Get socket type as string.""" t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', 'DEALER') return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] def subscribe(self, msg_filter): """Subscribe.""" if not self.can_sub: raise RPCException("Cannot subscribe on this socket.") LOG.debug(_("Subscribing to %s"), msg_filter) try: self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) except Exception: return self.subscriptions.append(msg_filter) def unsubscribe(self, msg_filter): """Unsubscribe.""" if msg_filter not in 
self.subscriptions: return self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) self.subscriptions.remove(msg_filter) def close(self): if self.sock is None or self.sock.closed: return # We must unsubscribe, or we'll leak descriptors. if len(self.subscriptions) > 0: for f in self.subscriptions: try: self.sock.setsockopt(zmq.UNSUBSCRIBE, f) except Exception: pass self.subscriptions = [] try: # Default is to linger self.sock.close() except Exception: # While this is a bad thing to happen, # it would be much worse if some of the code calling this # were to fail. For now, lets log, and later evaluate # if we can safely raise here. LOG.error("ZeroMQ socket could not be closed.") self.sock = None def recv(self): if not self.can_recv: raise RPCException(_("You cannot recv on this socket.")) return self.sock.recv_multipart() def send(self, data): if not self.can_send: raise RPCException(_("You cannot send on this socket.")) self.sock.send_multipart(data) class ZmqClient(object): """Client for ZMQ sockets.""" def __init__(self, addr, socket_type=None, bind=False): if socket_type is None: socket_type = zmq.PUSH self.outq = ZmqSocket(addr, socket_type, bind=bind) def cast(self, msg_id, topic, data, envelope=False): msg_id = msg_id or 0 if not envelope: self.outq.send(map(bytes, (msg_id, topic, 'cast', _serialize(data)))) return rpc_envelope = rpc_common.serialize_msg(data[1], envelope) zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) self.outq.send(map(bytes, (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) def close(self): self.outq.close() class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call.""" def __init__(self, **kwargs): self.replies = [] super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['replies'] = self.replies return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False): if ending: return self.replies.append(reply) @classmethod def marshal(self, 
ctx): ctx_data = ctx.to_dict() return _serialize(ctx_data) @classmethod def unmarshal(self, data): return RpcContext.from_dict(_deserialize(data)) class InternalContext(object): """Used by ConsumerBase as a private context for - methods.""" def __init__(self, proxy): self.proxy = proxy self.msg_waiter = None def _get_response(self, ctx, proxy, topic, data): """Process a curried message and cast the result to topic.""" LOG.debug(_("Running func with context: %s"), ctx.to_dict()) data.setdefault('version', None) data.setdefault('args', {}) try: result = proxy.dispatch( ctx, data['version'], data['method'], data.get('namespace'), **data['args']) return ConsumerBase.normalize_reply(result, ctx.replies) except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass except rpc_common.ClientException, e: LOG.debug(_("Expected exception during message handling (%s)") % e._exc_info[1]) return {'exc': rpc_common.serialize_remote_exception(e._exc_info, log_failure=False)} except Exception: LOG.error(_("Exception during message handling")) return {'exc': rpc_common.serialize_remote_exception(sys.exc_info())} def reply(self, ctx, proxy, msg_id=None, context=None, topic=None, msg=None): """Reply to a casted call.""" # NOTE(ewindisch): context kwarg exists for Grizzly compat. # this may be able to be removed earlier than # 'I' if ConsumerBase.process were refactored. if type(msg) is list: payload = msg[-1] else: payload = msg response = ConsumerBase.normalize_reply( self._get_response(ctx, proxy, topic, payload), ctx.replies) LOG.debug(_("Sending reply")) _multi_send(_cast, ctx, topic, { 'method': '-process_reply', 'args': { 'msg_id': msg_id, # Include for Folsom compat. 'response': response } }, _msg_id=msg_id) class ConsumerBase(object): """Base Consumer.""" def __init__(self): self.private_ctx = InternalContext(None) @classmethod def normalize_reply(self, result, replies): #TODO(ewindisch): re-evaluate and document this method. 
if isinstance(result, types.GeneratorType): return list(result) elif replies: return replies else: return [result] def process(self, proxy, ctx, data): data.setdefault('version', None) data.setdefault('args', {}) # Method starting with - are # processed internally. (non-valid method name) method = data.get('method') if not method: LOG.error(_("RPC message did not include method.")) return # Internal method # uses internal context for safety. if method == '-reply': self.private_ctx.reply(ctx, proxy, **data['args']) return proxy.dispatch(ctx, data['version'], data['method'], data.get('namespace'), **data['args']) class ZmqBaseReactor(ConsumerBase): """ A consumer class implementing a centralized casting broker (PULL-PUSH) for RoundRobin requests. """ def __init__(self, conf): super(ZmqBaseReactor, self).__init__() self.mapping = {} self.proxies = {} self.threads = [] self.sockets = [] self.subscribe = {} self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) def register(self, proxy, in_addr, zmq_type_in, out_addr=None, zmq_type_out=None, in_bind=True, out_bind=True, subscribe=None): LOG.info(_("Registering reactor")) if zmq_type_in not in (zmq.PULL, zmq.SUB): raise RPCException("Bad input socktype") # Items push in. inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, subscribe=subscribe) self.proxies[inq] = proxy self.sockets.append(inq) LOG.info(_("In reactor registered")) if not out_addr: return if zmq_type_out not in (zmq.PUSH, zmq.PUB): raise RPCException("Bad output socktype") # Items push out. 
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) self.mapping[inq] = outq self.mapping[outq] = inq self.sockets.append(outq) LOG.info(_("Out reactor registered")) def consume_in_thread(self): def _consume(sock): LOG.info(_("Consuming socket")) while True: self.consume(sock) for k in self.proxies.keys(): self.threads.append( self.pool.spawn(_consume, k) ) def wait(self): for t in self.threads: t.wait() def close(self): for s in self.sockets: s.close() for t in self.threads: t.kill() class ZmqProxy(ZmqBaseReactor): """ A consumer class implementing a topic-based proxy, forwarding to IPC sockets. """ def __init__(self, conf): super(ZmqProxy, self).__init__(conf) pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\')) self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep))) self.topic_proxy = {} def consume(self, sock): ipc_dir = CONF.rpc_zmq_ipc_dir #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() topic = data[1] LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) if topic.startswith('fanout~'): sock_type = zmq.PUB topic = topic.split('.', 1)[0] elif topic.startswith('zmq_replies'): sock_type = zmq.PUB else: sock_type = zmq.PUSH if topic not in self.topic_proxy: def publisher(waiter): LOG.info(_("Creating proxy for topic: %s"), topic) try: # The topic is received over the network, # don't trust this input. if self.badchars.search(topic) is not None: emsg = _("Topic contained dangerous characters.") LOG.warn(emsg) raise RPCException(emsg) out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), sock_type, bind=True) except RPCException: waiter.send_exception(*sys.exc_info()) return self.topic_proxy[topic] = eventlet.queue.LightQueue( CONF.rpc_zmq_topic_backlog) self.sockets.append(out_sock) # It takes some time for a pub socket to open, # before we can have any faith in doing a send() to it. 
if sock_type == zmq.PUB: eventlet.sleep(.5) waiter.send(True) while(True): data = self.topic_proxy[topic].get() out_sock.send(data) LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data}) wait_sock_creation = eventlet.event.Event() eventlet.spawn(publisher, wait_sock_creation) try: wait_sock_creation.wait() except RPCException: LOG.error(_("Topic socket file creation failed.")) return try: self.topic_proxy[topic].put_nowait(data) LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % {'data': data}) except eventlet.queue.Full: LOG.error(_("Local per-topic backlog buffer full for topic " "%(topic)s. Dropping message.") % {'topic': topic}) def consume_in_thread(self): """Runs the ZmqProxy service""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) if not os.path.isdir(ipc_dir): try: utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), ipc_dir, run_as_root=True) utils.execute('chmod', '750', ipc_dir, run_as_root=True) except utils.ProcessExecutionError: with excutils.save_and_reraise_exception(): LOG.error(_("Could not create IPC directory %s") % (ipc_dir, )) try: self.register(consumption_proxy, consume_in, zmq.PULL, out_bind=True) except zmq.ZMQError: with excutils.save_and_reraise_exception(): LOG.error(_("Could not create ZeroMQ receiver daemon. " "Socket may already be in use.")) super(ZmqProxy, self).consume_in_thread() def unflatten_envelope(packenv): """Unflattens the RPC envelope. Takes a list and returns a dictionary. i.e. [1,2,3,4] => {1: 2, 3: 4} """ i = iter(packenv) h = {} try: while True: k = i.next() h[k] = i.next() except StopIteration: return h class ZmqReactor(ZmqBaseReactor): """ A consumer class implementing a consumer for messages. 
Can also be used as a 1:1 proxy """ def __init__(self, conf): super(ZmqReactor, self).__init__(conf) def consume(self, sock): #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) if sock in self.mapping: LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { 'data': data}) self.mapping[sock].send(data) return proxy = self.proxies[sock] if data[2] == 'cast': # Legacy protocol packenv = data[3] ctx, msg = _deserialize(packenv) request = rpc_common.deserialize_msg(msg) ctx = RpcContext.unmarshal(ctx) elif data[2] == 'impl_zmq_v2': packenv = data[4:] msg = unflatten_envelope(packenv) request = rpc_common.deserialize_msg(msg) # Unmarshal only after verifying the message. ctx = RpcContext.unmarshal(data[3]) else: LOG.error(_("ZMQ Envelope version unsupported or unknown.")) return self.pool.spawn_n(self.process, proxy, ctx, request) class Connection(rpc_common.Connection): """Manages connections and threads.""" def __init__(self, conf): self.topics = [] self.reactor = ZmqReactor(conf) def create_consumer(self, topic, proxy, fanout=False): # Register with matchmaker. _get_matchmaker().register(topic, CONF.rpc_zmq_host) # Subscription scenarios if fanout: sock_type = zmq.SUB subscribe = ('', fanout)[type(fanout) == str] topic = 'fanout~' + topic.split('.', 1)[0] else: sock_type = zmq.PULL subscribe = None topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) if topic in self.topics: LOG.info(_("Skipping topic registration. 
Already registered.")) return # Receive messages from (local) proxy inaddr = "ipc://%s/zmq_topic_%s" % \ (CONF.rpc_zmq_ipc_dir, topic) LOG.debug(_("Consumer is a zmq.%s"), ['PULL', 'SUB'][sock_type == zmq.SUB]) self.reactor.register(proxy, inaddr, sock_type, subscribe=subscribe, in_bind=False) self.topics.append(topic) def close(self): _get_matchmaker().stop_heartbeat() for topic in self.topics: _get_matchmaker().unregister(topic, CONF.rpc_zmq_host) self.reactor.close() self.topics = [] def wait(self): self.reactor.wait() def consume_in_thread(self): _get_matchmaker().start_heartbeat() self.reactor.consume_in_thread() def _cast(addr, context, topic, msg, timeout=None, envelope=False, _msg_id=None): timeout_cast = timeout or CONF.rpc_cast_timeout payload = [RpcContext.marshal(context), msg] with Timeout(timeout_cast, exception=rpc_common.Timeout): try: conn = ZmqClient(addr) # assumes cast can't return an exception conn.cast(_msg_id, topic, payload, envelope) except zmq.ZMQError: raise RPCException("Cast failed. ZMQ Socket Exception") finally: if 'conn' in vars(): conn.close() def _call(addr, context, topic, msg, timeout=None, envelope=False): # timeout_response is how long we wait for a response timeout = timeout or CONF.rpc_response_timeout # The msg_id is used to track replies. msg_id = uuid.uuid4().hex # Replies always come into the reply service. reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host LOG.debug(_("Creating payload")) # Curry the original request into a reply method. mcontext = RpcContext.marshal(context) payload = { 'method': '-reply', 'args': { 'msg_id': msg_id, 'topic': reply_topic, # TODO(ewindisch): safe to remove mcontext in I. 'msg': [mcontext, msg] } } LOG.debug(_("Creating queue socket for reply waiter")) # Messages arriving async. 
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt with Timeout(timeout, exception=rpc_common.Timeout): try: msg_waiter = ZmqSocket( "ipc://%s/zmq_topic_zmq_replies.%s" % (CONF.rpc_zmq_ipc_dir, CONF.rpc_zmq_host), zmq.SUB, subscribe=msg_id, bind=False ) LOG.debug(_("Sending cast")) _cast(addr, context, topic, payload, envelope) LOG.debug(_("Cast sent; Waiting reply")) # Blocks until receives reply msg = msg_waiter.recv() LOG.debug(_("Received message: %s"), msg) LOG.debug(_("Unpacking response")) if msg[2] == 'cast': # Legacy version raw_msg = _deserialize(msg[-1])[-1] elif msg[2] == 'impl_zmq_v2': rpc_envelope = unflatten_envelope(msg[4:]) raw_msg = rpc_common.deserialize_msg(rpc_envelope) else: raise rpc_common.UnsupportedRpcEnvelopeVersion( _("Unsupported or unknown ZMQ envelope returned.")) responses = raw_msg['args']['response'] # ZMQError trumps the Timeout error. except zmq.ZMQError: raise RPCException("ZMQ Socket Error") except (IndexError, KeyError): raise RPCException(_("RPC Message Invalid.")) finally: if 'msg_waiter' in vars(): msg_waiter.close() # It seems we don't need to do all of the following, # but perhaps it would be useful for multicall? # One effect of this is that we're checking all # responses for Exceptions. for resp in responses: if isinstance(resp, types.DictType) and 'exc' in resp: raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) return responses[-1] def _multi_send(method, context, topic, msg, timeout=None, envelope=False, _msg_id=None): """ Wraps the sending of messages, dispatches to the matchmaker and sends message to all relevant hosts. """ conf = CONF LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) queues = _get_matchmaker().queues(topic) LOG.debug(_("Sending message(s) to: %s"), queues) # Don't stack if we have no matchmaker results if len(queues) == 0: LOG.warn(_("No matchmaker results. 
Not casting.")) # While not strictly a timeout, callers know how to handle # this exception and a timeout isn't too big a lie. raise rpc_common.Timeout(_("No match from matchmaker.")) # This supports brokerless fanout (addresses > 1) for queue in queues: (_topic, ip_addr) = queue _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) if method.__name__ == '_cast': eventlet.spawn_n(method, _addr, context, _topic, msg, timeout, envelope, _msg_id) return return method(_addr, context, _topic, msg, timeout, envelope) def create_connection(conf, new=True): return Connection(conf) def multicall(conf, *args, **kwargs): """Multiple calls.""" return _multi_send(_call, *args, **kwargs) def call(conf, *args, **kwargs): """Send a message, expect a response.""" data = _multi_send(_call, *args, **kwargs) return data[-1] def cast(conf, *args, **kwargs): """Send a message expecting no reply.""" _multi_send(_cast, *args, **kwargs) def fanout_cast(conf, context, topic, msg, **kwargs): """Send a message to all listening and expect no reply.""" # NOTE(ewindisch): fanout~ is used because it avoid splitting on . # and acts as a non-subtle hint to the matchmaker and ZmqProxy. _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) def notify(conf, context, topic, msg, envelope): """ Send notification event. Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. 
topic = topic.replace('.', '-') cast(conf, context, topic, msg, envelope=envelope) def cleanup(): """Clean up resources in use by implementation.""" global ZMQ_CTX if ZMQ_CTX: ZMQ_CTX.term() ZMQ_CTX = None global matchmaker matchmaker = None def _get_ctxt(): if not zmq: raise ImportError("Failed to import eventlet.green.zmq") global ZMQ_CTX if not ZMQ_CTX: ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts) return ZMQ_CTX def _get_matchmaker(*args, **kwargs): global matchmaker if not matchmaker: matchmaker = importutils.import_object( CONF.rpc_zmq_matchmaker, *args, **kwargs) return matchmaker manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/impl_qpid.py0000664000175000017500000005461512301410454025357 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import itertools import time import uuid import eventlet import greenlet from oslo.config import cfg from manila.openstack.common.gettextutils import _ from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import log as logging from manila.openstack.common.rpc import amqp as rpc_amqp from manila.openstack.common.rpc import common as rpc_common qpid_messaging = importutils.try_import("qpid.messaging") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") LOG = logging.getLogger(__name__) qpid_opts = [ cfg.StrOpt('qpid_hostname', default='localhost', help='Qpid broker hostname'), cfg.IntOpt('qpid_port', default=5672, help='Qpid broker port'), cfg.ListOpt('qpid_hosts', default=['$qpid_hostname:$qpid_port'], help='Qpid HA cluster host:port pairs'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), cfg.StrOpt('qpid_password', default='', help='Password for qpid connection', secret=True), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), cfg.IntOpt('qpid_heartbeat', default=60, help='Seconds between connection keepalive heartbeats'), cfg.StrOpt('qpid_protocol', default='tcp', help="Transport to use, either 'tcp' or 'ssl'"), cfg.BoolOpt('qpid_tcp_nodelay', default=True, help='Disable Nagle algorithm'), ] cfg.CONF.register_opts(qpid_opts) class ConsumerBase(object): """Consumer base class.""" def __init__(self, session, callback, node_name, node_opts, link_name, link_opts): """Declare a queue on an amqp session. 'session' is the amqp session to use 'callback' is the callback to call when messages are received 'node_name' is the first part of the Qpid address string, before ';' 'node_opts' will be applied to the "x-declare" section of "node" in the address string. 
'link_name' goes into the "name" field of the "link" in the address string 'link_opts' will be applied to the "x-declare" section of "link" in the address string. """ self.callback = callback self.receiver = None self.session = None addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": True, "auto-delete": True, }, }, "link": { "name": link_name, "durable": True, "x-declare": { "durable": False, "auto-delete": True, "exclusive": False, }, }, } addr_opts["node"]["x-declare"].update(node_opts) addr_opts["link"]["x-declare"].update(link_opts) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.reconnect(session) def reconnect(self, session): """Re-declare the receiver after a qpid reconnect""" self.session = session self.receiver = session.receiver(self.address) self.receiver.capacity = 1 def consume(self): """Fetch the message and pass it to the callback object""" message = self.receiver.fetch() try: msg = rpc_common.deserialize_msg(message.content) self.callback(msg) except Exception: LOG.exception(_("Failed to process message... skipping it.")) finally: self.session.acknowledge(message) def get_receiver(self): return self.receiver class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'""" def __init__(self, conf, session, msg_id, callback): """Init a 'direct' queue. 'session' is the amqp session to use 'msg_id' is the msg_id to listen on 'callback' is the callback to call when messages are received """ super(DirectConsumer, self).__init__(session, callback, "%s/%s" % (msg_id, msg_id), {"type": "direct"}, msg_id, {"exclusive": True}) class TopicConsumer(ConsumerBase): """Consumer class for 'topic'""" def __init__(self, conf, session, topic, callback, name=None, exchange_name=None): """Init a 'topic' queue. 
:param session: the amqp session to use :param topic: is the topic to listen on :paramtype topic: str :param callback: the callback to call when messages are received :param name: optional queue name, defaults to topic """ exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) super(TopicConsumer, self).__init__(session, callback, "%s/%s" % (exchange_name, topic), {}, name or topic, {}) class FanoutConsumer(ConsumerBase): """Consumer class for 'fanout'""" def __init__(self, conf, session, topic, callback): """Init a 'fanout' queue. 'session' is the amqp session to use 'topic' is the topic to listen on 'callback' is the callback to call when messages are received """ super(FanoutConsumer, self).__init__( session, callback, "%s_fanout" % topic, {"durable": False, "type": "fanout"}, "%s_fanout_%s" % (topic, uuid.uuid4().hex), {"exclusive": True}) class Publisher(object): """Base Publisher class""" def __init__(self, session, node_name, node_opts=None): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.sender = None self.session = session addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": False, # auto-delete isn't implemented for exchanges in qpid, # but put in here anyway "auto-delete": True, }, }, } if node_opts: addr_opts["node"]["x-declare"].update(node_opts) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.reconnect(session) def reconnect(self, session): """Re-establish the Sender after a reconnection""" self.sender = session.sender(self.address) def send(self, msg): """Send a message""" self.sender.send(msg) class DirectPublisher(Publisher): """Publisher class for 'direct'""" def __init__(self, conf, session, msg_id): """Init a 'direct' publisher.""" super(DirectPublisher, self).__init__(session, msg_id, {"type": "Direct"}) class TopicPublisher(Publisher): """Publisher class for 'topic'""" def __init__(self, conf, session, topic): """init a 'topic' 
publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) super(TopicPublisher, self).__init__(session, "%s/%s" % (exchange_name, topic)) class FanoutPublisher(Publisher): """Publisher class for 'fanout'""" def __init__(self, conf, session, topic): """init a 'fanout' publisher. """ super(FanoutPublisher, self).__init__( session, "%s_fanout" % topic, {"type": "fanout"}) class NotifyPublisher(Publisher): """Publisher class for notifications""" def __init__(self, conf, session, topic): """init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) super(NotifyPublisher, self).__init__(session, "%s/%s" % (exchange_name, topic), {"durable": True}) class Connection(object): """Connection object.""" pool = None def __init__(self, conf, server_params=None): if not qpid_messaging: raise ImportError("Failed to import qpid.messaging") self.session = None self.consumers = {} self.consumer_thread = None self.proxy_callbacks = [] self.conf = conf if server_params and 'hostname' in server_params: # NOTE(russellb) This enables support for cast_to_server. 
server_params['qpid_hosts'] = [ '%s:%d' % (server_params['hostname'], server_params.get('port', 5672)) ] params = { 'qpid_hosts': self.conf.qpid_hosts, 'username': self.conf.qpid_username, 'password': self.conf.qpid_password, } params.update(server_params or {}) self.brokers = params['qpid_hosts'] self.username = params['username'] self.password = params['password'] self.connection_create(self.brokers[0]) self.reconnect() def connection_create(self, broker): # Create the connection - this does not open the connection self.connection = qpid_messaging.Connection(broker) # Check if flags are set and if so set them for the connection # before we call open self.connection.username = self.username self.connection.password = self.password self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms # Reconnection is done by self.reconnect() self.connection.reconnect = False self.connection.heartbeat = self.conf.qpid_heartbeat self.connection.transport = self.conf.qpid_protocol self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay def _register_consumer(self, consumer): self.consumers[str(consumer.get_receiver())] = consumer def _lookup_consumer(self, receiver): return self.consumers[str(receiver)] def reconnect(self): """Handles reconnecting and re-establishing sessions and queues""" if self.connection.opened(): try: self.connection.close() except qpid_exceptions.ConnectionError: pass attempt = 0 delay = 1 while True: broker = self.brokers[attempt % len(self.brokers)] attempt += 1 try: self.connection_create(broker) self.connection.open() except qpid_exceptions.ConnectionError, e: msg_dict = dict(e=e, delay=delay) msg = _("Unable to connect to AMQP server: %(e)s. 
" "Sleeping %(delay)s seconds") % msg_dict LOG.error(msg) time.sleep(delay) delay = min(2 * delay, 60) else: LOG.info(_('Connected to AMQP server on %s'), broker) break self.session = self.connection.session() if self.consumers: consumers = self.consumers self.consumers = {} for consumer in consumers.itervalues(): consumer.reconnect(self.session) self._register_consumer(consumer) LOG.debug(_("Re-established AMQP queues")) def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) except (qpid_exceptions.Empty, qpid_exceptions.ConnectionError), e: if error_callback: error_callback(e) self.reconnect() def close(self): """Close/release this connection""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.connection.close() self.connection = None def reset(self): """Reset a connection so it can be used again""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.session.close() self.session = self.connection.session() self.consumers = {} def declare_consumer(self, consumer_cls, topic, callback): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.error(_("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): consumer = consumer_cls(self.conf, self.session, topic, callback) self._register_consumer(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): """Return an iterator that will consume from all queues/consumers""" def _error_callback(exc): if isinstance(exc, qpid_exceptions.Empty): LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % str(exc)) def _consume(): nxt_receiver = self.session.next_receiver(timeout=timeout) try: 
self._lookup_consumer(nxt_receiver).consume() except Exception: LOG.exception(_("Error processing message. Skipping it.")) for iteration in itertools.count(0): if limit and iteration >= limit: raise StopIteration yield self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): """Cancel a consumer thread""" if self.consumer_thread is not None: self.consumer_thread.kill() try: self.consumer_thread.wait() except greenlet.GreenletExit: pass self.consumer_thread = None def wait_on_proxy_callbacks(self): """Wait for all proxy callback threads to exit.""" for proxy_cb in self.proxy_callbacks: proxy_cb.wait() def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class""" def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.exception(_("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publisher_send(): publisher = cls(self.conf, self.session, topic) publisher.send(msg) return self.ensure(_connect_error, _publisher_send) def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. In nova's use, this is generally a msg_id queue used for responses for call/multicall """ self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, ), topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message""" # # We want to create a message with attributes, e.g. a TTL. 
We # don't really need to keep 'msg' in its JSON format any longer # so let's create an actual qpid message here and get some # value-add on the go. # # WARNING: Request timeout happens to be in the same units as # qpid's TTL (seconds). If this changes in the future, then this # will need to be altered accordingly. # qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) self.publisher_send(TopicPublisher, topic, qpid_message) def fanout_send(self, topic, msg): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): """Send a notify message on a topic""" self.publisher_send(NotifyPublisher, topic, msg) def consume(self, limit=None): """Consume from all queues/consumers""" it = self.iterconsume(limit=limit) while True: try: it.next() except StopIteration: return def consume_in_thread(self): """Consumer from all queues/consumers in a greenthread""" def _consumer_thread(): try: self.consume() except greenlet.GreenletExit: return if self.consumer_thread is None: self.consumer_thread = eventlet.spawn(_consumer_thread) return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) self.proxy_callbacks.append(proxy_cb) if fanout: consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) else: consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb) self._register_consumer(consumer) return consumer def create_worker(self, topic, proxy, pool_name): """Create a worker that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) self.proxy_callbacks.append(proxy_cb) consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, name=pool_name) self._register_consumer(consumer) return consumer def 
join_consumer_pool(self, callback, pool_name, topic, exchange_name=None): """Register as a member of a group of consumers for a given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created. """ callback_wrapper = rpc_amqp.CallbackWrapper( conf=self.conf, callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), ) self.proxy_callbacks.append(callback_wrapper) consumer = TopicConsumer(conf=self.conf, session=self.session, topic=topic, callback=callback_wrapper, name=pool_name, exchange_name=exchange_name) self._register_consumer(consumer) return consumer def create_connection(conf, new=True): """Create a connection""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) def multicall(conf, context, topic, msg, timeout=None): """Make a call that returns multiple times.""" return rpc_amqp.multicall( conf, context, topic, msg, timeout, rpc_amqp.get_connection_pool(conf, Connection)) def call(conf, context, topic, msg, timeout=None): """Sends a message on a topic and wait for a response.""" return rpc_amqp.call( conf, context, topic, msg, timeout, rpc_amqp.get_connection_pool(conf, Connection)) def cast(conf, context, topic, msg): """Sends a message on a topic without waiting for a response.""" return rpc_amqp.cast( conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def fanout_cast(conf, context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" return rpc_amqp.fanout_cast( conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a topic to a specific server.""" return rpc_amqp.cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def fanout_cast_to_server(conf, context, 
server_params, topic, msg): """Sends a message on a fanout exchange to a specific server.""" return rpc_amqp.fanout_cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def notify(conf, context, topic, msg, envelope): """Sends a notification event on a topic.""" return rpc_amqp.notify(conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection), envelope) def cleanup(): return rpc_amqp.cleanup(Connection.pool) manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/zmq_receiver.py0000775000175000017500000000227712301410454026074 0ustar chuckchuck00000000000000#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import eventlet eventlet.monkey_patch() import contextlib import sys from oslo.config import cfg from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila.openstack.common.rpc import impl_zmq CONF = cfg.CONF CONF.register_opts(rpc.rpc_opts) CONF.register_opts(impl_zmq.zmq_opts) def main(): CONF(sys.argv[1:], project='oslo') logging.setup("oslo") with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: reactor.consume_in_thread() reactor.wait() manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/amqp.py0000664000175000017500000006133212301410454024331 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Shared code between AMQP based openstack.common.rpc implementations. The code in this module is shared between the rpc implemenations based on AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses AMQP, but is deprecated and predates this code. """ import collections import inspect import sys import uuid from eventlet import greenpool from eventlet import pools from eventlet import queue from eventlet import semaphore # TODO(pekowsk): Remove import cfg and below comment in Havana. 
# This import should no longer be needed when the amqp_rpc_single_reply_queue # option is removed. from oslo.config import cfg from manila.openstack.common import excutils from manila.openstack.common.gettextutils import _ from manila.openstack.common import local from manila.openstack.common import log as logging from manila.openstack.common.rpc import common as rpc_common # TODO(pekowski): Remove this option in Havana. amqp_opts = [ cfg.BoolOpt('amqp_rpc_single_reply_queue', default=False, help='Enable a fast single reply queue if using AMQP based ' 'RPC like RabbitMQ or Qpid.'), ] cfg.CONF.register_opts(amqp_opts) UNIQUE_ID = '_unique_id' LOG = logging.getLogger(__name__) class Pool(pools.Pool): """Class that implements a Pool of Connections.""" def __init__(self, conf, connection_cls, *args, **kwargs): self.connection_cls = connection_cls self.conf = conf kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size) kwargs.setdefault("order_as_stack", True) super(Pool, self).__init__(*args, **kwargs) self.reply_proxy = None # TODO(comstud): Timeout connections not used in a while def create(self): LOG.debug(_('Pool creating new connection')) return self.connection_cls(self.conf) def empty(self): while self.free_items: self.get().close() # Force a new connection pool to be created. # Note that this was added due to failing unit test cases. The issue # is the above "while loop" gets all the cached connections from the # pool and closes them, but never returns them to the pool, a pool # leak. The unit tests hang waiting for an item to be returned to the # pool. The unit tests get here via the teatDown() method. In the run # time code, it gets here via cleanup() and only appears in service.py # just before doing a sys.exit(), so cleanup() only happens once and # the leakage is not a problem. 
self.connection_cls.pool = None _pool_create_sem = semaphore.Semaphore() def get_connection_pool(conf, connection_cls): with _pool_create_sem: # Make sure only one thread tries to create the connection pool. if not connection_cls.pool: connection_cls.pool = Pool(conf, connection_cls) return connection_cls.pool class ConnectionContext(rpc_common.Connection): """The class that is actually returned to the caller of create_connection(). This is essentially a wrapper around Connection that supports 'with'. It can also return a new Connection, or one from a pool. The function will also catch when an instance of this class is to be deleted. With that we can return Connections to the pool on exceptions and so forth without making the caller be responsible for catching them. If possible the function makes sure to return a connection to the pool. """ def __init__(self, conf, connection_pool, pooled=True, server_params=None): """Create a new connection, or get one from the pool""" self.connection = None self.conf = conf self.connection_pool = connection_pool if pooled: self.connection = connection_pool.get() else: self.connection = connection_pool.connection_cls( conf, server_params=server_params) self.pooled = pooled def __enter__(self): """When with ConnectionContext() is used, return self""" return self def _done(self): """If the connection came from a pool, clean it up and put it back. If it did not come from a pool, close it. """ if self.connection: if self.pooled: # Reset the connection so it's ready for the next caller # to grab from the pool self.connection.reset() self.connection_pool.put(self.connection) else: try: self.connection.close() except Exception: pass self.connection = None def __exit__(self, exc_type, exc_value, tb): """End of 'with' statement. We're done here.""" self._done() def __del__(self): """Caller is done with this connection. 
Make sure we cleaned up.""" self._done() def close(self): """Caller is done with this connection.""" self._done() def create_consumer(self, topic, proxy, fanout=False): self.connection.create_consumer(topic, proxy, fanout) def create_worker(self, topic, proxy, pool_name): self.connection.create_worker(topic, proxy, pool_name) def join_consumer_pool(self, callback, pool_name, topic, exchange_name): self.connection.join_consumer_pool(callback, pool_name, topic, exchange_name) def consume_in_thread(self): self.connection.consume_in_thread() def __getattr__(self, key): """Proxy all other calls to the Connection instance""" if self.connection: return getattr(self.connection, key) else: raise rpc_common.InvalidRPCConnectionReuse() class ReplyProxy(ConnectionContext): """ Connection class for RPC replies / callbacks """ def __init__(self, conf, connection_pool): self._call_waiters = {} self._num_call_waiters = 0 self._num_call_waiters_wrn_threshhold = 10 self._reply_q = 'reply_' + uuid.uuid4().hex super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False) self.declare_direct_consumer(self._reply_q, self._process_data) self.consume_in_thread() def _process_data(self, message_data): msg_id = message_data.pop('_msg_id', None) waiter = self._call_waiters.get(msg_id) if not waiter: LOG.warn(_('no calling threads waiting for msg_id : %s' ', message : %s') % (msg_id, message_data)) else: waiter.put(message_data) def add_call_waiter(self, waiter, msg_id): self._num_call_waiters += 1 if self._num_call_waiters > self._num_call_waiters_wrn_threshhold: LOG.warn(_('Number of call waiters is greater than warning ' 'threshhold: %d. 
There could be a MulticallProxyWaiter ' 'leak.') % self._num_call_waiters_wrn_threshhold) self._num_call_waiters_wrn_threshhold *= 2 self._call_waiters[msg_id] = waiter def del_call_waiter(self, msg_id): self._num_call_waiters -= 1 del self._call_waiters[msg_id] def get_reply_q(self): return self._reply_q def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, failure=None, ending=False, log_failure=True): """Sends a reply or an error on the channel signified by msg_id. Failure should be a sys.exc_info() tuple. """ with ConnectionContext(conf, connection_pool) as conn: if failure: failure = rpc_common.serialize_remote_exception(failure, log_failure) try: msg = {'result': reply, 'failure': failure} except TypeError: msg = {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure} if ending: msg['ending'] = True _add_unique_id(msg) # If a reply_q exists, add the msg_id to the reply and pass the # reply_q to direct_send() to use it as the response queue. # Otherwise use the msg_id for backward compatibilty. 
if reply_q: msg['_msg_id'] = msg_id conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) else: conn.direct_send(msg_id, rpc_common.serialize_msg(msg)) class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.reply_q = kwargs.pop('reply_q', None) self.conf = kwargs.pop('conf') super(RpcContext, self).__init__(**kwargs) def deepcopy(self): values = self.to_dict() values['conf'] = self.conf values['msg_id'] = self.msg_id values['reply_q'] = self.reply_q return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False, connection_pool=None, log_failure=True): if self.msg_id: msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool, reply, failure, ending, log_failure) if ending: self.msg_id = None def unpack_context(conf, msg): """Unpack context from msg.""" context_dict = {} for key in list(msg.keys()): # NOTE(vish): Some versions of python don't like unicode keys # in kwargs. key = str(key) if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value context_dict['msg_id'] = msg.pop('_msg_id', None) context_dict['reply_q'] = msg.pop('_reply_q', None) context_dict['conf'] = conf ctx = RpcContext.from_dict(context_dict) rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) return ctx def pack_context(msg, context): """Pack context into msg. Values for message keys need to be less than 255 chars, so we pull context out into a bunch of separate keys. If we want to support more arguments in rabbit messages, we may want to do the same for args at some point. 
""" context_d = dict([('_context_%s' % key, value) for (key, value) in context.to_dict().iteritems()]) msg.update(context_d) class _MsgIdCache(object): """This class checks any duplicate messages.""" # NOTE: This value is considered can be a configuration item, but # it is not necessary to change its value in most cases, # so let this value as static for now. DUP_MSG_CHECK_SIZE = 16 def __init__(self, **kwargs): self.prev_msgids = collections.deque([], maxlen=self.DUP_MSG_CHECK_SIZE) def check_duplicate_message(self, message_data): """AMQP consumers may read same message twice when exceptions occur before ack is returned. This method prevents doing it. """ if UNIQUE_ID in message_data: msg_id = message_data[UNIQUE_ID] if msg_id not in self.prev_msgids: self.prev_msgids.append(msg_id) else: raise rpc_common.DuplicateMessageError(msg_id=msg_id) def _add_unique_id(msg): """Add unique_id for checking duplicate messages.""" unique_id = uuid.uuid4().hex msg.update({UNIQUE_ID: unique_id}) LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) class _ThreadPoolWithWait(object): """Base class for a delayed invocation manager used by the Connection class to start up green threads to handle incoming messages. """ def __init__(self, conf, connection_pool): self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size) self.connection_pool = connection_pool self.conf = conf def wait(self): """Wait for all callback threads to exit.""" self.pool.waitall() class CallbackWrapper(_ThreadPoolWithWait): """Wraps a straight callback to allow it to be invoked in a green thread. 
""" def __init__(self, conf, callback, connection_pool): """ :param conf: cfg.CONF instance :param callback: a callable (probably a function) :param connection_pool: connection pool as returned by get_connection_pool() """ super(CallbackWrapper, self).__init__( conf=conf, connection_pool=connection_pool, ) self.callback = callback def __call__(self, message_data): self.pool.spawn_n(self.callback, message_data) class ProxyCallback(_ThreadPoolWithWait): """Calls methods on a proxy object based on method and args.""" def __init__(self, conf, proxy, connection_pool): super(ProxyCallback, self).__init__( conf=conf, connection_pool=connection_pool, ) self.proxy = proxy self.msg_id_cache = _MsgIdCache() def __call__(self, message_data): """Consumer callback to call a method on a proxy object. Parses the message for validity and fires off a thread to call the proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call args: dictionary of arg: value Example: {'method': 'echo', 'args': {'value': 42}} """ # It is important to clear the context here, because at this point # the previous context is stored in local.store.context if hasattr(local.store, 'context'): del local.store.context rpc_common._safe_log(LOG.debug, _('received %s'), message_data) self.msg_id_cache.check_duplicate_message(message_data) ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') args = message_data.get('args', {}) version = message_data.get('version') namespace = message_data.get('namespace') if not method: LOG.warn(_('no method for message: %s') % message_data) ctxt.reply(_('No method for message: %s') % message_data, connection_pool=self.connection_pool) return self.pool.spawn_n(self._process_data, ctxt, version, method, namespace, args) def _process_data(self, ctxt, version, method, namespace, args): """Process a message in a new thread. 
If the proxy object we have has a dispatch method (see rpc.dispatcher.RpcDispatcher), pass it the version, method, and args and let it dispatch as appropriate. If not, use the old behavior of magically calling the specified method on the proxy we have here. """ ctxt.update_store() try: rval = self.proxy.dispatch(ctxt, version, method, namespace, **args) # Check if the result was a generator if inspect.isgenerator(rval): for x in rval: ctxt.reply(x, None, connection_pool=self.connection_pool) else: ctxt.reply(rval, None, connection_pool=self.connection_pool) # This final None tells multicall that it is done. ctxt.reply(ending=True, connection_pool=self.connection_pool) except rpc_common.ClientException as e: LOG.debug(_('Expected exception during message handling (%s)') % e._exc_info[1]) ctxt.reply(None, e._exc_info, connection_pool=self.connection_pool, log_failure=False) except Exception: # sys.exc_info() is deleted by LOG.exception(). exc_info = sys.exc_info() LOG.error(_('Exception during message handling'), exc_info=exc_info) ctxt.reply(None, exc_info, connection_pool=self.connection_pool) class MulticallProxyWaiter(object): def __init__(self, conf, msg_id, timeout, connection_pool): self._msg_id = msg_id self._timeout = timeout or conf.rpc_response_timeout self._reply_proxy = connection_pool.reply_proxy self._done = False self._got_ending = False self._conf = conf self._dataqueue = queue.LightQueue() # Add this caller to the reply proxy's call_waiters self._reply_proxy.add_call_waiter(self, self._msg_id) self.msg_id_cache = _MsgIdCache() def put(self, data): self._dataqueue.put(data) def done(self): if self._done: return self._done = True # Remove this caller from reply proxy's call_waiters self._reply_proxy.del_call_waiter(self._msg_id) def _process_data(self, data): result = None self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] result = rpc_common.deserialize_remote_exception(self._conf, failure) elif 
data.get('ending', False): self._got_ending = True else: result = data['result'] return result def __iter__(self): """Return a result until we get a reply with an 'ending" flag""" if self._done: raise StopIteration while True: try: data = self._dataqueue.get(timeout=self._timeout) result = self._process_data(data) except queue.Empty: self.done() raise rpc_common.Timeout() except Exception: with excutils.save_and_reraise_exception(): self.done() if self._got_ending: self.done() raise StopIteration if isinstance(result, Exception): self.done() raise result yield result #TODO(pekowski): Remove MulticallWaiter() in Havana. class MulticallWaiter(object): def __init__(self, conf, connection, timeout): self._connection = connection self._iterator = connection.iterconsume(timeout=timeout or conf.rpc_response_timeout) self._result = None self._done = False self._got_ending = False self._conf = conf self.msg_id_cache = _MsgIdCache() def done(self): if self._done: return self._done = True self._iterator.close() self._iterator = None self._connection.close() def __call__(self, data): """The consume() callback will call this. 
Store the result.""" self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] self._result = rpc_common.deserialize_remote_exception(self._conf, failure) elif data.get('ending', False): self._got_ending = True else: self._result = data['result'] def __iter__(self): """Return a result until we get a 'None' response from consumer""" if self._done: raise StopIteration while True: try: self._iterator.next() except Exception: with excutils.save_and_reraise_exception(): self.done() if self._got_ending: self.done() raise StopIteration result = self._result if isinstance(result, Exception): self.done() raise result yield result def create_connection(conf, new, connection_pool): """Create a connection""" return ConnectionContext(conf, connection_pool, pooled=not new) _reply_proxy_create_sem = semaphore.Semaphore() def multicall(conf, context, topic, msg, timeout, connection_pool): """Make a call that returns multiple times.""" # TODO(pekowski): Remove all these comments in Havana. # For amqp_rpc_single_reply_queue = False, # Can't use 'with' for multicall, as it returns an iterator # that will continue to use the connection. When it's done, # connection.close() will get called which will put it back into # the pool # For amqp_rpc_single_reply_queue = True, # The 'with' statement is mandatory for closing the connection LOG.debug(_('Making synchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _add_unique_id(msg) pack_context(msg, context) # TODO(pekowski): Remove this flag and the code under the if clause # in Havana. 
if not conf.amqp_rpc_single_reply_queue: conn = ConnectionContext(conf, connection_pool) wait_msg = MulticallWaiter(conf, conn, timeout) conn.declare_direct_consumer(msg_id, wait_msg) conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) else: with _reply_proxy_create_sem: if not connection_pool.reply_proxy: connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) return wait_msg def call(conf, context, topic, msg, timeout, connection_pool): """Sends a message on a topic and wait for a response.""" rv = multicall(conf, context, topic, msg, timeout, connection_pool) # NOTE(vish): return the last result from the multicall rv = list(rv) if not rv: return return rv[-1] def cast(conf, context, topic, msg, connection_pool): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, rpc_common.serialize_msg(msg)) def fanout_cast(conf, context, topic, msg, connection_pool): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.fanout_send(topic, rpc_common.serialize_msg(msg)) def cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a topic to a specific server.""" _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: conn.topic_send(topic, rpc_common.serialize_msg(msg)) def fanout_cast_to_server(conf, context, 
server_params, topic, msg, connection_pool): """Sends a message on a fanout exchange to a specific server.""" _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: conn.fanout_send(topic, rpc_common.serialize_msg(msg)) def notify(conf, context, topic, msg, connection_pool, envelope): """Sends a notification event on a topic.""" LOG.debug(_('Sending %(event_type)s on %(topic)s'), dict(event_type=msg.get('event_type'), topic=topic)) _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: if envelope: msg = rpc_common.serialize_msg(msg) conn.notify_send(topic, msg) def cleanup(connection_pool): if connection_pool: connection_pool.empty() def get_control_exchange(conf): return conf.control_exchange manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/__init__.py0000664000175000017500000002671212301410454025135 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A remote procedure call (rpc) abstraction. 
For some wrappers that add message versioning to rpc, see: rpc.dispatcher rpc.proxy """ import inspect import logging from oslo.config import cfg from manila.openstack.common.gettextutils import _ from manila.openstack.common import importutils from manila.openstack.common import local LOG = logging.getLogger(__name__) rpc_opts = [ cfg.StrOpt('rpc_backend', default='%s.impl_kombu' % __package__, help="The messaging module to use, defaults to kombu."), cfg.IntOpt('rpc_thread_pool_size', default=64, help='Size of RPC thread pool'), cfg.IntOpt('rpc_conn_pool_size', default=30, help='Size of RPC connection pool'), cfg.IntOpt('rpc_response_timeout', default=60, help='Seconds to wait for a response from call or multicall'), cfg.IntOpt('rpc_cast_timeout', default=30, help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules', default=['manila.openstack.common.exception', 'nova.exception', 'manila.exception', 'exceptions', ], help='Modules of exceptions that are permitted to be recreated' 'upon receiving exception data from an rpc call.'), cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider'), cfg.StrOpt('control_exchange', default='openstack', help='AMQP exchange to connect to if using RabbitMQ or Qpid'), ] CONF = cfg.CONF CONF.register_opts(rpc_opts) def set_defaults(control_exchange): cfg.set_defaults(rpc_opts, control_exchange=control_exchange) def create_connection(new=True): """Create a connection to the message bus used for rpc. For some example usage of creating a connection and some consumers on that connection, see nova.service. :param new: Whether or not to create a new connection. A new connection will be created by default. If new is False, the implementation is free to return an existing connection from a pool. 
        :returns: An instance of openstack.common.rpc.common.Connection
    """
    return _get_impl().create_connection(CONF, new=new)


def _check_for_lock():
    # Debug aid: warn (and return True) when an RPC is initiated while the
    # calling greenthread still holds one or more locks.  Only active when
    # CONF.debug is set; otherwise returns None without inspecting the stack.
    if not CONF.debug:
        return None

    if ((hasattr(local.strong_store, 'locks_held') and
         local.strong_store.locks_held)):
        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('A RPC is being made while holding a lock. The locks '
                   'currently held are %(locks)s. This is probably a bug. '
                   'Please report it. Include the following: [%(stack)s].'),
                 {'locks': local.strong_store.locks_held,
                  'stack': stack})
        return True

    return False


def call(context, topic, msg, timeout=None, check_for_lock=False):
    """Invoke a remote method that returns something.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if a RPC call is made
                    with a lock held.

    :returns: A dict from the remote method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    if check_for_lock:
        _check_for_lock()
    return _get_impl().call(CONF, context, topic, msg, timeout)


def cast(context, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast(CONF, context, topic, msg)


def fanout_cast(context, topic, msg):
    """Broadcast a remote method invocation with no return.

    This method will get invoked on all consumers that were set up with
    this topic name and fanout=True.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=True.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast(CONF, context, topic, msg)


def multicall(context, topic, msg, timeout=None, check_for_lock=False):
    """Invoke a remote method and get back an iterator.

    In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as the come in
    via an iterator.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the rpc message to.  This correlates to
                  the topic argument of
                  openstack.common.rpc.common.Connection.create_consumer()
                  and only applies when the consumer was created with
                  fanout=False.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }
    :param timeout: int, number of seconds to use for a response timeout.
                    If set, this overrides the rpc_response_timeout option.
    :param check_for_lock: if True, a warning is emitted if a RPC call is made
                    with a lock held.

    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
              an index that starts at 0 and increases by one for each value
              returned and X is the Nth value that was returned by the remote
              method.

    :raises: openstack.common.rpc.common.Timeout if a complete response
             is not received before the timeout is reached.
    """
    if check_for_lock:
        _check_for_lock()
    return _get_impl().multicall(CONF, context, topic, msg, timeout)


def notify(context, topic, msg, envelope=False):
    """Send notification event.

    :param context: Information that identifies the user that has made this
                    request.
    :param topic: The topic to send the notification to.
    :param msg: This is a dict of content of event.
    :param envelope: Set to True to enable message envelope for notifications.

    :returns: None
    """
    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)


def cleanup():
    """Clean up resources in use by implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service.  This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()


def cast_to_server(context, server_params, topic, msg):
    """Invoke a remote method that does not return anything.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().cast_to_server(CONF, context, server_params, topic,
                                      msg)


def fanout_cast_to_server(context, server_params, topic, msg):
    """Broadcast to a remote method invocation with no return.

    :param context: Information that identifies the user that has made this
                    request.
    :param server_params: Connection information
    :param topic: The topic to send the notification to.
    :param msg: This is a dict in the form { "method" : "method_to_invoke",
                                             "args" : dict_of_kwargs }

    :returns: None
    """
    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
                                             topic, msg)


def queue_get_for(context, topic, host):
    """Get a queue name for a given topic + host.

    This function only works if this naming convention is followed on the
    consumer side, as well.

    For example, in nova, every instance of the nova-foo service calls
    create_consumer() for two topics:

        foo
        foo.<host>

    Messages sent to the 'foo' topic are distributed to exactly one instance
    of the nova-foo service.  The services are chosen in a round-robin
    fashion.  Messages sent to the 'foo.<host>' topic are sent to the
    nova-foo service on <host>.
    """
    # NOTE(review): the '<host>' placeholders in the docstring above were
    # stripped in the extracted text; restored to match the upstream oslo
    # docstring — confirm against oslo-incubator.
    return '%s.%s' % (topic, host) if host else topic


# Cached RPC backend module; populated lazily by _get_impl().
_RPCIMPL = None


def _get_impl():
    """Delay import of rpc_backend until configuration is loaded."""
    global _RPCIMPL
    if _RPCIMPL is None:
        try:
            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
        except ImportError:
            # For backwards compatibility with older nova config.
            impl = CONF.rpc_backend.replace('nova.rpc',
                                            'nova.openstack.common.rpc')
            _RPCIMPL = importutils.import_module(impl)
    return _RPCIMPL
manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/common.py0000664000175000017500000004437212301410454024670 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import sys
import traceback

from oslo.config import cfg

from manila.openstack.common.gettextutils import _
from manila.openstack.common import importutils
from manila.openstack.common import jsonutils
from manila.openstack.common import local
from manila.openstack.common import log as logging


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


'''RPC Envelope Version.

This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently.  For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc.  This version number is handled
in the rpc proxy and dispatcher modules.

This version number applies to the message envelope that is used in the
serialization done inside the rpc layer.  See serialize_msg() and
deserialize_msg().

The current message format (version 2.0) is very simple.  It is:

    {
        'oslo.version': <RPC Envelope Version as a String>,
        'oslo.message': <Application Message Payload, JSON encoded>
    }

Message format version '1.0' is just considered to be the messages we sent
without a message envelope.

So, the current message envelope just includes the envelope version.  It may
eventually contain additional information, such as a signature for the message
payload.

We will JSON encode the application message payload.  The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
# NOTE(review): the angle-bracket placeholders in the documentation string
# above were stripped in the extracted text; restored to match the upstream
# oslo text — confirm against oslo-incubator.
_RPC_ENVELOPE_VERSION = '2.0'

_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'


class RPCException(Exception):
    # Base class for RPC-layer exceptions.  Subclasses override `message`
    # with a %-format template; keyword args are interpolated into it.
    message = _("An unknown RPC related exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.message % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message

        super(RPCException, self).__init__(message)


class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback.  These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.
    """
    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)


class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
""" message = _('Timeout while waiting on RPC response - ' 'topic: "%(topic)s", RPC method: "%(method)s" ' 'info: "%(info)s"') def __init__(self, info=None, topic=None, method=None): """ :param info: Extra info to convey to the user :param topic: The topic that the rpc call was sent to :param rpc_method_name: The name of the rpc method being called """ self.info = info self.topic = topic self.method = method super(Timeout, self).__init__( None, info=info or _(''), topic=topic or _(''), method=method or _('')) class DuplicateMessageError(RPCException): message = _("Found duplicate message(%(msg_id)s). Skipping it.") class InvalidRPCConnectionReuse(RPCException): message = _("Invalid reuse of an RPC connection.") class UnsupportedRpcVersion(RPCException): message = _("Specified RPC version, %(version)s, not supported by " "this endpoint.") class UnsupportedRpcEnvelopeVersion(RPCException): message = _("Specified RPC envelope version, %(version)s, " "not supported by this endpoint.") class Connection(object): """A connection, returned by rpc.create_connection(). This class represents a connection to the message bus used for rpc. An instance of this class should never be created by users of the rpc API. Use rpc.create_connection() instead. """ def close(self): """Close the connection. This method must be called when the connection will no longer be used. It will ensure that any resources associated with the connection, such as a network connection, and cleaned up. """ raise NotImplementedError() def create_consumer(self, topic, proxy, fanout=False): """Create a consumer on this connection. A consumer is associated with a message queue on the backend message bus. The consumer will read messages from the queue, unpack them, and dispatch them to the proxy object. The contents of the message pulled off of the queue will determine which method gets called on the proxy object. :param topic: This is a name associated with what to consume from. 
Multiple instances of a service may consume from the same topic. For example, all instances of nova-compute consume from a queue called "compute". In that case, the messages will get distributed amongst the consumers in a round-robin fashion if fanout=False. If fanout=True, every consumer associated with this topic will get a copy of every message. :param proxy: The object that will handle all incoming messages. :param fanout: Whether or not this is a fanout topic. See the documentation for the topic parameter for some additional comments on this. """ raise NotImplementedError() def create_worker(self, topic, proxy, pool_name): """Create a worker on this connection. A worker is like a regular consumer of messages directed to a topic, except that it is part of a set of such consumers (the "pool") which may run in parallel. Every pool of workers will receive a given message, but only one worker in the pool will be asked to process it. Load is distributed across the members of the pool in round-robin fashion. :param topic: This is a name associated with what to consume from. Multiple instances of a service may consume from the same topic. :param proxy: The object that will handle all incoming messages. :param pool_name: String containing the name of the pool of workers """ raise NotImplementedError() def join_consumer_pool(self, callback, pool_name, topic, exchange_name): """Register as a member of a group of consumers for a given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created. :param callback: Callable to be invoked for each message. :type callback: callable accepting one argument :param pool_name: The name of the consumer pool. :type pool_name: str :param topic: The routing topic for desired messages. :type topic: str :param exchange_name: The name of the message exchange where the client should attach. Defaults to the configured exchange. 
:type exchange_name: str """ raise NotImplementedError() def consume_in_thread(self): """Spawn a thread to handle incoming messages. Spawn a thread that will be responsible for handling all incoming messages for consumers that were set up on this connection. Message dispatching inside of this is expected to be implemented in a non-blocking manner. An example implementation would be having this thread pull messages in for all of the consumers, but utilize a thread pool for dispatching the messages to the proxy objects. """ raise NotImplementedError() def _safe_log(log_func, msg, msg_data): """Sanitizes the msg_data field before logging.""" SANITIZE = {'set_admin_password': [('args', 'new_pass')], 'run_instance': [('args', 'admin_password')], 'route_message': [('args', 'message', 'args', 'method_info', 'method_kwargs', 'password'), ('args', 'message', 'args', 'method_info', 'method_kwargs', 'admin_password')]} has_method = 'method' in msg_data and msg_data['method'] in SANITIZE has_context_token = '_context_auth_token' in msg_data has_token = 'auth_token' in msg_data if not any([has_method, has_context_token, has_token]): return log_func(msg, msg_data) msg_data = copy.deepcopy(msg_data) if has_method: for arg in SANITIZE.get(msg_data['method'], []): try: d = msg_data for elem in arg[:-1]: d = d[elem] d[arg[-1]] = '' except KeyError, e: LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'), {'item': arg, 'err': e}) if has_context_token: msg_data['_context_auth_token'] = '' if has_token: msg_data['auth_token'] = '' return log_func(msg, msg_data) def serialize_remote_exception(failure_info, log_failure=True): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple. 
""" tb = traceback.format_exception(*failure_info) failure = failure_info[1] if log_failure: LOG.error(_("Returning exception %s to caller"), unicode(failure)) LOG.error(tb) kwargs = {} if hasattr(failure, 'kwargs'): kwargs = failure.kwargs data = { 'class': str(failure.__class__.__name__), 'module': str(failure.__class__.__module__), 'message': unicode(failure), 'tb': tb, 'args': failure.args, 'kwargs': kwargs } json_data = jsonutils.dumps(data) return json_data def deserialize_remote_exception(conf, data): failure = jsonutils.loads(str(data)) trace = failure.get('tb', []) message = failure.get('message', "") + "\n" + "\n".join(trace) name = failure.get('class') module = failure.get('module') # NOTE(ameade): We DO NOT want to allow just any module to be imported, in # order to prevent arbitrary code execution. if module not in conf.allowed_rpc_exception_modules: return RemoteError(name, failure.get('message'), trace) try: mod = importutils.import_module(module) klass = getattr(mod, name) if not issubclass(klass, Exception): raise TypeError("Can only deserialize Exceptions") failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) except (AttributeError, TypeError, ImportError): return RemoteError(name, failure.get('message'), trace) ex_type = type(failure) str_override = lambda self: message new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), {'__str__': str_override, '__unicode__': str_override}) try: # NOTE(ameade): Dynamically create a new exception type and swap it in # as the new type for the exception. This only works on user defined # Exceptions and not core python exceptions. This is important because # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type except TypeError: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. 
failure.args = (message,) + failure.args[1:] return failure class CommonRpcContext(object): def __init__(self, **kwargs): self.values = kwargs def __getattr__(self, key): try: return self.values[key] except KeyError: raise AttributeError(key) def to_dict(self): return copy.deepcopy(self.values) @classmethod def from_dict(cls, values): return cls(**values) def deepcopy(self): return self.from_dict(self.to_dict()) def update_store(self): local.store.context = self def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin flag set.""" # TODO(russellb) This method is a bit of a nova-ism. It makes # some assumptions about the data in the request context sent # across rpc, while the rest of this class does not. We could get # rid of this if we changed the nova code that uses this to # convert the RpcContext back to its native RequestContext doing # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) context = self.deepcopy() context.values['is_admin'] = True context.values.setdefault('roles', []) if 'admin' not in context.values['roles']: context.values['roles'].append('admin') if read_deleted is not None: context.values['read_deleted'] = read_deleted return context class ClientException(Exception): """This encapsulates some actual exception that is expected to be hit by an RPC proxy object. Merely instantiating it records the current exception information, which will be passed back to the RPC client without exceptional logging.""" def __init__(self): self._exc_info = sys.exc_info() def catch_client_exception(exceptions, func, *args, **kwargs): try: return func(*args, **kwargs) except Exception, e: if type(e) in exceptions: raise ClientException() else: raise def client_exceptions(*exceptions): """Decorator for manager methods that raise expected exceptions. 
    Marking a Manager method with this decorator allows the declaration
    of expected exceptions that the RPC layer should not consider fatal,
    and not log as if they were generated in a real error scenario. Note
    that this will cause listed exceptions to be wrapped in a
    ClientException, which is used internally by the RPC layer."""
    def outer(func):
        def inner(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
        return inner
    return outer


def version_is_compatible(imp_version, version):
    """Determine whether versions are compatible.

    :param imp_version: The version implemented
    :param version: The version requested by an incoming message.
    """
    # Compatible when majors match and the requested minor does not exceed
    # the implemented minor.  Both versions are expected as 'X.Y' strings.
    version_parts = version.split('.')
    imp_version_parts = imp_version.split('.')
    if int(version_parts[0]) != int(imp_version_parts[0]):  # Major
        return False
    if int(version_parts[1]) > int(imp_version_parts[1]):  # Minor
        return False
    return True


def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}

    return msg


def deserialize_msg(msg):
    # NOTE(russellb): Hang on to your hats, this road is about to
    # get a little bumpy.
    #
    # Robustness Principle:
    #    "Be strict in what you send, liberal in what you accept."
    #
    # At this point we have to do a bit of guessing about what it
    # is we just received.  Here is the set of possibilities:
    #
    # 1) We received a dict.  This could be 2 things:
    #
    #   a) Inspect it to see if it looks like a standard message envelope.
    #      If so, great!
    #
    #   b) If it doesn't look like a standard message envelope, it could
    #      either be a notification, or a message from before we added a
    #      message envelope (referred to as version 1.0).
    #      Just return the message as-is.
    #
    # 2) It's any other non-dict type.  Just return it and hope for the best.
    #    This case covers return values from rpc.call() from before message
    #    envelopes were used.  (messages to call a method were always a dict)

    if not isinstance(msg, dict):
        # See #2 above.
        return msg

    base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
    if not all(map(lambda key: key in msg, base_envelope_keys)):
        # See #1.b above.
        return msg

    # At this point we think we have the message envelope
    # format we were expecting. (#1.a above)

    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])

    return raw_msg
manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/impl_kombu.py0000664000175000017500000007623212301410454025536 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import sys
import time
import uuid

import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg

from manila.openstack.common.gettextutils import _
from manila.openstack.common import network_utils
from manila.openstack.common.rpc import amqp as rpc_amqp
from manila.openstack.common.rpc import common as rpc_common

# Configuration options for the kombu/RabbitMQ RPC driver.
kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='SSL version to use (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='The RabbitMQ broker address where a single node is '
                    'used'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='The RabbitMQ broker port where a single node is used'),
    cfg.ListOpt('rabbit_hosts',
                default=['$rabbit_host:$rabbit_port'],
                help='RabbitMQ HA cluster host:port pairs'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='the RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='the RabbitMQ password',
               secret=True),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='the RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='how frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='how long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='maximum retries with trying to connect to RabbitMQ '
                    '(the default of 0 implies an infinite retry count)'),
    cfg.BoolOpt('rabbit_durable_queues',
                default=False,
                help='use durable queues in RabbitMQ'),
    cfg.BoolOpt('rabbit_ha_queues',
                default=False,
                help='use H/A queues in RabbitMQ (x-ha-policy: all).'
                     'You need to wipe RabbitMQ database when '
                     'changing this option.'),
]

cfg.CONF.register_opts(kombu_opts)

LOG = rpc_common.LOG


def _get_queue_arguments(conf):
    """Construct the arguments for declaring a queue.

    If the rabbit_ha_queues option is set, we declare a mirrored queue
    as described here:

      http://www.rabbitmq.com/ha.html

    Setting x-ha-policy to all means that the queue will be mirrored
    to all nodes in the cluster.
    """
    return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}


class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, channel, callback, tag, **kwargs):
        """Declare a queue on an amqp channel.

        'channel' is the amqp channel to use
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        queue name, exchange name, and other kombu options are
        passed in here as a dictionary.
        """
        self.callback = callback
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect"""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel.  This will
        start the flow of messages from the queue.  Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that.  Otherwise,
        use the callback passed during __init__()

        If kwargs['nowait'] is True, then this call will block until
        a message is read.
        Messages will automatically be acked if the callback doesn't
        raise an exception
        """

        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(raw_message):
            # Decode the raw kombu message, unwrap the rpc envelope and
            # dispatch to the user callback; the message is acked regardless
            # so a poison message cannot wedge the queue.
            message = self.channel.message_to_python(raw_message)
            try:
                msg = rpc_common.deserialize_msg(message.payload)
                callback(msg)
            except Exception:
                LOG.exception(_("Failed to process message... skipping it."))
            finally:
                message.ack()

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started"""
        try:
            self.queue.cancel(self.tag)
        except KeyError, e:
            # NOTE(comstud): Kludge to get around a amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
        self.queue = None


class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'"""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Default options
        options = {'durable': False,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        exchange = kombu.entity.Exchange(name=msg_id,
                                         type='direct',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(DirectConsumer, self).__init__(channel,
                                             callback,
                                             tag,
                                             name=msg_id,
                                             exchange=exchange,
                                             routing_key=msg_id,
                                             **options)


class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'"""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
        """Init a 'topic' queue.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name, defaults to topic
        :paramtype name: str

        Other kombu options may be passed as keyword arguments
        """
        # Default options
        options = {'durable': conf.rabbit_durable_queues,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': False,
                   'exclusive': False}
        options.update(kwargs)
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        exchange = kombu.entity.Exchange(name=exchange_name,
                                         type='topic',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(TopicConsumer, self).__init__(channel,
                                            callback,
                                            tag,
                                            name=name or topic,
                                            exchange=exchange,
                                            routing_key=topic,
                                            **options)


class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'"""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.
        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Each fanout consumer gets its own uniquely-named queue bound to
        # the shared '<topic>_fanout' exchange so every consumer receives
        # every message.
        unique = uuid.uuid4().hex
        exchange_name = '%s_fanout' % topic
        queue_name = '%s_fanout_%s' % (topic, unique)

        # Default options
        options = {'durable': False,
                   'queue_arguments': _get_queue_arguments(conf),
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
                                         durable=options['durable'],
                                         auto_delete=options['auto_delete'])
        super(FanoutConsumer, self).__init__(channel, callback, tag,
                                             name=queue_name,
                                             exchange=exchange,
                                             routing_key=topic,
                                             **options)


class Publisher(object):
    """Base Publisher class"""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection"""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(exchange=self.exchange,
                                                 channel=channel,
                                                 routing_key=self.routing_key)

    def send(self, msg, timeout=None):
        """Send a message"""
        if timeout:
            #
            # AMQP TTL is in milliseconds when set in the header.
            #
            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
        else:
            self.producer.publish(msg)


class DirectPublisher(Publisher):
    """Publisher class for 'direct'"""

    def __init__(self, conf, channel, msg_id, **kwargs):
        """init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """

        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
                                              type='direct', **options)


class TopicPublisher(Publisher):
    """Publisher class for 'topic'"""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': conf.rabbit_durable_queues,
                   'auto_delete': False,
                   'exclusive': False}
        options.update(kwargs)
        exchange_name = rpc_amqp.get_control_exchange(conf)
        super(TopicPublisher, self).__init__(channel,
                                             exchange_name,
                                             topic,
                                             type='topic',
                                             **options)


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'"""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False}
        options.update(kwargs)
        super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
                                              None, type='fanout', **options)


class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'"""

    def __init__(self, conf, channel, topic, **kwargs):
        self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
        self.queue_arguments = _get_queue_arguments(conf)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=channel,
                                   exchange=self.exchange,
                                   durable=self.durable,
                                   name=self.routing_key,
                                   routing_key=self.routing_key,
                                   queue_arguments=self.queue_arguments)
        queue.declare()


class Connection(object):
    """Connection object."""

    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        # Build one kombu connection-parameter dict per configured broker
        # host; reconnect() cycles through them round-robin.
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
            }

            for sp_key, value in server_params.iteritems():
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params
        should be used for the connection (if any)"""
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = self.conf.kombu_ssl_version
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        if not ssl_params:
            # Just have the default behavior
            return True
        else:
            # Return the extended behavior
            return ssl_params

    def _connect(self, params):
        """Connect to rabbit.  Re-establish any queues that may have
        been declared before if we are reconnecting.  Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                     "%(hostname)s:%(port)d") % params)
            try:
                self.connection.release()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
                 params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                LOG.error(_('Unable to connect to AMQP server on '
                            '%(hostname)s:%(port)d after %(max_retries)d '
                            'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code.  There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)

    def ensure(self, error_callback, method, *args, **kwargs):
        # Invoke `method`, transparently reconnecting and retrying forever
        # on connection-level failures.
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError), e:
                if error_callback:
                    error_callback(e)
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
if 'timeout' not in str(e): raise if error_callback: error_callback(e) self.reconnect() def get_channel(self): """Convenience call for bin/clear_rabbit_queues""" return self.channel def close(self): """Close/release this connection""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.channel.close() self.channel = self.connection.channel() # work around 'memory' transport bug in 1.1.3 if self.memory_transport: self.channel._new_queue('ae.undeliver') self.consumers = [] def declare_consumer(self, consumer_cls, topic, callback): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.error(_("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): consumer = consumer_cls(self.conf, self.channel, topic, callback, self.consumer_num.next()) self.consumers.append(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): """Return an iterator that will consume from all queues/consumers""" info = {'do_consume': True} def _error_callback(exc): if isinstance(exc, socket.timeout): LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % str(exc)) info['do_consume'] = True def _consume(): if info['do_consume']: queues_head = self.consumers[:-1] queues_tail = self.consumers[-1] for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) info['do_consume'] = False return self.connection.drain_events(timeout=timeout) for iteration in itertools.count(0): if limit and iteration >= limit: raise StopIteration yield 
self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): """Cancel a consumer thread""" if self.consumer_thread is not None: self.consumer_thread.kill() try: self.consumer_thread.wait() except greenlet.GreenletExit: pass self.consumer_thread = None def wait_on_proxy_callbacks(self): """Wait for all proxy callback threads to exit.""" for proxy_cb in self.proxy_callbacks: proxy_cb.wait() def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): """Send to a publisher based on the publisher class""" def _error_callback(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.exception(_("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publish(): publisher = cls(self.conf, self.channel, topic, **kwargs) publisher.send(msg, timeout) self.ensure(_error_callback, _publish) def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. In nova's use, this is generally a msg_id queue used for responses for call/multicall """ self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, ), topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message""" self.publisher_send(TopicPublisher, topic, msg, timeout) def fanout_send(self, topic, msg): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): """Send a notify message on a topic""" self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) def consume(self, limit=None): 
"""Consume from all queues/consumers""" it = self.iterconsume(limit=limit) while True: try: it.next() except StopIteration: return def consume_in_thread(self): """Consumer from all queues/consumers in a greenthread""" def _consumer_thread(): try: self.consume() except greenlet.GreenletExit: return if self.consumer_thread is None: self.consumer_thread = eventlet.spawn(_consumer_thread) return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) self.proxy_callbacks.append(proxy_cb) if fanout: self.declare_fanout_consumer(topic, proxy_cb) else: self.declare_topic_consumer(topic, proxy_cb) def create_worker(self, topic, proxy, pool_name): """Create a worker that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) self.proxy_callbacks.append(proxy_cb) self.declare_topic_consumer(topic, proxy_cb, pool_name) def join_consumer_pool(self, callback, pool_name, topic, exchange_name=None): """Register as a member of a group of consumers for a given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created. 
""" callback_wrapper = rpc_amqp.CallbackWrapper( conf=self.conf, callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), ) self.proxy_callbacks.append(callback_wrapper) self.declare_topic_consumer( queue_name=pool_name, topic=topic, exchange_name=exchange_name, callback=callback_wrapper, ) def create_connection(conf, new=True): """Create a connection""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) def multicall(conf, context, topic, msg, timeout=None): """Make a call that returns multiple times.""" return rpc_amqp.multicall( conf, context, topic, msg, timeout, rpc_amqp.get_connection_pool(conf, Connection)) def call(conf, context, topic, msg, timeout=None): """Sends a message on a topic and wait for a response.""" return rpc_amqp.call( conf, context, topic, msg, timeout, rpc_amqp.get_connection_pool(conf, Connection)) def cast(conf, context, topic, msg): """Sends a message on a topic without waiting for a response.""" return rpc_amqp.cast( conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def fanout_cast(conf, context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" return rpc_amqp.fanout_cast( conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a topic to a specific server.""" return rpc_amqp.cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def fanout_cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a fanout exchange to a specific server.""" return rpc_amqp.fanout_cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def notify(conf, context, topic, msg, envelope): """Sends a notification event on a topic.""" return rpc_amqp.notify( conf, context, topic, msg, 
rpc_amqp.get_connection_pool(conf, Connection), envelope) def cleanup(): return rpc_amqp.cleanup(Connection.pool) manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/matchmaker_redis.py0000664000175000017500000001142012301410454026666 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The MatchMaker classes should accept a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ from oslo.config import cfg from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common.rpc import matchmaker as mm_common redis = importutils.try_import('redis') matchmaker_redis_opts = [ cfg.StrOpt('host', default='127.0.0.1', help='Host to locate redis'), cfg.IntOpt('port', default=6379, help='Use this port to connect to redis host.'), cfg.StrOpt('password', default=None, help='Password for Redis server. 
(optional)'), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='matchmaker_redis', title='Options for Redis-based MatchMaker') CONF.register_group(opt_group) CONF.register_opts(matchmaker_redis_opts, opt_group) LOG = logging.getLogger(__name__) class RedisExchange(mm_common.Exchange): def __init__(self, matchmaker): self.matchmaker = matchmaker self.redis = matchmaker.redis super(RedisExchange, self).__init__() class RedisTopicExchange(RedisExchange): """ Exchange where all topic keys are split, sending to second half. i.e. "compute.host" sends a message to "compute" running on "host" """ def run(self, topic): while True: member_name = self.redis.srandmember(topic) if not member_name: # If this happens, there are no # longer any members. break if not self.matchmaker.is_alive(topic, member_name): continue host = member_name.split('.', 1)[1] return [(member_name, host)] return [] class RedisFanoutExchange(RedisExchange): """ Return a list of all hosts. """ def run(self, topic): topic = topic.split('~', 1)[1] hosts = self.redis.smembers(topic) good_hosts = filter( lambda host: self.matchmaker.is_alive(topic, host), hosts) return [(x, x.split('.', 1)[1]) for x in good_hosts] class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): """ MatchMaker registering and looking-up hosts with a Redis server. """ def __init__(self): super(MatchMakerRedis, self).__init__() if not redis: raise ImportError("Failed to import module redis.") self.redis = redis.StrictRedis( host=CONF.matchmaker_redis.host, port=CONF.matchmaker_redis.port, password=CONF.matchmaker_redis.password) self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self)) self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange()) self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self)) def ack_alive(self, key, host): topic = "%s.%s" % (key, host) if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl): # If we could not update the expiration, the key # might have been pruned. 
Re-register, creating a new # key in Redis. self.register(self.topic_host[host], host) def is_alive(self, topic, host): if self.redis.ttl(host) == -1: self.expire(topic, host) return False return True def expire(self, topic, host): with self.redis.pipeline() as pipe: pipe.multi() pipe.delete(host) pipe.srem(topic, host) pipe.execute() def backend_register(self, key, key_host): with self.redis.pipeline() as pipe: pipe.multi() pipe.sadd(key, key_host) # No value is needed, we just # care if it exists. Sets aren't viable # because only keys can expire. pipe.set(key_host, '') pipe.execute() def backend_unregister(self, key, key_host): with self.redis.pipeline() as pipe: pipe.multi() pipe.srem(key, key_host) pipe.delete(key_host) pipe.execute() manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/proxy.py0000664000175000017500000001575312301410454024562 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A helper class for proxy objects to remote APIs. For more information about rpc API version numbers, see: rpc/dispatcher.py """ from manila.openstack.common import rpc class RpcProxy(object): """A helper class for rpc clients. This class is a wrapper around the RPC client API. It allows you to specify the topic and API version in a single place. This is intended to be used as a base class for a class that implements the client side of an rpc API. 
""" def __init__(self, topic, default_version): """Initialize an RpcProxy. :param topic: The topic to use for all messages. :param default_version: The default API version to request in all outgoing messages. This can be overridden on a per-message basis. """ self.topic = topic self.default_version = default_version super(RpcProxy, self).__init__() def _set_version(self, msg, vers): """Helper method to set the version in a message. :param msg: The message having a version added to it. :param vers: The version number to add to the message. """ msg['version'] = vers if vers else self.default_version def _get_topic(self, topic): """Return the topic to use for a message.""" return topic if topic else self.topic @staticmethod def make_namespaced_msg(method, namespace, **kwargs): return {'method': method, 'namespace': namespace, 'args': kwargs} @staticmethod def make_msg(method, **kwargs): return RpcProxy.make_namespaced_msg(method, None, **kwargs) def call(self, context, msg, topic=None, version=None, timeout=None): """rpc.call() a remote method. :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. :param version: (Optional) Override the requested API version in this message. :param timeout: (Optional) A timeout to use when waiting for the response. If no timeout is specified, a default timeout will be used that is usually sufficient. :returns: The return value from the remote method. """ self._set_version(msg, version) real_topic = self._get_topic(topic) try: return rpc.call(context, real_topic, msg, timeout) except rpc.common.Timeout as exc: raise rpc.common.Timeout( exc.info, real_topic, msg.get('method')) def multicall(self, context, msg, topic=None, version=None, timeout=None): """rpc.multicall() a remote method. :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. 
:param version: (Optional) Override the requested API version in this message. :param timeout: (Optional) A timeout to use when waiting for the response. If no timeout is specified, a default timeout will be used that is usually sufficient. :returns: An iterator that lets you process each of the returned values from the remote method as they arrive. """ self._set_version(msg, version) real_topic = self._get_topic(topic) try: return rpc.multicall(context, real_topic, msg, timeout) except rpc.common.Timeout as exc: raise rpc.common.Timeout( exc.info, real_topic, msg.get('method')) def cast(self, context, msg, topic=None, version=None): """rpc.cast() a remote method. :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. :param version: (Optional) Override the requested API version in this message. :returns: None. rpc.cast() does not wait on any return value from the remote method. """ self._set_version(msg, version) rpc.cast(context, self._get_topic(topic), msg) def fanout_cast(self, context, msg, topic=None, version=None): """rpc.fanout_cast() a remote method. :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. :param version: (Optional) Override the requested API version in this message. :returns: None. rpc.fanout_cast() does not wait on any return value from the remote method. """ self._set_version(msg, version) rpc.fanout_cast(context, self._get_topic(topic), msg) def cast_to_server(self, context, server_params, msg, topic=None, version=None): """rpc.cast_to_server() a remote method. :param context: The request context :param server_params: Server parameters. See rpc.cast_to_server() for details. :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. 
:param version: (Optional) Override the requested API version in this message. :returns: None. rpc.cast_to_server() does not wait on any return values. """ self._set_version(msg, version) rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) def fanout_cast_to_server(self, context, server_params, msg, topic=None, version=None): """rpc.fanout_cast_to_server() a remote method. :param context: The request context :param server_params: Server parameters. See rpc.cast_to_server() for details. :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. :param version: (Optional) Override the requested API version in this message. :returns: None. rpc.fanout_cast_to_server() does not wait on any return values. """ self._set_version(msg, version) rpc.fanout_cast_to_server(context, server_params, self._get_topic(topic), msg) manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/matchmaker.py0000664000175000017500000002767412301410454025522 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The MatchMaker classes should except a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. 
""" import contextlib import itertools import json import eventlet from oslo.config import cfg from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging matchmaker_opts = [ # Matchmaker ring file cfg.StrOpt('matchmaker_ringfile', default='/etc/nova/matchmaker_ring.json', help='Matchmaker ring file (JSON)'), cfg.IntOpt('matchmaker_heartbeat_freq', default=300, help='Heartbeat frequency'), cfg.IntOpt('matchmaker_heartbeat_ttl', default=600, help='Heartbeat time-to-live.'), ] CONF = cfg.CONF CONF.register_opts(matchmaker_opts) LOG = logging.getLogger(__name__) contextmanager = contextlib.contextmanager class MatchMakerException(Exception): """Signified a match could not be found.""" message = _("Match not found by MatchMaker.") class Exchange(object): """ Implements lookups. Subclass this to support hashtables, dns, etc. """ def __init__(self): pass def run(self, key): raise NotImplementedError() class Binding(object): """ A binding on which to perform a lookup. """ def __init__(self): pass def test(self, key): raise NotImplementedError() class MatchMakerBase(object): """ Match Maker Base Class. Build off HeartbeatMatchMakerBase if building a heartbeat-capable MatchMaker. """ def __init__(self): # Array of tuples. Index [2] toggles negation, [3] is last-if-true self.bindings = [] self.no_heartbeat_msg = _('Matchmaker does not implement ' 'registration or heartbeat.') def register(self, key, host): """ Register a host on a backend. Heartbeats, if applicable, may keepalive registration. """ pass def ack_alive(self, key, host): """ Acknowledge that a key.host is alive. Used internally for updating heartbeats, but may also be used publically to acknowledge a system is alive (i.e. rpc message successfully sent to host) """ pass def is_alive(self, topic, host): """ Checks if a host is alive. """ pass def expire(self, topic, host): """ Explicitly expire a host's registration. 
""" pass def send_heartbeats(self): """ Send all heartbeats. Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ pass def unregister(self, key, host): """ Unregister a topic. """ pass def start_heartbeat(self): """ Spawn heartbeat greenthread. """ pass def stop_heartbeat(self): """ Destroys the heartbeat greenthread. """ pass def add_binding(self, binding, rule, last=True): self.bindings.append((binding, rule, False, last)) #NOTE(ewindisch): kept the following method in case we implement the # underlying support. #def add_negate_binding(self, binding, rule, last=True): # self.bindings.append((binding, rule, True, last)) def queues(self, key): workers = [] # bit is for negate bindings - if we choose to implement it. # last stops processing rules if this matches. for (binding, exchange, bit, last) in self.bindings: if binding.test(key): workers.extend(exchange.run(key)) # Support last. if last: return workers return workers class HeartbeatMatchMakerBase(MatchMakerBase): """ Base for a heart-beat capable MatchMaker. Provides common methods for registering, unregistering, and maintaining heartbeats. """ def __init__(self): self.hosts = set() self._heart = None self.host_topic = {} super(HeartbeatMatchMakerBase, self).__init__() def send_heartbeats(self): """ Send all heartbeats. Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ for key, host in self.host_topic: self.ack_alive(key, host) def ack_alive(self, key, host): """ Acknowledge that a host.topic is alive. Used internally for updating heartbeats, but may also be used publically to acknowledge a system is alive (i.e. rpc message successfully sent to host) """ raise NotImplementedError("Must implement ack_alive") def backend_register(self, key, host): """ Implements registration logic. 
Called by register(self,key,host) """ raise NotImplementedError("Must implement backend_register") def backend_unregister(self, key, key_host): """ Implements de-registration logic. Called by unregister(self,key,host) """ raise NotImplementedError("Must implement backend_unregister") def register(self, key, host): """ Register a host on a backend. Heartbeats, if applicable, may keepalive registration. """ self.hosts.add(host) self.host_topic[(key, host)] = host key_host = '.'.join((key, host)) self.backend_register(key, key_host) self.ack_alive(key, host) def unregister(self, key, host): """ Unregister a topic. """ if (key, host) in self.host_topic: del self.host_topic[(key, host)] self.hosts.discard(host) self.backend_unregister(key, '.'.join((key, host))) LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) def start_heartbeat(self): """ Implementation of MatchMakerBase.start_heartbeat Launches greenthread looping send_heartbeats(), yielding for CONF.matchmaker_heartbeat_freq seconds between iterations. """ if len(self.hosts) == 0: raise MatchMakerException( _("Register before starting heartbeat.")) def do_heartbeat(): while True: self.send_heartbeats() eventlet.sleep(CONF.matchmaker_heartbeat_freq) self._heart = eventlet.spawn(do_heartbeat) def stop_heartbeat(self): """ Destroys the heartbeat greenthread. """ if self._heart: self._heart.kill() class DirectBinding(Binding): """ Specifies a host in the key via a '.' character Although dots are used in the key, the behavior here is that it maps directly to a host, thus direct. """ def test(self, key): if '.' in key: return True return False class TopicBinding(Binding): """ Where a 'bare' key without dots. AMQP generally considers topic exchanges to be those *with* dots, but we deviate here in terminology as the behavior here matches that of a topic exchange (whereas where there are dots, behavior matches that of a direct exchange. """ def test(self, key): if '.' 
not in key: return True return False class FanoutBinding(Binding): """Match on fanout keys, where key starts with 'fanout.' string.""" def test(self, key): if key.startswith('fanout~'): return True return False class StubExchange(Exchange): """Exchange that does nothing.""" def run(self, key): return [(key, None)] class RingExchange(Exchange): """ Match Maker where hosts are loaded from a static file containing a hashmap (JSON formatted). __init__ takes optional ring dictionary argument, otherwise loads the ringfile from CONF.mathcmaker_ringfile. """ def __init__(self, ring=None): super(RingExchange, self).__init__() if ring: self.ring = ring else: fh = open(CONF.matchmaker_ringfile, 'r') self.ring = json.load(fh) fh.close() self.ring0 = {} for k in self.ring.keys(): self.ring0[k] = itertools.cycle(self.ring[k]) def _ring_has(self, key): if key in self.ring0: return True return False class RoundRobinRingExchange(RingExchange): """A Topic Exchange based on a hashmap.""" def __init__(self, ring=None): super(RoundRobinRingExchange, self).__init__(ring) def run(self, key): if not self._ring_has(key): LOG.warn( _("No key defining hosts for topic '%s', " "see ringfile") % (key, ) ) return [] host = next(self.ring0[key]) return [(key + '.' + host, host)] class FanoutRingExchange(RingExchange): """Fanout Exchange based on a hashmap.""" def __init__(self, ring=None): super(FanoutRingExchange, self).__init__(ring) def run(self, key): # Assume starts with "fanout~", strip it for lookup. nkey = key.split('fanout~')[1:][0] if not self._ring_has(nkey): LOG.warn( _("No key defining hosts for topic '%s', " "see ringfile") % (nkey, ) ) return [] return map(lambda x: (key + '.' 
+ x, x), self.ring[nkey]) class LocalhostExchange(Exchange): """Exchange where all direct topics are local.""" def __init__(self, host='localhost'): self.host = host super(Exchange, self).__init__() def run(self, key): return [('.'.join((key.split('.')[0], self.host)), self.host)] class DirectExchange(Exchange): """ Exchange where all topic keys are split, sending to second half. i.e. "compute.host" sends a message to "compute.host" running on "host" """ def __init__(self): super(Exchange, self).__init__() def run(self, key): e = key.split('.', 1)[1] return [(key, e)] class MatchMakerRing(MatchMakerBase): """ Match Maker where hosts are loaded from a static hashmap. """ def __init__(self, ring=None): super(MatchMakerRing, self).__init__() self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) self.add_binding(DirectBinding(), DirectExchange()) self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) class MatchMakerLocalhost(MatchMakerBase): """ Match Maker where all bare topics resolve to localhost. Useful for testing. """ def __init__(self, host='localhost'): super(MatchMakerLocalhost, self).__init__() self.add_binding(FanoutBinding(), LocalhostExchange(host)) self.add_binding(DirectBinding(), DirectExchange()) self.add_binding(TopicBinding(), LocalhostExchange(host)) class MatchMakerStub(MatchMakerBase): """ Match Maker where topics are untouched. Useful for testing, or for AMQP/brokered queues. Will not work where knowledge of hosts is known (i.e. 
zeromq) """ def __init__(self): super(MatchMakerLocalhost, self).__init__() self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange()) self.add_binding(TopicBinding(), StubExchange()) manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/impl_fake.py0000664000175000017500000001331712301410454025322 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fake RPC implementation which calls proxy methods directly with no queues. Casts will block, but this is very useful for tests. """ import inspect # NOTE(russellb): We specifically want to use json, not our own jsonutils. # jsonutils has some extra logic to automatically convert objects to primitive # types so that they can be serialized. We want to catch all cases where # non-primitive types make it into this code and treat it as an error. 
import json import time import eventlet from manila.openstack.common.rpc import common as rpc_common CONSUMERS = {} class RpcContext(rpc_common.CommonRpcContext): def __init__(self, **kwargs): super(RpcContext, self).__init__(**kwargs) self._response = [] self._done = False def deepcopy(self): values = self.to_dict() new_inst = self.__class__(**values) new_inst._response = self._response new_inst._done = self._done return new_inst def reply(self, reply=None, failure=None, ending=False): if ending: self._done = True if not self._done: self._response.append((reply, failure)) class Consumer(object): def __init__(self, topic, proxy): self.topic = topic self.proxy = proxy def call(self, context, version, method, namespace, args, timeout): done = eventlet.event.Event() def _inner(): ctxt = RpcContext.from_dict(context.to_dict()) try: rval = self.proxy.dispatch(context, version, method, namespace, **args) res = [] # Caller might have called ctxt.reply() manually for (reply, failure) in ctxt._response: if failure: raise failure[0], failure[1], failure[2] res.append(reply) # if ending not 'sent'...we might have more data to # return from the function itself if not ctxt._done: if inspect.isgenerator(rval): for val in rval: res.append(val) else: res.append(rval) done.send(res) except rpc_common.ClientException as e: done.send_exception(e._exc_info[1]) except Exception as e: done.send_exception(e) thread = eventlet.greenthread.spawn(_inner) if timeout: start_time = time.time() while not done.ready(): eventlet.greenthread.sleep(1) cur_time = time.time() if (cur_time - start_time) > timeout: thread.kill() raise rpc_common.Timeout() return done.wait() class Connection(object): """Connection object.""" def __init__(self): self.consumers = [] def create_consumer(self, topic, proxy, fanout=False): consumer = Consumer(topic, proxy) self.consumers.append(consumer) if topic not in CONSUMERS: CONSUMERS[topic] = [] CONSUMERS[topic].append(consumer) def close(self): for consumer in 
self.consumers: CONSUMERS[consumer.topic].remove(consumer) self.consumers = [] def consume_in_thread(self): pass def create_connection(conf, new=True): """Create a connection""" return Connection() def check_serialize(msg): """Make sure a message intended for rpc can be serialized.""" json.dumps(msg) def multicall(conf, context, topic, msg, timeout=None): """Make a call that returns multiple times.""" check_serialize(msg) method = msg.get('method') if not method: return args = msg.get('args', {}) version = msg.get('version', None) namespace = msg.get('namespace', None) try: consumer = CONSUMERS[topic][0] except (KeyError, IndexError): return iter([None]) else: return consumer.call(context, version, method, namespace, args, timeout) def call(conf, context, topic, msg, timeout=None): """Sends a message on a topic and wait for a response.""" rv = multicall(conf, context, topic, msg, timeout) # NOTE(vish): return the last result from the multicall rv = list(rv) if not rv: return return rv[-1] def cast(conf, context, topic, msg): check_serialize(msg) try: call(conf, context, topic, msg) except Exception: pass def notify(conf, context, topic, msg, envelope): check_serialize(msg) def cleanup(): pass def fanout_cast(conf, context, topic, msg): """Cast to all consumers of a topic""" check_serialize(msg) method = msg.get('method') if not method: return args = msg.get('args', {}) version = msg.get('version', None) namespace = msg.get('namespace', None) for consumer in CONSUMERS.get(topic, []): try: consumer.call(context, version, method, namespace, args, None) except Exception: pass manila-2013.2.dev175.gbf1a399/manila/openstack/common/rpc/service.py0000664000175000017500000000526712301410454025040 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila.openstack.common.rpc import dispatcher as rpc_dispatcher from manila.openstack.common import service LOG = logging.getLogger(__name__) class Service(service.Service): """Service object for binaries running on hosts. A service enables rpc by listening to queues based on topic and host.""" def __init__(self, host, topic, manager=None): super(Service, self).__init__() self.host = host self.topic = topic if manager is None: self.manager = self else: self.manager = manager def start(self): super(Service, self).start() self.conn = rpc.create_connection(new=True) LOG.debug(_("Creating Consumer connection for Service %s") % self.topic) dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) # Share this same connection for these Consumers self.conn.create_consumer(self.topic, dispatcher, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) self.conn.create_consumer(node_topic, dispatcher, fanout=False) self.conn.create_consumer(self.topic, dispatcher, fanout=True) # Hook to allow the manager to do other initializations after # the rpc connection is created. 
if callable(getattr(self.manager, 'initialize_service_hook', None)): self.manager.initialize_service_hook(self) # Consume from all consumers in a thread self.conn.consume_in_thread() def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.conn.close() except Exception: pass super(Service, self).stop() manila-2013.2.dev175.gbf1a399/manila/openstack/common/jsonutils.py0000664000175000017500000001347112301410454024642 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ''' JSON related utilities. This module provides a few things: 1) A handy function for getting an object down to something that can be JSON serialized. See to_primitive(). 2) Wrappers around loads() and dumps(). The dumps() wrapper will automatically use to_primitive() for you if needed. 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson is available. 
''' import datetime import functools import inspect import itertools import json import types import xmlrpclib from manila.openstack.common import timeutils _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, inspect.isfunction, inspect.isgeneratorfunction, inspect.isgenerator, inspect.istraceback, inspect.isframe, inspect.iscode, inspect.isbuiltin, inspect.isroutine, inspect.isabstract] _simple_types = (types.NoneType, int, basestring, bool, float, long) def to_primitive(value, convert_instances=False, convert_datetime=True, level=0, max_depth=3): """Convert a complex object into primitives. Handy for JSON serialization. We can optionally handle instances, but since this is a recursive function, we could have cyclical data structures. To handle cyclical data structures we could track the actual objects visited in a set, but not all objects are hashable. Instead we just track the depth of the object inspections and don't go too deep. Therefore, convert_instances=True is lossy ... be aware. """ # handle obvious types first - order of basic types determined by running # full tests on nova project, resulting in the following counts: # 572754 # 460353 # 379632 # 274610 # 199918 # 114200 # 51817 # 26164 # 6491 # 283 # 19 if isinstance(value, _simple_types): return value if isinstance(value, datetime.datetime): if convert_datetime: return timeutils.strtime(value) else: return value # value of itertools.count doesn't get caught by nasty_type_tests # and results in infinite loop when list(value) is called. if type(value) == itertools.count: return unicode(value) # FIXME(vish): Workaround for LP bug 852095. Without this workaround, # tests that raise an exception in a mocked method that # has a @wrap_exception with a notifier will fail. If # we up the dependency to 0.5.4 (when it is released) we # can remove this workaround. if getattr(value, '__module__', None) == 'mox': return 'mock' if level > max_depth: return '?' 
# The try block may not be necessary after the class check above, # but just in case ... try: recursive = functools.partial(to_primitive, convert_instances=convert_instances, convert_datetime=convert_datetime, level=level, max_depth=max_depth) if isinstance(value, dict): return dict((k, recursive(v)) for k, v in value.iteritems()) elif isinstance(value, (list, tuple)): return [recursive(lv) for lv in value] # It's not clear why xmlrpclib created their own DateTime type, but # for our purposes, make it a datetime type which is explicitly # handled if isinstance(value, xmlrpclib.DateTime): value = datetime.datetime(*tuple(value.timetuple())[:6]) if convert_datetime and isinstance(value, datetime.datetime): return timeutils.strtime(value) elif hasattr(value, 'iteritems'): return recursive(dict(value.iteritems()), level=level + 1) elif hasattr(value, '__iter__'): return recursive(list(value)) elif convert_instances and hasattr(value, '__dict__'): # Likely an instance of something. Watch for cycles. # Ignore class member vars. return recursive(value.__dict__, level=level + 1) else: if any(test(value) for test in _nasty_type_tests): return unicode(value) return value except TypeError: # Class objects are tricky since they may define something like # __iter__ defined but it isn't callable as list(). return unicode(value) def dumps(value, default=to_primitive, **kwargs): return json.dumps(value, default=default, **kwargs) def loads(s): return json.loads(s) def load(s): return json.load(s) try: import anyjson except ImportError: pass else: anyjson._modules.append((__name__, 'dumps', TypeError, 'loads', ValueError, 'load')) anyjson.force_implementation(__name__) manila-2013.2.dev175.gbf1a399/manila/openstack/common/eventlet_backdoor.py0000664000175000017500000000507712301410454026305 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 OpenStack Foundation. # Administrator of the National Aeronautics and Space Administration. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gc import pprint import sys import traceback import eventlet import eventlet.backdoor import greenlet from oslo.config import cfg eventlet_backdoor_opts = [ cfg.IntOpt('backdoor_port', default=None, help='port for eventlet backdoor to listen') ] CONF = cfg.CONF CONF.register_opts(eventlet_backdoor_opts) def _dont_use_this(): print "Don't use this, just disconnect instead" def _find_objects(t): return filter(lambda o: isinstance(o, t), gc.get_objects()) def _print_greenthreads(): for i, gt in enumerate(_find_objects(greenlet.greenlet)): print i, gt traceback.print_stack(gt.gr_frame) print def _print_nativethreads(): for threadId, stack in sys._current_frames().items(): print threadId traceback.print_stack(stack) print def initialize_if_enabled(): backdoor_locals = { 'exit': _dont_use_this, # So we don't exit the entire process 'quit': _dont_use_this, # So we don't exit the entire process 'fo': _find_objects, 'pgt': _print_greenthreads, 'pnt': _print_nativethreads, } if CONF.backdoor_port is None: return None # NOTE(johannes): The standard sys.displayhook will print the value of # the last expression and set it to __builtin__._, which overwrites # the __builtin__._ that gettext sets. Let's switch to using pprint # since it won't interact poorly with gettext, and it's easier to # read the output too. 
def displayhook(val): if val is not None: pprint.pprint(val) sys.displayhook = displayhook sock = eventlet.listen(('localhost', CONF.backdoor_port)) port = sock.getsockname()[1] eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, locals=backdoor_locals) return port manila-2013.2.dev175.gbf1a399/manila/openstack/common/lockutils.py0000664000175000017500000002337012301410454024620 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import errno import functools import os import shutil import subprocess import sys import tempfile import threading import time import weakref from oslo.config import cfg from manila.openstack.common import fileutils from manila.openstack.common.gettextutils import _ # noqa from manila.openstack.common import local from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) util_opts = [ cfg.BoolOpt('disable_process_locking', default=False, help='Whether to disable inter-process locks'), cfg.StrOpt('lock_path', default=os.environ.get("MANILA_LOCK_PATH"), help=('Directory to use for lock files.')) ] CONF = cfg.CONF CONF.register_opts(util_opts) def set_defaults(lock_path): cfg.set_defaults(util_opts, lock_path=lock_path) class _InterProcessLock(object): """Lock implementation which allows multiple locks, working around issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does not require any cleanup. Since the lock is always held on a file descriptor rather than outside of the process, the lock gets dropped automatically if the process crashes, even if __exit__ is not executed. There are no guarantees regarding usage by multiple green threads in a single process here. This lock works only between processes. Exclusive access between local threads should be achieved using the semaphores in the @synchronized decorator. Note these locks are released when the descriptor is closed, so it's not safe to close the file descriptor while another green thread holds the lock. Just opening and closing the lock file can break synchronisation, so lock files must be accessed only using this abstraction. """ def __init__(self, name): self.lockfile = None self.fname = name def __enter__(self): self.lockfile = open(self.fname, 'w') while True: try: # Using non-blocking locks since green threads are not # patched to deal with blocking locking calls. 
# Also upon reading the MSDN docs for locking(), it seems # to have a laughable 10 attempts "blocking" mechanism. self.trylock() return self except IOError as e: if e.errno in (errno.EACCES, errno.EAGAIN): # external locks synchronise things like iptables # updates - give it some time to prevent busy spinning time.sleep(0.01) else: raise def __exit__(self, exc_type, exc_val, exc_tb): try: self.unlock() self.lockfile.close() except IOError: LOG.exception(_("Could not release the acquired lock `%s`"), self.fname) def trylock(self): raise NotImplementedError() def unlock(self): raise NotImplementedError() class _WindowsLock(_InterProcessLock): def trylock(self): msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) def unlock(self): msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) class _PosixLock(_InterProcessLock): def trylock(self): fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) def unlock(self): fcntl.lockf(self.lockfile, fcntl.LOCK_UN) if os.name == 'nt': import msvcrt InterProcessLock = _WindowsLock else: import fcntl InterProcessLock = _PosixLock _semaphores = weakref.WeakValueDictionary() _semaphores_lock = threading.Lock() @contextlib.contextmanager def lock(name, lock_file_prefix=None, external=False, lock_path=None): """Context based lock This function yields a `threading.Semaphore` instance (if we don't use eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is True, in which case, it'll yield an InterProcessLock instance. :param lock_file_prefix: The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. :param external: The external keyword argument denotes whether this lock should work across multiple processes. This means that if two different workers both run a a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. 
:param lock_path: The lock_path keyword argument is used to specify a special location for external lock files to live. If nothing is set, then CONF.lock_path is used as a default. """ with _semaphores_lock: try: sem = _semaphores[name] except KeyError: sem = threading.Semaphore() _semaphores[name] = sem with sem: LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) # NOTE(mikal): I know this looks odd if not hasattr(local.strong_store, 'locks_held'): local.strong_store.locks_held = [] local.strong_store.locks_held.append(name) try: if external and not CONF.disable_process_locking: LOG.debug(_('Attempting to grab file lock "%(lock)s"'), {'lock': name}) # We need a copy of lock_path because it is non-local local_lock_path = lock_path or CONF.lock_path if not local_lock_path: raise cfg.RequiredOptError('lock_path') if not os.path.exists(local_lock_path): fileutils.ensure_tree(local_lock_path) LOG.info(_('Created lock path: %s'), local_lock_path) def add_prefix(name, prefix): if not prefix: return name sep = '' if prefix.endswith('-') else '-' return '%s%s%s' % (prefix, sep, name) # NOTE(mikal): the lock name cannot contain directory # separators lock_file_name = add_prefix(name.replace(os.sep, '_'), lock_file_prefix) lock_file_path = os.path.join(local_lock_path, lock_file_name) try: lock = InterProcessLock(lock_file_path) with lock as lock: LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), {'lock': name, 'path': lock_file_path}) yield lock finally: LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), {'lock': name, 'path': lock_file_path}) else: yield sem finally: local.strong_store.locks_held.remove(name) def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): """Synchronization decorator. Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one thread will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... 
@synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. """ def wrap(f): @functools.wraps(f) def inner(*args, **kwargs): try: with lock(name, lock_file_prefix, external, lock_path): LOG.debug(_('Got semaphore / lock "%(function)s"'), {'function': f.__name__}) return f(*args, **kwargs) finally: LOG.debug(_('Semaphore / lock released "%(function)s"'), {'function': f.__name__}) return inner return wrap def synchronized_with_prefix(lock_file_prefix): """Partial object generator for the synchronization decorator. Redefine @synchronized in each project like so:: (in nova/utils.py) from nova.openstack.common import lockutils synchronized = lockutils.synchronized_with_prefix('nova-') (in nova/foo.py) from nova import utils @utils.synchronized('mylock') def bar(self, *args): ... The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. """ return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) def main(argv): """Create a dir for locks and pass it to command from arguments If you run this: python -m openstack.common.lockutils python setup.py testr a temporary directory will be created for all your locks and passed to all your tests in an environment variable. The temporary dir will be deleted afterwards and the return value will be preserved. """ lock_dir = tempfile.mkdtemp() os.environ["MANILA_LOCK_PATH"] = lock_dir try: ret_val = subprocess.call(argv[1:]) finally: shutil.rmtree(lock_dir, ignore_errors=True) return ret_val if __name__ == '__main__': sys.exit(main(sys.argv)) manila-2013.2.dev175.gbf1a399/manila/openstack/common/fileutils.py0000664000175000017500000000201512301410454024600 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import os def ensure_tree(path): """Create a directory (and any ancestor directories required) :param path: Directory to create """ try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST: if not os.path.isdir(path): raise else: raise manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/0000775000175000017500000000000012301410516024205 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/weights/0000775000175000017500000000000012301410516025657 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/weights/__init__.py0000664000175000017500000000243112301410454027771 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler host weights """ from manila.openstack.common.scheduler import weight class WeighedHost(weight.WeighedObject): def to_dict(self): return { 'weight': self.weight, 'host': self.obj.host, } def __repr__(self): return ("WeighedHost [host: %s, weight: %s]" % (self.obj.host, self.weight)) class BaseHostWeigher(weight.BaseWeigher): """Base class for host weights.""" pass class HostWeightHandler(weight.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filters/0000775000175000017500000000000012301410516025655 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filters/capabilities_filter.py0000664000175000017500000000471212301410454032232 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.openstack.common import log as logging from manila.openstack.common.scheduler import filters from manila.openstack.common.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) class CapabilitiesFilter(filters.BaseHostFilter): """HostFilter to work with resource (instance & share) type records.""" def _satisfies_extra_specs(self, capabilities, resource_type): """Check that the capabilities provided by the services satisfy the extra specs associated with the instance type.""" extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True for key, req in extra_specs.iteritems(): # Either not scope format, or in capabilities scope scope = key.split(':') if len(scope) > 1 and scope[0] != "capabilities": continue elif scope[0] == "capabilities": del scope[0] cap = capabilities for index in range(0, len(scope)): try: cap = cap.get(scope[index], None) except AttributeError: return False if cap is None: return False if not extra_specs_ops.match(cap, req): return False return True def host_passes(self, host_state, filter_properties): """Return a list of hosts that can create instance_type.""" # Note(zhiteng) Currently only Manila and Nova are using # this filter, so the resource type is either instance or # volume. resource_type = filter_properties.get('resource_type') if not self._satisfies_extra_specs(host_state.capabilities, resource_type): return False return True manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filters/availability_zone_filter.py0000664000175000017500000000221112301410454033276 0ustar chuckchuck00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.openstack.common.scheduler import filters class AvailabilityZoneFilter(filters.BaseHostFilter): """Filters Hosts by availability zone.""" def host_passes(self, host_state, filter_properties): spec = filter_properties.get('request_spec', {}) props = spec.get('resource_properties', []) availability_zone = props.get('availability_zone') if availability_zone: return availability_zone == host_state.service['availability_zone'] return True manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filters/__init__.py0000664000175000017500000000266412301410454027777 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler host filters """ from manila.openstack.common import log as logging from manila.openstack.common.scheduler import filter LOG = logging.getLogger(__name__) class BaseHostFilter(filter.BaseFilter): """Base class for host filters.""" def _filter_one(self, obj, filter_properties): """Return True if the object passes the filter, otherwise False.""" return self.host_passes(obj, filter_properties) def host_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class HostFilterHandler(filter.BaseFilterHandler): def __init__(self, namespace): super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filters/extra_specs_ops.py0000664000175000017500000000444012301410454031433 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from manila.openstack.common import strutils # 1. The following operations are supported: # =, s==, s!=, s>=, s>, s<=, s<, , , , ==, !=, >=, <= # 2. Note that is handled in a different way below. # 3. If the first word in the extra_specs is not one of the operators, # it is ignored. 
_op_methods = {'=': lambda x, y: float(x) >= float(y), '': lambda x, y: y in x, '': lambda x, y: (strutils.bool_from_string(x) is strutils.bool_from_string(y)), '==': lambda x, y: float(x) == float(y), '!=': lambda x, y: float(x) != float(y), '>=': lambda x, y: float(x) >= float(y), '<=': lambda x, y: float(x) <= float(y), 's==': operator.eq, 's!=': operator.ne, 's<': operator.lt, 's<=': operator.le, 's>': operator.gt, 's>=': operator.ge} def match(value, req): words = req.split() op = method = None if words: op = words.pop(0) method = _op_methods.get(op) if op != '' and not method: return value == req if value is None: return False if op == '': # Ex: v1 v2 v3 while True: if words.pop(0) == value: return True if not words: break op = words.pop(0) # remove a keyword if not words: break return False try: if words and method(value, words[0]): return True except ValueError: pass return False manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filters/json_filter.py0000664000175000017500000001145312301410454030552 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from manila.openstack.common import jsonutils from manila.openstack.common.scheduler import filters class JsonFilter(filters.BaseHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts. 
""" def _op_compare(self, args, op): """Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. """ if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, host_state): """Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the HostState class. 
If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(host_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item, None) if obj is None: return None return obj def _process_filter(self, query, host_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) elif isinstance(arg, basestring): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def host_passes(self, host_state, filter_properties): """Return a list of hosts that can fulfill the requirements specified in the query. """ # TODO(zhiteng) Add description for filter_properties structure # and scheduler_hints. try: query = filter_properties['scheduler_hints']['query'] except KeyError: query = None if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), host_state) if isinstance(result, list): # If any succeeded, include the host result = any(result) if result: # Filter it out. return True return False manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/__init__.py0000664000175000017500000000000012301410454026305 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/scheduler/filter.py0000664000175000017500000000470212301410454026050 0ustar chuckchuck00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
"""Filter support."""

import inspect


class BaseFilter(object):
    """Base class for all filter classes."""

    def _filter_one(self, obj, filter_properties):
        """Return True if it passes the filter, False otherwise.

        Override this in a subclass.
        """
        return True

    def filter_all(self, filter_obj_list, filter_properties):
        """Yield objects that pass the filter.

        Can be overriden in a subclass, if you need to base filtering
        decisions on all objects.  Otherwise, one can just override
        _filter_one() to filter a single object.
        """
        for candidate in filter_obj_list:
            if self._filter_one(candidate, filter_properties):
                yield candidate


class BaseFilterHandler(object):
    """Base class to handle loading filter classes.

    This class should be subclassed where one needs to use filters.
    """

    def __init__(self, filter_class_type, filter_namespace):
        # NOTE(review): stevedore is imported lazily so that merely
        # importing this module does not require the third-party
        # dependency; behavior is unchanged for all callers that
        # actually construct a handler.
        from stevedore import extension
        self.namespace = filter_namespace
        self.filter_class_type = filter_class_type
        self.filter_manager = extension.ExtensionManager(filter_namespace)

    def _is_correct_class(self, obj):
        """Return whether an object is a class of the correct type and
        is not prefixed with an underscore.
        """
        return (inspect.isclass(obj) and
                not obj.__name__.startswith('_') and
                issubclass(obj, self.filter_class_type))

    def get_all_classes(self):
        """Return every correctly-typed plugin class in the namespace."""
        return [ext.plugin for ext in self.filter_manager
                if self._is_correct_class(ext.plugin)]

    def get_filtered_objects(self, filter_classes, objs, filter_properties):
        """Run ``objs`` through each filter class in turn; return the
        surviving objects as a list.
        """
        for filter_cls in filter_classes:
            objs = filter_cls().filter_all(objs, filter_properties)
        return list(objs)
""" constant = self._weight_multiplier() for obj in weighed_obj_list: obj.weight += (constant * self._weigh_object(obj.obj, weight_properties)) class BaseWeightHandler(object): object_class = WeighedObject def __init__(self, weighed_object_type, weight_namespace): self.namespace = weight_namespace self.weighed_object_type = weighed_object_type self.weight_manager = extension.ExtensionManager(weight_namespace) def _is_correct_class(self, obj): """Return whether an object is a class of the correct type and is not prefixed with an underscore. """ return (inspect.isclass(obj) and not obj.__name__.startswith('_') and issubclass(obj, self.weighed_object_type)) def get_all_classes(self): return [x.plugin for x in self.weight_manager if self._is_correct_class(x.plugin)] def get_weighed_objects(self, weigher_classes, obj_list, weighing_properties): """Return a sorted (highest score first) list of WeighedObjects.""" if not obj_list: return [] weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] for weigher_cls in weigher_classes: weigher = weigher_cls() weigher.weigh_objects(weighed_objs, weighing_properties) return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) manila-2013.2.dev175.gbf1a399/manila/openstack/common/threadgroup.py0000664000175000017500000000667212301410454025141 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from eventlet import greenlet
from eventlet import greenpool
from eventlet import greenthread

from manila.openstack.common import log as logging
from manila.openstack.common import loopingcall

LOG = logging.getLogger(__name__)


def _thread_done(gt, *args, **kwargs):
    """GreenThread.link() callback: notify the ThreadGroup we finished."""
    kwargs['group'].thread_done(kwargs['thread'])


class Thread(object):
    """Wrapper around a greenthread holding a reference to its ThreadGroup.

    Notifies the group when the greenthread completes so the group can
    drop it from its threads list.
    """

    def __init__(self, thread, group):
        self.thread = thread
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        self.thread.kill()

    def wait(self):
        return self.thread.wait()


class ThreadGroup(object):
    """Tracks timers and greenthreads so they can be stopped together,
    and provides a simple API for adding looping-call timers.
    """

    def __init__(self, thread_pool_size=10):
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        """Start a FixedIntervalLoopingCall for ``callback`` and track it."""
        pulse = loopingcall.FixedIntervalLoopingCall(callback,
                                                     *args, **kwargs)
        pulse.start(interval=interval, initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        """Spawn ``callback`` in the pool and track the wrapping Thread."""
        gt = self.pool.spawn(callback, *args, **kwargs)
        self.threads.append(Thread(gt, self))

    def thread_done(self, thread):
        """Forget a finished thread (invoked via _thread_done)."""
        self.threads.remove(thread)

    def stop(self):
        """Kill tracked threads (except the caller) and stop all timers."""
        current = greenthread.getcurrent()
        # NOTE(review): ``t is current`` compares a Thread wrapper with a
        # raw greenthread, so it can never match — presumably intended to
        # be ``t.thread is current``; preserved as-is. TODO confirm.
        for t in self.threads:
            if t is current:
                # don't kill the current thread.
                continue
            try:
                t.stop()
            except Exception as ex:
                LOG.exception(ex)

        for timer in self.timers:
            try:
                timer.stop()
            except Exception as ex:
                LOG.exception(ex)
        self.timers = []

    def wait(self):
        """Wait for all timers and threads (except the caller) to finish."""
        for timer in self.timers:
            try:
                timer.wait()
            except greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)

        current = greenthread.getcurrent()
        for t in self.threads:
            if t is current:
                continue
            try:
                t.wait()
            except greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
""" import ConfigParser import logging import os import pwd import signal import subprocess import sys RC_UNAUTHORIZED = 99 RC_NOCOMMAND = 98 RC_BADCONFIG = 97 RC_NOEXECFOUND = 96 def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _exit_error(execname, message, errorcode, log=True): print "%s: %s" % (execname, message) if log: logging.error(message) sys.exit(errorcode) def main(): # Split arguments, require at least a command execname = sys.argv.pop(0) if len(sys.argv) < 2: _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) configfile = sys.argv.pop(0) userargs = sys.argv[:] # Add ../ to sys.path to allow running from branch possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, "manila", "__init__.py")): sys.path.insert(0, possible_topdir) from manila.openstack.common.rootwrap import wrapper # Load configuration try: rawconfig = ConfigParser.RawConfigParser() rawconfig.read(configfile) config = wrapper.RootwrapConfig(rawconfig) except ValueError as exc: msg = "Incorrect value in %s: %s" % (configfile, exc.message) _exit_error(execname, msg, RC_BADCONFIG, log=False) except ConfigParser.Error: _exit_error(execname, "Incorrect configuration file: %s" % configfile, RC_BADCONFIG, log=False) if config.use_syslog: wrapper.setup_syslog(execname, config.syslog_log_facility, config.syslog_log_level) # Execute command if it matches any of the loaded filters filters = wrapper.load_filters(config.filters_path) try: filtermatch = wrapper.match_filter(filters, userargs, exec_dirs=config.exec_dirs) if filtermatch: command = filtermatch.get_command(userargs, exec_dirs=config.exec_dirs) if config.use_syslog: logging.info("(%s > %s) Executing %s (filter match = %s)" % ( os.getlogin(), pwd.getpwuid(os.getuid())[0], command, filtermatch.name)) obj 
= subprocess.Popen(command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr, preexec_fn=_subprocess_setup, env=filtermatch.get_environment(userargs)) obj.wait() sys.exit(obj.returncode) except wrapper.FilterMatchNotExecutable as exc: msg = ("Executable not found: %s (filter match = %s)" % (exc.match.exec_path, exc.match.name)) _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) except wrapper.NoFilterMatched: msg = ("Unauthorized command: %s (no filter matched)" % ' '.join(userargs)) _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) manila-2013.2.dev175.gbf1a399/manila/openstack/common/rootwrap/wrapper.py0000664000175000017500000001252712301410454026146 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ConfigParser import logging import logging.handlers import os import string from manila.openstack.common.rootwrap import filters class NoFilterMatched(Exception): """This exception is raised when no filter matched.""" pass class FilterMatchNotExecutable(Exception): """ This exception is raised when a filter matched but no executable was found. 
""" def __init__(self, match=None, **kwargs): self.match = match class RootwrapConfig(object): def __init__(self, config): # filters_path self.filters_path = config.get("DEFAULT", "filters_path").split(",") # exec_dirs if config.has_option("DEFAULT", "exec_dirs"): self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") else: # Use system PATH if exec_dirs is not specified self.exec_dirs = os.environ["PATH"].split(':') # syslog_log_facility if config.has_option("DEFAULT", "syslog_log_facility"): v = config.get("DEFAULT", "syslog_log_facility") facility_names = logging.handlers.SysLogHandler.facility_names self.syslog_log_facility = getattr(logging.handlers.SysLogHandler, v, None) if self.syslog_log_facility is None and v in facility_names: self.syslog_log_facility = facility_names.get(v) if self.syslog_log_facility is None: raise ValueError('Unexpected syslog_log_facility: %s' % v) else: default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG self.syslog_log_facility = default_facility # syslog_log_level if config.has_option("DEFAULT", "syslog_log_level"): v = config.get("DEFAULT", "syslog_log_level") self.syslog_log_level = logging.getLevelName(v.upper()) if (self.syslog_log_level == "Level %s" % v.upper()): raise ValueError('Unexepected syslog_log_level: %s' % v) else: self.syslog_log_level = logging.ERROR # use_syslog if config.has_option("DEFAULT", "use_syslog"): self.use_syslog = config.getboolean("DEFAULT", "use_syslog") else: self.use_syslog = False def setup_syslog(execname, facility, level): rootwrap_logger = logging.getLogger() rootwrap_logger.setLevel(level) handler = logging.handlers.SysLogHandler(address='/dev/log', facility=facility) handler.setFormatter(logging.Formatter( os.path.basename(execname) + ': %(message)s')) rootwrap_logger.addHandler(handler) def build_filter(class_name, *args): """Returns a filter object of class class_name""" if not hasattr(filters, class_name): logging.warning("Skipping unknown filter class (%s) specified " 
"in filter definitions" % class_name) return None filterclass = getattr(filters, class_name) return filterclass(*args) def load_filters(filters_path): """Load filters from a list of directories""" filterlist = [] for filterdir in filters_path: if not os.path.isdir(filterdir): continue for filterfile in os.listdir(filterdir): filterconfig = ConfigParser.RawConfigParser() filterconfig.read(os.path.join(filterdir, filterfile)) for (name, value) in filterconfig.items("Filters"): filterdefinition = [string.strip(s) for s in value.split(',')] newfilter = build_filter(*filterdefinition) if newfilter is None: continue newfilter.name = name filterlist.append(newfilter) return filterlist def match_filter(filters, userargs, exec_dirs=[]): """ Checks user command and arguments through command filters and returns the first matching filter. Raises NoFilterMatched if no filter matched. Raises FilterMatchNotExecutable if no executable was found for the best filter match. """ first_not_executable_filter = None for f in filters: if f.match(userargs): # Try other filters if executable is absent if not f.get_exec(exec_dirs=exec_dirs): if not first_not_executable_filter: first_not_executable_filter = f continue # Otherwise return matching filter for execution return f if first_not_executable_filter: # A filter matched, but no executable was found for it raise FilterMatchNotExecutable(match=first_not_executable_filter) # No filter matched raise NoFilterMatched() manila-2013.2.dev175.gbf1a399/manila/openstack/common/rootwrap/__init__.py0000664000175000017500000000125512301410454026221 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/openstack/common/rootwrap/filters.py0000664000175000017500000002007712301410454026135 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os
import re


class CommandFilter(object):
    """Command filter only checking that the 1st argument matches exec_path."""

    def __init__(self, exec_path, run_as, *args):
        self.name = ''
        self.exec_path = exec_path
        self.run_as = run_as
        self.args = args
        self.real_exec = None

    def get_exec(self, exec_dirs=[]):
        """Returns existing executable, or empty string if none found."""
        if self.real_exec is not None:
            return self.real_exec
        self.real_exec = ""
        if self.exec_path.startswith('/'):
            # Absolute path: accept it only if it is executable.
            if os.access(self.exec_path, os.X_OK):
                self.real_exec = self.exec_path
        else:
            # Relative name: search the configured exec_dirs.
            for binary_path in exec_dirs:
                candidate = os.path.join(binary_path, self.exec_path)
                if os.access(candidate, os.X_OK):
                    self.real_exec = candidate
                    break
        return self.real_exec

    def match(self, userargs):
        """Only check that the first argument (command) matches exec_path."""
        return os.path.basename(self.exec_path) == userargs[0]

    def get_command(self, userargs, exec_dirs=[]):
        """Returns command to execute (with sudo -u if run_as != root)."""
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        if self.run_as != 'root':
            # Used to run commands at lesser privileges
            return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
        return [to_exec] + userargs[1:]

    def get_environment(self, userargs):
        """Returns specific environment to set, None if none."""
        return None


class RegExpFilter(CommandFilter):
    """Command filter doing regexp matching for every argument."""

    def match(self, userargs):
        # Early skip if command or number of args don't match
        if len(self.args) != len(userargs):
            # DENY: argument numbers don't match
            return False
        # Compare each arg (anchoring pattern explicitly at end of string)
        for (pattern, arg) in zip(self.args, userargs):
            try:
                if not re.match(pattern + '$', arg):
                    break
            except re.error:
                # DENY: Badly-formed filter
                return False
        else:
            # ALLOW: All arguments matched
            return True
        # DENY: Some arguments did not match
        return False


class PathFilter(CommandFilter):
    """Command filter checking that path arguments are within given dirs.

    One can specify the following constraints for command arguments:
        1) pass     - pass an argument as is to the resulting command
        2) some_str - check if an argument is equal to the given string
        3) abs path - check if a path argument is within the given base dir

    A typical rootwrapper filter entry looks like this:
        # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
        chown: PathFilter, /bin/chown, root, nova, /var/lib/images
    """

    def match(self, userargs):
        command, arguments = userargs[0], userargs[1:]

        equal_args_num = len(self.args) == len(arguments)
        exec_is_valid = super(PathFilter, self).match(userargs)
        args_equal_or_pass = all(
            arg == 'pass' or arg == value
            for arg, value in zip(self.args, arguments)
            if not os.path.isabs(arg)  # arguments not specifying abs paths
        )
        paths_are_within_base_dirs = all(
            os.path.commonprefix([arg, os.path.realpath(value)]) == arg
            for arg, value in zip(self.args, arguments)
            if os.path.isabs(arg)  # arguments specifying abs paths
        )

        return (equal_args_num and
                exec_is_valid and
                args_equal_or_pass and
                paths_are_within_base_dirs)

    def get_command(self, userargs, exec_dirs=[]):
        command, arguments = userargs[0], userargs[1:]

        # convert path values to canonical ones; copy other args as is
        args = [os.path.realpath(value) if os.path.isabs(arg) else value
                for arg, value in zip(self.args, arguments)]

        return super(PathFilter, self).get_command([command] + args,
                                                   exec_dirs)


class DnsmasqFilter(CommandFilter):
    """Specific filter for the dnsmasq call (which includes env)."""

    CONFIG_FILE_ARG = 'CONFIG_FILE'

    def match(self, userargs):
        if (userargs[0] == 'env' and
                userargs[1].startswith(self.CONFIG_FILE_ARG) and
                userargs[2].startswith('NETWORK_ID=') and
                userargs[3] == 'dnsmasq'):
            return True
        return False

    def get_command(self, userargs, exec_dirs=[]):
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        dnsmasq_pos = userargs.index('dnsmasq')
        return [to_exec] + userargs[dnsmasq_pos + 1:]

    def get_environment(self, userargs):
        """Extract CONFIG_FILE/NETWORK_ID from the env-style prefix."""
        env = os.environ.copy()
        env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1]
        env['NETWORK_ID'] = userargs[2].split('=')[-1]
        return env


class DeprecatedDnsmasqFilter(DnsmasqFilter):
    """Variant of dnsmasq filter to support old-style FLAGFILE."""
    CONFIG_FILE_ARG = 'FLAGFILE'


class KillFilter(CommandFilter):
    """Specific filter for the kill calls.

    1st argument is the user to run /bin/kill under
    2nd argument is the location of the affected executable
    Subsequent arguments list the accepted signals (if any)

    This filter relies on /proc to accurately determine affected
    executable, so it will only work on procfs-capable systems (not OSX).
    """

    def __init__(self, *args):
        super(KillFilter, self).__init__("/bin/kill", *args)

    def match(self, userargs):
        if userargs[0] != "kill":
            return False
        args = list(userargs)
        if len(args) == 3:
            # A specific signal is requested
            signal = args.pop(1)
            if signal not in self.args[1:]:
                # Requested signal not in accepted list
                return False
        else:
            if len(args) != 2:
                # Incorrect number of arguments
                return False
            if len(self.args) > 1:
                # No signal requested, but filter requires specific signal
                return False
        try:
            command = os.readlink("/proc/%d/exe" % int(args[1]))
            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
            # the end if an executable is updated or deleted
            if command.endswith(" (deleted)"):
                command = command[:command.rindex(" ")]
            if command != self.args[0]:
                # Affected executable does not match
                return False
        except (ValueError, OSError):
            # Incorrect PID
            return False
        return True


class ReadFileFilter(CommandFilter):
    """Specific filter for the utils.read_file_as_root call."""

    def __init__(self, file_path, *args):
        self.file_path = file_path
        super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)

    def match(self, userargs):
        if userargs[0] != 'cat':
            return False
        if userargs[1] != self.file_path:
            return False
        if len(userargs) != 2:
            return False
        return True
def parse_host_port(address, default_port=None):
    """Interpret a string as a host:port pair.

    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
    [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)
    """
    if address[0] == '[':
        # Escaped ipv6: "[host]" or "[host]:port"
        host, tail = address[1:].split(']')
        port = tail.split(':')[1] if ':' in tail else default_port
    elif address.count(':') == 1:
        host, port = address.split(':')
    else:
        # 0 colons means ipv4, >1 means (unescaped) ipv6.
        # We prohibit unescaped ipv6 addresses with port.
        host = address
        port = default_port

    return (host, None if port is None else int(port))
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, basestring):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, basestring):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow.

    Honors an override installed by set_time_override(): either a
    single constant datetime, or a list of datetimes consumed one
    per call.
    """
    if utcnow.override_time:
        try:
            return utcnow.override_time.pop(0)
        except AttributeError:
            # Scalar override (a plain datetime, not a list).
            return utcnow.override_time
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formated date from timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


def set_time_override(override_time=None):
    """Override utils.utcnow to return a constant time or a list thereof,
    one at a time.

    NOTE(review): the original default ``override_time=utcnow()`` was
    evaluated once at import time, so parameterless calls froze time at
    module import rather than at the call.  Evaluating the fallback at
    call time fixes that; explicit arguments behave exactly as before.
    """
    if override_time is None:
        override_time = utcnow()
    utcnow.override_time = override_time


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    NOTE(review): the original list branch (``for dt in ...: dt +=
    timedelta``) only rebound the loop variable and never changed the
    stored list; rebuilt the list so list overrides actually advance.
    """
    assert utcnow.override_time is not None
    try:
        # List override: advance every queued datetime.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Scalar override (iterating a datetime raises TypeError).
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))


def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None


def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return dict(day=now.day, month=now.month, year=now.year,
                hour=now.hour, minute=now.minute, second=now.second,
                microsecond=now.microsecond)


def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    return datetime.datetime(day=tyme['day'], month=tyme['month'],
                             year=tyme['year'], hour=tyme['hour'],
                             minute=tyme['minute'], second=tyme['second'],
                             microsecond=tyme['microsecond'])


def delta_seconds(before, after):
    """Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    try:
        return delta.total_seconds()
    except AttributeError:
        # Fallback for Python < 2.7 timedelta without total_seconds().
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
""" import contextlib import logging import sys import traceback from manila.openstack.common.gettextutils import _ @contextlib.contextmanager def save_and_reraise_exception(): """Save current exception, run some code and then re-raise. In some cases the exception context can be cleared, resulting in None being attempted to be re-raised after an exception handler is run. This can happen when eventlet switches greenthreads or when running an exception handler, code raises and catches an exception. In both cases the exception context will be cleared. To work around this, we save the exception state, run handler code, and then re-raise the original exception. If another exception occurs, the saved exception is logged and the new exception is re-raised. """ type_, value, tb = sys.exc_info() try: yield except Exception: logging.error(_('Original exception being dropped: %s'), traceback.format_exception(type_, value, tb)) raise raise type_, value, tb manila-2013.2.dev175.gbf1a399/manila/openstack/common/__init__.py0000664000175000017500000000121612301410454024341 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/openstack/common/processutils.py0000664000175000017500000001555012301410454025347 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ import os import random import shlex import signal from eventlet.green import subprocess from eventlet import greenthread from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class UnknownArgumentError(Exception): def __init__(self, message=None): super(UnknownArgumentError, self).__init__(message) class ProcessExecutionError(Exception): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = "Unexpected error while running command." if exit_code is None: exit_code = '-' message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (description, cmd, exit_code, stdout, stderr)) super(ProcessExecutionError, self).__init__(message) class NoRootWrapSpecified(Exception): def __init__(self, message=None): super(NoRootWrapSpecified, self).__init__(message) def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def execute(*cmd, **kwargs): """ Helper method to shell out and execute a command through subprocess with optional retry. :param cmd: Passed to subprocess.Popen. 
:type cmd: string :param process_input: Send to opened process. :type proces_input: string :param check_exit_code: Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise :class:`ProcessExecutionError` unless program exits with one of these code. :type check_exit_code: boolean, int, or [int] :param delay_on_retry: True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :type delay_on_retry: boolean :param attempts: How many times to retry cmd. :type attempts: int :param run_as_root: True | False. Defaults to False. If set to True, the command is prefixed by the command specified in the root_helper kwarg. :type run_as_root: boolean :param root_helper: command to prefix to commands called with run_as_root=True :type root_helper: string :param shell: whether or not there should be a shell used to execute this command. Defaults to false. :type shell: boolean :returns: (stdout, stderr) from process execution :raises: :class:`UnknownArgumentError` on receiving unknown arguments :raises: :class:`ProcessExecutionError` """ process_input = kwargs.pop('process_input', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) run_as_root = kwargs.pop('run_as_root', False) root_helper = kwargs.pop('root_helper', '') shell = kwargs.pop('shell', False) if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code check_exit_code = [0] elif isinstance(check_exit_code, int): check_exit_code = [check_exit_code] if len(kwargs): raise UnknownArgumentError(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) if run_as_root and os.geteuid() != 0: if not root_helper: raise NoRootWrapSpecified( message=('Command requested root, but did not specify a root ' 'helper.')) cmd = shlex.split(root_helper) + list(cmd) cmd = map(str, cmd) while attempts > 0: attempts -= 1 try: LOG.debug(_('Running cmd 
(subprocess): %s'), ' '.join(cmd)) _PIPE = subprocess.PIPE # pylint: disable=E1101 if os.name == 'nt': preexec_fn = None close_fds = False else: preexec_fn = _subprocess_setup close_fds = True obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, close_fds=close_fds, preexec_fn=preexec_fn, shell=shell) result = None if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 if _returncode: LOG.debug(_('Result was %s') % _returncode) if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result raise ProcessExecutionError(exit_code=_returncode, stdout=stdout, stderr=stderr, cmd=' '.join(cmd)) return result except ProcessExecutionError: if not attempts: raise else: LOG.debug(_('%r failed. Retrying.'), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/0000775000175000017500000000000012301410516024046 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/rpc_notifier.py0000664000175000017500000000323512301410454027107 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from manila.openstack.common import context as req_context from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import rpc LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'notification_topics', default=['notifications', ], help='AMQP topic used for openstack notifications') CONF = cfg.CONF CONF.register_opt(notification_topic_opt) def notify(context, message): """Sends a notification via RPC""" if not context: context = req_context.get_admin_context() priority = message.get('priority', CONF.default_notification_level) priority = priority.lower() for topic in CONF.notification_topics: topic = '%s.%s' % (topic, priority) try: rpc.notify(context, topic, message) except Exception: LOG.exception(_("Could not send notification to %(topic)s. " "Payload=%(message)s"), locals()) manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/no_op_notifier.py0000664000175000017500000000135412301410454027435 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def notify(_context, message): """Notifies the recipient of the desired event given the model""" pass manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/__init__.py0000664000175000017500000000117412301410454026163 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/rabbit_notifier.py0000664000175000017500000000207712301410454027571 0ustar chuckchuck00000000000000# Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common.notifier import rpc_notifier LOG = logging.getLogger(__name__) def notify(context, message): """Deprecated in Grizzly. Please use rpc_notifier instead.""" LOG.deprecated(_("The rabbit_notifier is now deprecated." 
" Please use rpc_notifier instead.")) rpc_notifier.notify(context, message) manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/rpc_notifier2.py0000664000175000017500000000357412301410454027177 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. '''messaging based notification driver, with message envelopes''' from oslo.config import cfg from manila.openstack.common import context as req_context from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import rpc LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'topics', default=['notifications', ], help='AMQP topic(s) used for openstack notifications') opt_group = cfg.OptGroup(name='rpc_notifier2', title='Options for rpc_notifier2') CONF = cfg.CONF CONF.register_group(opt_group) CONF.register_opt(notification_topic_opt, opt_group) def notify(context, message): """Sends a notification via RPC""" if not context: context = req_context.get_admin_context() priority = message.get('priority', CONF.default_notification_level) priority = priority.lower() for topic in CONF.rpc_notifier2.topics: topic = '%s.%s' % (topic, priority) try: rpc.notify(context, topic, message, envelope=True) except Exception: LOG.exception(_("Could not send notification to %(topic)s. 
" "Payload=%(message)s"), locals()) manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/test_notifier.py0000664000175000017500000000143312301410454027300 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. NOTIFICATIONS = [] def notify(_context, message): """Test notifier, stores notifications in memory for unittests.""" NOTIFICATIONS.append(message) manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/log_notifier.py0000664000175000017500000000234312301410454027103 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from manila.openstack.common import jsonutils from manila.openstack.common import log as logging CONF = cfg.CONF def notify(_context, message): """Notifies the recipient of the desired event given the model. 
Log notifications using openstack's default logging system""" priority = message.get('priority', CONF.default_notification_level) priority = priority.lower() logger = logging.getLogger( 'manila.openstack.common.notification.%s' % message['event_type']) getattr(logger, priority)(jsonutils.dumps(message)) manila-2013.2.dev175.gbf1a399/manila/openstack/common/notifier/api.py0000664000175000017500000001314012301410454025171 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from oslo.config import cfg from manila.openstack.common import context from manila.openstack.common.gettextutils import _ from manila.openstack.common import importutils from manila.openstack.common import jsonutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils LOG = logging.getLogger(__name__) notifier_opts = [ cfg.MultiStrOpt('notification_driver', default=[], help='Driver or drivers to handle sending notifications'), cfg.StrOpt('default_notification_level', default='INFO', help='Default notification level for outgoing notifications'), cfg.StrOpt('default_publisher_id', default='$host', help='Default publisher_id for outgoing notifications'), ] CONF = cfg.CONF CONF.register_opts(notifier_opts) WARN = 'WARN' INFO = 'INFO' ERROR = 'ERROR' CRITICAL = 'CRITICAL' DEBUG = 'DEBUG' log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) class BadPriorityException(Exception): pass def notify_decorator(name, fn): """ decorator for notify which is used from utils.monkey_patch() :param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): body = {} body['args'] = [] body['kwarg'] = {} for arg in args: body['args'].append(arg) for key in kwarg: body['kwarg'][key] = kwarg[key] ctxt = context.get_context_from_function_and_args(fn, args, kwarg) notify(ctxt, CONF.default_publisher_id, name, CONF.default_notification_level, body) return fn(*args, **kwarg) return wrapped_func def publisher_id(service, host=None): if not host: host = CONF.host return "%s.%s" % (service, host) def notify(context, publisher_id, event_type, priority, payload): """Sends a notification using the specified driver :param publisher_id: the source worker_type.host of the message :param event_type: the literal type of event (ex. 
Instance Creation) :param priority: patterned after the enumeration of Python logging levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) :param payload: A python dictionary of attributes Outgoing message format includes the above parameters, and appends the following: message_id a UUID representing the id for this notification timestamp the GMT timestamp the notification was sent at The composite message will be constructed as a dictionary of the above attributes, which will then be sent via the transport mechanism defined by the driver. Message example:: {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', 'timestamp': timeutils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} """ if priority not in log_levels: raise BadPriorityException( _('%s not in valid priorities') % priority) # Ensure everything is JSON serializable. payload = jsonutils.to_primitive(payload, convert_instances=True) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id, event_type=event_type, priority=priority, payload=payload, timestamp=str(timeutils.utcnow())) for driver in _get_drivers(): try: driver.notify(context, msg) except Exception as e: LOG.exception(_("Problem '%(e)s' attempting to " "send to notification system. " "Payload=%(payload)s") % dict(e=e, payload=payload)) _drivers = None def _get_drivers(): """Instantiate, cache, and return drivers based on the CONF.""" global _drivers if _drivers is None: _drivers = {} for notification_driver in CONF.notification_driver: add_driver(notification_driver) return _drivers.values() def add_driver(notification_driver): """Add a notification driver at runtime.""" # Make sure the driver list is initialized. _get_drivers() if isinstance(notification_driver, basestring): # Load and add try: driver = importutils.import_module(notification_driver) _drivers[notification_driver] = driver except ImportError: LOG.exception(_("Failed to load notifier %s. 
" "These notifications will not be sent.") % notification_driver) else: # Driver is already loaded; just add the object. _drivers[notification_driver] = notification_driver def _reset_drivers(): """Used by unit tests to reset the drivers.""" global _drivers _drivers = None manila-2013.2.dev175.gbf1a399/manila/openstack/common/loopingcall.py0000664000175000017500000001106512301410454025110 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from eventlet import event from eventlet import greenthread from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import timeutils LOG = logging.getLogger(__name__) class LoopingCallDone(Exception): """Exception to break out and stop a LoopingCall. The poll-function passed to LoopingCall can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. 
An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCall.wait() """ def __init__(self, retvalue=True): """:param retvalue: Value that LoopingCall.wait() should return.""" self.retvalue = retvalue class LoopingCallBase(object): def __init__(self, f=None, *args, **kw): self.args = args self.kw = kw self.f = f self._running = False self.done = None def stop(self): self._running = False def wait(self): return self.done.wait() class FixedIntervalLoopingCall(LoopingCallBase): """A fixed interval looping call.""" def start(self, interval, initial_delay=None): self._running = True done = event.Event() def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: start = timeutils.utcnow() self.f(*self.args, **self.kw) end = timeutils.utcnow() if not self._running: break delay = interval - timeutils.delta_seconds(start, end) if delay <= 0: LOG.warn(_('task run outlasted interval by %s sec') % -delay) greenthread.sleep(delay if delay > 0 else 0) except LoopingCallDone, e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in fixed duration looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn_n(_inner) return self.done # TODO(mikal): this class name is deprecated in Havana and should be removed # in the I release LoopingCall = FixedIntervalLoopingCall class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. The function called should return how long to sleep for before being called again. 
""" def start(self, initial_delay=None, periodic_interval_max=None): self._running = True done = event.Event() def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: idle = self.f(*self.args, **self.kw) if not self._running: break if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) LOG.debug(_('Dynamic looping call sleeping for %.02f ' 'seconds'), idle) greenthread.sleep(idle) except LoopingCallDone, e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in dynamic looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn(_inner) return self.done manila-2013.2.dev175.gbf1a399/manila/openstack/common/strutils.py0000664000175000017500000001164012301410454024475 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ import sys from manila.openstack.common.gettextutils import _ TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') def int_from_bool_as_string(subject): """ Interpret a string as a boolean and return either 1 or 0. Any string value in: ('True', 'true', 'On', 'on', '1') is interpreted as a boolean True. 
Useful for JSON-decoded stuff and config file parsing """ return bool_from_string(subject) and 1 or 0 def bool_from_string(subject, strict=False): """ Interpret a string as a boolean. A case-insensitive match is performed such that strings matching 't', 'true', 'on', 'y', 'yes', or '1' are considered True and, when `strict=False`, anything else is considered False. Useful for JSON-decoded stuff and config file parsing. If `strict=True`, unrecognized values, including None, will raise a ValueError which is useful when parsing values passed in from an API call. Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. """ if not isinstance(subject, basestring): subject = str(subject) lowered = subject.strip().lower() if lowered in TRUE_STRINGS: return True elif lowered in FALSE_STRINGS: return False elif strict: acceptable = ', '.join( "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) msg = _("Unrecognized value '%(val)s', acceptable values are:" " %(acceptable)s") % {'val': subject, 'acceptable': acceptable} raise ValueError(msg) else: return False def safe_decode(text, incoming=None, errors='strict'): """ Decodes incoming str using `incoming` if they're not already unicode. :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a unicode `incoming` encoded representation of it. :raises TypeError: If text is not an isntance of basestring """ if not isinstance(text, basestring): raise TypeError("%s can't be decoded" % type(text)) if isinstance(text, unicode): return text if not incoming: incoming = (sys.stdin.encoding or sys.getdefaultencoding()) try: return text.decode(incoming, errors) except UnicodeDecodeError: # Note(flaper87) If we get here, it means that # sys.stdin.encoding / sys.getdefaultencoding # didn't return a suitable encoding to decode # text. 
This happens mostly when global LANG # var is not set correctly and there's no # default encoding. In this case, most likely # python will use ASCII or ANSI encoders as # default encodings but they won't be capable # of decoding non-ASCII characters. # # Also, UTF-8 is being used since it's an ASCII # extension. return text.decode('utf-8', errors) def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'): """ Encodes incoming str/unicode using `encoding`. If incoming is not specified, text is expected to be encoded with current python's default encoding. (`sys.getdefaultencoding`) :param incoming: Text's current encoding :param encoding: Expected encoding for text (Default UTF-8) :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a bytestring `encoding` encoded representation of it. :raises TypeError: If text is not an isntance of basestring """ if not isinstance(text, basestring): raise TypeError("%s can't be encoded" % type(text)) if not incoming: incoming = (sys.stdin.encoding or sys.getdefaultencoding()) if isinstance(text, unicode): return text.encode(encoding, errors) elif text and encoding != incoming: # Decode text before encoding it with `encoding` text = safe_decode(text, incoming, errors) return text.encode(encoding, errors) return text manila-2013.2.dev175.gbf1a399/manila/openstack/common/log.py0000664000175000017500000004541112301410454023370 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Openstack logging handler. This module adds to logging functionality by adding the option to specify a context object when calling the various log methods. If the context object is not specified, default formatting is used. Additionally, an instance uuid may be passed as part of the log message, which is intended to make it easier for admins to find messages related to a specific instance. It also allows setting of formatting information through conf. """ import ConfigParser import cStringIO import inspect import itertools import logging import logging.config import logging.handlers import os import stat import sys import traceback from oslo.config import cfg from manila.openstack.common.gettextutils import _ from manila.openstack.common import jsonutils from manila.openstack.common import local from manila.openstack.common import notifier _DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" common_cli_opts = [ cfg.BoolOpt('debug', short='d', default=False, help='Print debugging output (set logging level to ' 'DEBUG instead of default WARNING level).'), cfg.BoolOpt('verbose', short='v', default=False, help='Print more verbose output (set logging level to ' 'INFO instead of default WARNING level).'), ] logging_cli_opts = [ cfg.StrOpt('log-config', metavar='PATH', help='If this option is specified, the logging configuration ' 'file specified is used and overrides any other logging ' 'options specified. 
Please see the Python logging module ' 'documentation for details on logging configuration ' 'files.'), cfg.StrOpt('log-format', default=_DEFAULT_LOG_FORMAT, metavar='FORMAT', help='A logging.Formatter log message format string which may ' 'use any of the available logging.LogRecord attributes. ' 'Default: %(default)s'), cfg.StrOpt('log-date-format', default=_DEFAULT_LOG_DATE_FORMAT, metavar='DATE_FORMAT', help='Format string for %%(asctime)s in log records. ' 'Default: %(default)s'), cfg.StrOpt('log-file', metavar='PATH', deprecated_name='logfile', help='(Optional) Name of log file to output to. ' 'If no default is set, logging will go to stdout.'), cfg.StrOpt('log-dir', deprecated_name='logdir', help='(Optional) The base directory used for relative ' '--log-file paths'), cfg.BoolOpt('use-syslog', default=False, help='Use syslog for logging.'), cfg.StrOpt('syslog-log-facility', default='LOG_USER', help='syslog facility to receive log lines') ] generic_log_opts = [ cfg.BoolOpt('use_stderr', default=True, help='Log output to standard error'), cfg.StrOpt('logfile_mode', default='0644', help='Default file mode used when creating log files'), ] log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [%(request_id)s %(user)s %(tenant)s] ' '%(instance)s%(message)s', help='format string to use for log messages with context'), cfg.StrOpt('logging_default_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [-] %(instance)s%(message)s', help='format string to use for log messages without context'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', help='data to append to log format when level is DEBUG'), cfg.StrOpt('logging_exception_prefix', default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' '%(instance)s', help='prefix each line of exception output with this format'), cfg.ListOpt('default_log_levels', default=[ 
'amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'suds=INFO', 'keystone=INFO', 'eventlet.wsgi.server=WARN' ], help='list of logger=LEVEL pairs'), cfg.BoolOpt('publish_errors', default=False, help='publish error events'), cfg.BoolOpt('fatal_deprecations', default=False, help='make deprecations fatal'), # NOTE(mikal): there are two options here because sometimes we are handed # a full instance (and could include more information), and other times we # are just handed a UUID for the instance. cfg.StrOpt('instance_format', default='[instance: %(uuid)s] ', help='If an instance is passed with the log message, format ' 'it like this'), cfg.StrOpt('instance_uuid_format', default='[instance: %(uuid)s] ', help='If an instance UUID is passed with the log message, ' 'format it like this'), ] CONF = cfg.CONF CONF.register_cli_opts(common_cli_opts) CONF.register_cli_opts(logging_cli_opts) CONF.register_opts(generic_log_opts) CONF.register_opts(log_opts) # our new audit level # NOTE(jkoelker) Since we synthesized an audit level, make the logging # module aware of it so it acts like other levels. 
logging.AUDIT = logging.INFO + 1 logging.addLevelName(logging.AUDIT, 'AUDIT') try: NullHandler = logging.NullHandler except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 class NullHandler(logging.Handler): def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None def _dictify_context(context): if context is None: return None if not isinstance(context, dict) and getattr(context, 'to_dict', None): context = context.to_dict() return context def _get_binary_name(): return os.path.basename(inspect.stack()[-1][1]) def _get_log_file_path(binary=None): logfile = CONF.log_file logdir = CONF.log_dir if logfile and not logdir: return logfile if logfile and logdir: return os.path.join(logdir, logfile) if logdir: binary = binary or _get_binary_name() return '%s.log' % (os.path.join(logdir, binary),) class ContextAdapter(logging.LoggerAdapter): warn = logging.LoggerAdapter.warning def __init__(self, logger, project_name, version_string): self.logger = logger self.project = project_name self.version = version_string def audit(self, msg, *args, **kwargs): self.log(logging.AUDIT, msg, *args, **kwargs) def deprecated(self, msg, *args, **kwargs): stdmsg = _("Deprecated: %s") % msg if CONF.fatal_deprecations: self.critical(stdmsg, *args, **kwargs) raise DeprecatedConfig(msg=stdmsg) else: self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): if 'extra' not in kwargs: kwargs['extra'] = {} extra = kwargs['extra'] context = kwargs.pop('context', None) if not context: context = getattr(local.store, 'context', None) if context: extra.update(_dictify_context(context)) instance = kwargs.pop('instance', None) instance_extra = '' if instance: instance_extra = CONF.instance_format % instance else: instance_uuid = kwargs.pop('instance_uuid', None) if instance_uuid: instance_extra = (CONF.instance_uuid_format % {'uuid': instance_uuid}) extra.update({'instance': instance_extra}) extra.update({"project": self.project}) 
extra.update({"version": self.version}) extra['extra'] = extra.copy() return msg, kwargs class JSONFormatter(logging.Formatter): def __init__(self, fmt=None, datefmt=None): # NOTE(jkoelker) we ignore the fmt argument, but its still there # since logging.config.fileConfig passes it. self.datefmt = datefmt def formatException(self, ei, strip_newlines=True): lines = traceback.format_exception(*ei) if strip_newlines: lines = [itertools.ifilter( lambda x: x, line.rstrip().splitlines()) for line in lines] lines = list(itertools.chain(*lines)) return lines def format(self, record): message = {'message': record.getMessage(), 'asctime': self.formatTime(record, self.datefmt), 'name': record.name, 'msg': record.msg, 'args': record.args, 'levelname': record.levelname, 'levelno': record.levelno, 'pathname': record.pathname, 'filename': record.filename, 'module': record.module, 'lineno': record.lineno, 'funcname': record.funcName, 'created': record.created, 'msecs': record.msecs, 'relative_created': record.relativeCreated, 'thread': record.thread, 'thread_name': record.threadName, 'process_name': record.processName, 'process': record.process, 'traceback': None} if hasattr(record, 'extra'): message['extra'] = record.extra if record.exc_info: message['traceback'] = self.formatException(record.exc_info) return jsonutils.dumps(message) class PublishErrorsHandler(logging.Handler): def emit(self, record): if ('manila.openstack.common.notifier.log_notifier' in CONF.notification_driver): return notifier.api.notify(None, 'error.publisher', 'error_notification', notifier.api.ERROR, dict(error=record.msg)) def _create_logging_excepthook(product_name): def logging_excepthook(type, value, tb): extra = {} if CONF.verbose: extra['exc_info'] = (type, value, tb) getLogger(product_name).critical(str(value), **extra) return logging_excepthook class LogConfigError(Exception): message = _('Error loading logging config %(log_config)s: %(err_msg)s') def __init__(self, log_config, err_msg): 
self.log_config = log_config self.err_msg = err_msg def __str__(self): return self.message % dict(log_config=self.log_config, err_msg=self.err_msg) def _load_log_config(log_config): try: logging.config.fileConfig(log_config) except ConfigParser.Error, exc: raise LogConfigError(log_config, str(exc)) def setup(product_name): """Setup logging.""" if CONF.log_config: _load_log_config(CONF.log_config) else: _setup_logging_from_conf() sys.excepthook = _create_logging_excepthook(product_name) def set_defaults(logging_context_format_string): cfg.set_defaults(log_opts, logging_context_format_string= logging_context_format_string) def _find_facility_from_conf(): facility_names = logging.handlers.SysLogHandler.facility_names facility = getattr(logging.handlers.SysLogHandler, CONF.syslog_log_facility, None) if facility is None and CONF.syslog_log_facility in facility_names: facility = facility_names.get(CONF.syslog_log_facility) if facility is None: valid_facilities = facility_names.keys() consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] valid_facilities.extend(consts) raise TypeError(_('syslog facility must be one of: %s') % ', '.join("'%s'" % fac for fac in valid_facilities)) return facility def _setup_logging_from_conf(): log_root = getLogger(None).logger for handler in log_root.handlers: log_root.removeHandler(handler) if CONF.use_syslog: facility = _find_facility_from_conf() syslog = logging.handlers.SysLogHandler(address='/dev/log', facility=facility) log_root.addHandler(syslog) logpath = _get_log_file_path() if logpath: filelog = logging.handlers.WatchedFileHandler(logpath) log_root.addHandler(filelog) mode = int(CONF.logfile_mode, 8) st = os.stat(logpath) if st.st_mode != (stat.S_IFREG | mode): os.chmod(logpath, mode) if CONF.use_stderr: 
streamlog = ColorHandler() log_root.addHandler(streamlog) elif not CONF.log_file: # pass sys.stdout as a positional argument # python2.6 calls the argument strm, in 2.7 it's stream streamlog = logging.StreamHandler(sys.stdout) log_root.addHandler(streamlog) if CONF.publish_errors: log_root.addHandler(PublishErrorsHandler(logging.ERROR)) for handler in log_root.handlers: datefmt = CONF.log_date_format if CONF.log_format: handler.setFormatter(logging.Formatter(fmt=CONF.log_format, datefmt=datefmt)) else: handler.setFormatter(LegacyFormatter(datefmt=datefmt)) if CONF.debug: log_root.setLevel(logging.DEBUG) elif CONF.verbose: log_root.setLevel(logging.INFO) else: log_root.setLevel(logging.WARNING) for pair in CONF.default_log_levels: mod, _sep, level_name = pair.partition('=') level = logging.getLevelName(level_name) logger = logging.getLogger(mod) logger.setLevel(level) _loggers = {} def getLogger(name='unknown', version='unknown'): if name not in _loggers: _loggers[name] = ContextAdapter(logging.getLogger(name), name, version) return _loggers[name] class WritableLogger(object): """A thin wrapper that responds to `write` and logs.""" def __init__(self, logger, level=logging.INFO): self.logger = logger self.level = level def write(self, msg): self.logger.log(self.level, msg) class LegacyFormatter(logging.Formatter): """A context.RequestContext aware formatter configured through flags. The flags used to set format strings are: logging_context_format_string and logging_default_format_string. You can also specify logging_debug_format_suffix to append extra formatting if the log level is debug. 
For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter """ def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" # NOTE(sdague): default the fancier formating params # to an empty string so we don't throw an exception if # they get used for key in ('instance', 'color'): if key not in record.__dict__: record.__dict__[key] = '' if record.__dict__.get('request_id', None): self._fmt = CONF.logging_context_format_string else: self._fmt = CONF.logging_default_format_string if (record.levelno == logging.DEBUG and CONF.logging_debug_format_suffix): self._fmt += " " + CONF.logging_debug_format_suffix # Cache this on the record, Logger will respect our formated copy if record.exc_info: record.exc_text = self.formatException(record.exc_info, record) return logging.Formatter.format(self, record) def formatException(self, exc_info, record=None): """Format exception output with CONF.logging_exception_prefix.""" if not record: return logging.Formatter.formatException(self, exc_info) stringbuffer = cStringIO.StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, stringbuffer) lines = stringbuffer.getvalue().split('\n') stringbuffer.close() if CONF.logging_exception_prefix.find('%(asctime)') != -1: record.asctime = self.formatTime(record, self.datefmt) formatted_lines = [] for line in lines: pl = CONF.logging_exception_prefix % record.__dict__ fl = '%s%s' % (pl, line) formatted_lines.append(fl) return '\n'.join(formatted_lines) class ColorHandler(logging.StreamHandler): LEVEL_COLORS = { logging.DEBUG: '\033[00;32m', # GREEN logging.INFO: '\033[00;36m', # CYAN logging.AUDIT: '\033[01;36m', # BOLD CYAN logging.WARN: '\033[01;33m', # BOLD YELLOW logging.ERROR: '\033[01;31m', # BOLD RED logging.CRITICAL: '\033[01;31m', # BOLD RED } def format(self, record): record.color = self.LEVEL_COLORS[record.levelno] return logging.StreamHandler.format(self, 
record) class DeprecatedConfig(Exception): message = _("Fatal call to deprecated config: %(msg)s") def __init__(self, msg): super(Exception, self).__init__(self.message % dict(msg=msg)) manila-2013.2.dev175.gbf1a399/manila/openstack/common/gettextutils.py0000664000175000017500000000304112301410454025345 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ gettext for openstack-common modules. Usual usage in an openstack.common module: from manila.openstack.common.gettextutils import _ """ import gettext import os _localedir = os.environ.get('manila'.upper() + '_LOCALEDIR') _t = gettext.translation('manila', localedir=_localedir, fallback=True) def _(msg): return _t.ugettext(msg) def install(domain): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). 
""" gettext.install(domain, localedir=os.environ.get(domain.upper() + '_LOCALEDIR'), unicode=True) manila-2013.2.dev175.gbf1a399/manila/openstack/common/importutils.py0000664000175000017500000000421112301410454025173 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Import related utilities and helper functions. """ import sys import traceback def import_class(import_str): """Returns a class from a string including module and class""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ValueError, AttributeError): raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) def import_object(import_str, *args, **kwargs): """Import a class and return an instance of it.""" return import_class(import_str)(*args, **kwargs) def import_object_ns(name_space, import_str, *args, **kwargs): """ Import a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace. 
""" import_value = "%s.%s" % (name_space, import_str) try: return import_class(import_value)(*args, **kwargs) except ImportError: return import_class(import_str)(*args, **kwargs) def import_module(import_str): """Import a module.""" __import__(import_str) return sys.modules[import_str] def try_import(import_str, default=None): """Try to import a module and if it fails return default.""" try: return import_module(import_str) except ImportError: return default manila-2013.2.dev175.gbf1a399/manila/openstack/common/policy.py0000664000175000017500000002233012301410454024101 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common Policy Engine Implementation""" import logging import urllib import urllib2 from manila.openstack.common.gettextutils import _ from manila.openstack.common import jsonutils LOG = logging.getLogger(__name__) _BRAIN = None def set_brain(brain): """Set the brain used by enforce(). Defaults use Brain() if not set. """ global _BRAIN _BRAIN = brain def reset(): """Clear the brain used by enforce().""" global _BRAIN _BRAIN = None def enforce(match_list, target_dict, credentials_dict, exc=None, *args, **kwargs): """Enforces authorization of some rules against credentials. 
:param match_list: nested tuples of data to match against The basic brain supports three types of match lists: 1) rules looks like: ``('rule:compute:get_instance',)`` Retrieves the named rule from the rules dict and recursively checks against the contents of the rule. 2) roles looks like: ``('role:compute:admin',)`` Matches if the specified role is in credentials_dict['roles']. 3) generic looks like: ``('tenant_id:%(tenant_id)s',)`` Substitutes values from the target dict into the match using the % operator and matches them against the creds dict. Combining rules: The brain returns True if any of the outer tuple of rules match and also True if all of the inner tuples match. You can use this to perform simple boolean logic. For example, the following rule would return True if the creds contain the role 'admin' OR the if the tenant_id matches the target dict AND the the creds contains the role 'compute_sysadmin': :: { "rule:combined": ( 'role:admin', ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin') ) } Note that rule and role are reserved words in the credentials match, so you can't match against properties with those names. Custom brains may also add new reserved words. For example, the HttpBrain adds http as a reserved word. :param target_dict: dict of object properties Target dicts contain as much information as we can about the object being operated on. :param credentials_dict: dict of actor properties Credentials dicts contain as much information as we can about the user performing the action. :param exc: exception to raise Class of the exception to raise if the check fails. Any remaining arguments passed to enforce() (both positional and keyword arguments) will be passed to the exception class. If exc is not provided, returns False. 
:return: True if the policy allows the action :return: False if the policy does not allow the action and exc is not set """ global _BRAIN if not _BRAIN: _BRAIN = Brain() if not _BRAIN.check(match_list, target_dict, credentials_dict): if exc: raise exc(*args, **kwargs) return False return True class Brain(object): """Implements policy checking.""" _checks = {} @classmethod def _register(cls, name, func): cls._checks[name] = func @classmethod def load_json(cls, data, default_rule=None): """Init a brain using json instead of a rules dictionary.""" rules_dict = jsonutils.loads(data) return cls(rules=rules_dict, default_rule=default_rule) def __init__(self, rules=None, default_rule=None): if self.__class__ != Brain: LOG.warning(_("Inheritance-based rules are deprecated; use " "the default brain instead of %s.") % self.__class__.__name__) self.rules = rules or {} self.default_rule = default_rule def add_rule(self, key, match): self.rules[key] = match def _check(self, match, target_dict, cred_dict): try: match_kind, match_value = match.split(':', 1) except Exception: LOG.exception(_("Failed to understand rule %(match)r") % locals()) # If the rule is invalid, fail closed return False func = None try: old_func = getattr(self, '_check_%s' % match_kind) except AttributeError: func = self._checks.get(match_kind, self._checks.get(None, None)) else: LOG.warning(_("Inheritance-based rules are deprecated; update " "_check_%s") % match_kind) func = lambda brain, kind, value, target, cred: old_func(value, target, cred) if not func: LOG.error(_("No handler for matches of kind %s") % match_kind) # Fail closed return False return func(self, match_kind, match_value, target_dict, cred_dict) def check(self, match_list, target_dict, cred_dict): """Checks authorization of some rules against credentials. Detailed description of the check with examples in policy.enforce(). 
:param match_list: nested tuples of data to match against :param target_dict: dict of object properties :param credentials_dict: dict of actor properties :returns: True if the check passes """ if not match_list: return True for and_list in match_list: if isinstance(and_list, basestring): and_list = (and_list,) if all([self._check(item, target_dict, cred_dict) for item in and_list]): return True return False class HttpBrain(Brain): """A brain that can check external urls for policy. Posts json blobs for target and credentials. Note that this brain is deprecated; the http check is registered by default. """ pass def register(name, func=None): """ Register a function as a policy check. :param name: Gives the name of the check type, e.g., 'rule', 'role', etc. If name is None, a default function will be registered. :param func: If given, provides the function to register. If not given, returns a function taking one argument to specify the function to register, allowing use as a decorator. """ # Perform the actual decoration by registering the function. # Returns the function for compliance with the decorator # interface. 
def decorator(func): # Register the function Brain._register(name, func) return func # If the function is given, do the registration if func: return decorator(func) return decorator @register("rule") def _check_rule(brain, match_kind, match, target_dict, cred_dict): """Recursively checks credentials based on the brains rules.""" try: new_match_list = brain.rules[match] except KeyError: if brain.default_rule and match != brain.default_rule: new_match_list = ('rule:%s' % brain.default_rule,) else: return False return brain.check(new_match_list, target_dict, cred_dict) @register("role") def _check_role(brain, match_kind, match, target_dict, cred_dict): """Check that there is a matching role in the cred dict.""" return match.lower() in [x.lower() for x in cred_dict['roles']] @register('http') def _check_http(brain, match_kind, match, target_dict, cred_dict): """Check http: rules by calling to a remote server. This example implementation simply verifies that the response is exactly 'True'. A custom brain using response codes could easily be implemented. """ url = 'http:' + (match % target_dict) data = {'target': jsonutils.dumps(target_dict), 'credentials': jsonutils.dumps(cred_dict)} post_data = urllib.urlencode(data) f = urllib2.urlopen(url, post_data) return f.read() == "True" @register(None) def _check_generic(brain, match_kind, match, target_dict, cred_dict): """Check an individual match. Matches look like: tenant:%(tenant_id)s role:compute:admin """ # TODO(termie): do dict inspection via dot syntax match = match % target_dict if match_kind in cred_dict: return match == unicode(cred_dict[match_kind]) return False manila-2013.2.dev175.gbf1a399/manila/openstack/common/service.py0000664000175000017500000002367612301410454024260 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Node base class for all workers that run on hosts.""" import errno import os import random import signal import sys import time import eventlet import logging as std_logging from oslo.config import cfg from manila.openstack.common import eventlet_backdoor from manila.openstack.common.gettextutils import _ from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common import threadgroup rpc = importutils.try_import('manila.openstack.common.rpc') CONF = cfg.CONF LOG = logging.getLogger(__name__) class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher. :returns: None """ self._services = threadgroup.ThreadGroup() eventlet_backdoor.initialize_if_enabled() @staticmethod def run_service(service): """Start and wait for a service to finish. :param service: service to run and wait for. :returns: None """ service.start() service.wait() def launch_service(self, service): """Load and start the given service. :param service: The service you would like to start. :returns: None """ self._services.add_thread(self.run_service, service) def stop(self): """Stop all services which are currently running. :returns: None """ self._services.stop() def wait(self): """Waits until all services have been stopped, and then returns. 
:returns: None """ self._services.wait() class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo class ServiceLauncher(Launcher): def _handle_signal(self, signo, frame): # Allow the process to be killed again and die from natural causes signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGINT, signal.SIG_DFL) raise SignalExit(signo) def wait(self): signal.signal(signal.SIGTERM, self._handle_signal) signal.signal(signal.SIGINT, self._handle_signal) LOG.debug(_('Full set of CONF:')) CONF.log_opt_values(LOG, std_logging.DEBUG) status = None try: super(ServiceLauncher, self).wait() except SignalExit as exc: signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[exc.signo] LOG.info(_('Caught %s, exiting'), signame) status = exc.code except SystemExit as exc: status = exc.code finally: if rpc: rpc.cleanup() self.stop() return status class ServiceWrapper(object): def __init__(self, service, workers): self.service = service self.workers = workers self.children = set() self.forktimes = [] class ProcessLauncher(object): def __init__(self): self.children = {} self.sigcaught = None self.running = True rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') signal.signal(signal.SIGTERM, self._handle_signal) signal.signal(signal.SIGINT, self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGINT, signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read() LOG.info(_('Parent process has died unexpectedly, exiting')) sys.exit(1) def _child_process(self, service): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise 
SignalExit(signal.SIGTERM) signal.signal(signal.SIGTERM, _sigterm) # Block SIGINT and let the parent send us a SIGTERM signal.signal(signal.SIGINT, signal.SIG_IGN) # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() # Close write to ensure only parent has it open os.close(self.writepipe) # Create greenthread to watch for parent to close pipe eventlet.spawn_n(self._pipe_watcher) # Reseed random number generator random.seed() launcher = Launcher() launcher.run_service(service) def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: # Limit ourselves to one process a second (over the period of # number of workers * 1 second). This will allow workers to # start up quickly but ensure we don't fork off children that # die instantly too quickly. if time.time() - wrap.forktimes[0] < wrap.workers: LOG.info(_('Forking too fast, sleeping')) time.sleep(1) wrap.forktimes.pop(0) wrap.forktimes.append(time.time()) pid = os.fork() if pid == 0: # NOTE(johannes): All exceptions are caught to ensure this # doesn't fallback into the loop spawning children. It would # be bad for a child to spawn more children. 
status = 0 try: self._child_process(wrap.service) except SignalExit as exc: signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[exc.signo] LOG.info(_('Caught %s, exiting'), signame) status = exc.code except SystemExit as exc: status = exc.code except BaseException: LOG.exception(_('Unhandled exception')) status = 2 finally: wrap.service.stop() os._exit(status) LOG.info(_('Started child %d'), pid) wrap.children.add(pid) self.children[pid] = wrap return pid def launch_service(self, service, workers=1): wrap = ServiceWrapper(service, workers) LOG.info(_('Starting %d workers'), wrap.workers) while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def _wait_child(self): try: # Don't block if no child processes have exited pid, status = os.waitpid(0, os.WNOHANG) if not pid: return None except OSError as exc: if exc.errno not in (errno.EINTR, errno.ECHILD): raise return None if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) LOG.info(_('Child %(pid)d killed by signal %(sig)d'), dict(pid=pid, sig=sig)) else: code = os.WEXITSTATUS(status) LOG.info(_('Child %(pid)s exited with status %(code)d'), dict(pid=pid, code=code)) if pid not in self.children: LOG.warning(_('pid %d not in child list'), pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) return wrap def wait(self): """Loop waiting on children to die and respawning as necessary""" LOG.debug(_('Full set of CONF:')) CONF.log_opt_values(LOG, std_logging.DEBUG) while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) eventlet.greenthread.sleep(.01) continue while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) if self.sigcaught: signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[self.sigcaught] LOG.info(_('Caught %s, stopping children'), signame) for pid in self.children: try: os.kill(pid, 
signal.SIGTERM) except OSError as exc: if exc.errno != errno.ESRCH: raise # Wait for children to die if self.children: LOG.info(_('Waiting on %d children to exit'), len(self.children)) while self.children: self._wait_child() class Service(object): """Service object for binaries running on hosts.""" def __init__(self, threads=1000): self.tg = threadgroup.ThreadGroup(threads) def start(self): pass def stop(self): self.tg.stop() def wait(self): self.tg.wait() def launch(service, workers=None): if workers: launcher = ProcessLauncher() launcher.launch_service(service, workers=workers) else: launcher = ServiceLauncher() launcher.launch_service(service) return launcher manila-2013.2.dev175.gbf1a399/manila/openstack/common/context.py0000664000175000017500000000507312301410454024273 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Simple class that stores security context information in the web request. Projects should subclass this class if they wish to enhance the request context or provide additional information in their specific WSGI pipeline. """ import itertools import uuid def generate_request_id(): return 'req-' + str(uuid.uuid4()) class RequestContext(object): """ Stores information about the security context under which the user accesses the system, as well as additional request information. 
""" def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, read_only=False, show_deleted=False, request_id=None): self.auth_token = auth_token self.user = user self.tenant = tenant self.is_admin = is_admin self.read_only = read_only self.show_deleted = show_deleted if not request_id: request_id = generate_request_id() self.request_id = request_id def to_dict(self): return {'user': self.user, 'tenant': self.tenant, 'is_admin': self.is_admin, 'read_only': self.read_only, 'show_deleted': self.show_deleted, 'auth_token': self.auth_token, 'request_id': self.request_id} def get_admin_context(show_deleted="no"): context = RequestContext(None, tenant=None, is_admin=True, show_deleted=show_deleted) return context def get_context_from_function_and_args(function, args, kwargs): """Find an arg of type RequestContext and return it. This is useful in a couple of decorators where we don't know much about the function we're wrapping. """ for arg in itertools.chain(kwargs.values(), args): if isinstance(arg, RequestContext): return arg return None manila-2013.2.dev175.gbf1a399/manila/openstack/common/local.py0000664000175000017500000000332112301410454023673 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Greenthread local storage of variables using weak references""" import weakref from eventlet import corolocal class WeakLocal(corolocal.local): def __getattribute__(self, attr): rval = corolocal.local.__getattribute__(self, attr) if rval: # NOTE(mikal): this bit is confusing. What is stored is a weak # reference, not the value itself. We therefore need to lookup # the weak reference and return the inner value here. rval = rval() return rval def __setattr__(self, attr, value): value = weakref.ref(value) return corolocal.local.__setattr__(self, attr, value) # NOTE(mikal): the name "store" should be deprecated in the future store = WeakLocal() # A "weak" store uses weak references and allows an object to fall out of scope # when it falls out of scope in the code that uses the thread local storage. A # "strong" store will hold a reference to the object so that it never falls out # of scope. weak_store = WeakLocal() strong_store = corolocal.local manila-2013.2.dev175.gbf1a399/manila/compute/0000775000175000017500000000000012301410516020424 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/compute/__init__.py0000664000175000017500000000226212301410454022540 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo.config.cfg import manila.openstack.common.importutils _compute_opts = [ oslo.config.cfg.StrOpt('compute_api_class', default='manila.compute.nova.API', help='The full class name of the ' 'compute API class to use'), ] oslo.config.cfg.CONF.register_opts(_compute_opts) def API(): importutils = manila.openstack.common.importutils compute_api_class = oslo.config.cfg.CONF.compute_api_class cls = importutils.import_class(compute_api_class) return cls() manila-2013.2.dev175.gbf1a399/manila/compute/nova.py0000664000175000017500000002425312301410454021750 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to Nova. """ import sys from novaclient import exceptions as nova_exception from novaclient import extension from novaclient import service_catalog from novaclient.v1_1 import client as nova_client from novaclient.v1_1.contrib import assisted_volume_snapshots from novaclient.v1_1 import servers as nova_servers from oslo.config import cfg from manila.db import base from manila import exception from manila.openstack.common import log as logging nova_opts = [ cfg.StrOpt('nova_catalog_info', default='compute:nova:publicURL', help='Info to match when looking for nova in the service ' 'catalog. 
Format is : separated values of the form: ' '::'), cfg.StrOpt('nova_catalog_admin_info', default='compute:nova:adminURL', help='Same as nova_catalog_info, but for admin endpoint.'), cfg.StrOpt('os_region_name', default=None, help='region name of this node'), cfg.StrOpt('nova_ca_certificates_file', default=None, help='Location of ca certicates file to use for nova client ' 'requests.'), cfg.BoolOpt('nova_api_insecure', default=False, help='Allow to perform insecure SSL requests to nova'), cfg.StrOpt('nova_admin_username', default='nova', help='Nova admin username'), cfg.StrOpt('nova_admin_password', help='Nova admin password'), cfg.StrOpt('nova_admin_tenant_name', default='service', help='Nova admin tenant name'), cfg.StrOpt('nova_admin_auth_url', default='http://localhost:5000/v2.0', help='Identity service url'), ] CONF = cfg.CONF CONF.register_opts(nova_opts) LOG = logging.getLogger(__name__) def novaclient(context): if context.is_admin and context.project_id is None: c = nova_client.Client(CONF.nova_admin_username, CONF.nova_admin_password, CONF.nova_admin_tenant_name, CONF.nova_admin_auth_url) c.authenticate() return c compat_catalog = { 'access': {'serviceCatalog': context.service_catalog or []} } sc = service_catalog.ServiceCatalog(compat_catalog) nova_catalog_info = CONF.nova_catalog_info info = nova_catalog_info service_type, service_name, endpoint_type = info.split(':') # extract the region if set in configuration if CONF.os_region_name: attr = 'region' filter_value = CONF.os_region_name else: attr = None filter_value = None url = sc.url_for(attr=attr, filter_value=filter_value, service_type=service_type, service_name=service_name, endpoint_type=endpoint_type) LOG.debug(_('Novaclient connection created using URL: %s') % url) extensions = [assisted_volume_snapshots] c = nova_client.Client(context.user_id, context.auth_token, context.project_id, auth_url=url, insecure=CONF.nova_api_insecure, cacert=CONF.nova_ca_certificates_file, extensions=extensions) # 
noauth extracts user_id:project_id from auth_token c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id, context.project_id) c.client.management_url = url return c def _untranslate_server_summary_view(server): """Maps keys for servers summary view.""" d = {} d['id'] = server.id d['status'] = server.status d['flavor'] = server.flavor['id'] d['name'] = server.name d['image'] = server.image['id'] d['created'] = server.created d['addresses'] = server.addresses d['networks'] = server.networks d['tenant_id'] = server.tenant_id d['user_id'] = server.user_id return d def translate_server_exception(method): """Transforms the exception for the instance but keeps its traceback intact. """ def wrapper(self, ctx, instance_id, *args, **kwargs): try: res = method(self, ctx, instance_id, *args, **kwargs) except nova_exception.ClientException: exc_type, exc_value, exc_trace = sys.exc_info() if isinstance(exc_value, nova_exception.NotFound): exc_value = exception.InstanceNotFound(instance_id=instance_id) elif isinstance(exc_value, nova_exception.BadRequest): exc_value = exception.InvalidInput(reason=exc_value.message) raise exc_value, None, exc_trace return res return wrapper class API(base.Base): """API for interacting with novaclient.""" def server_create(self, context, name, image, flavor, key_name, user_data, security_groups, block_device_mapping=None, block_device_mapping_v2=None, nics=None, availability_zone=None, instance_count=1, admin_pass=None): return _untranslate_server_summary_view( novaclient(context).servers.create( name, image, flavor, userdata=user_data, security_groups=security_groups, key_name=key_name, block_device_mapping=block_device_mapping, block_device_mapping_v2=block_device_mapping_v2, nics=nics, availability_zone=availability_zone, min_count=instance_count, admin_pass=admin_pass) ) def server_delete(self, context, instance): novaclient(context).servers.delete(instance) @translate_server_exception def server_get(self, context, 
instance_id): return _untranslate_server_summary_view( novaclient(context).servers.get(instance_id) ) def server_list(self, context, search_opts=None, all_tenants=False): if search_opts is None: search_opts = {} if all_tenants: search_opts['all_tenants'] = True else: search_opts['project_id'] = context.project_id servers = [_untranslate_server_summary_view(s) for s in novaclient(context).servers.list(True, search_opts)] return servers @translate_server_exception def server_pause(self, context, instance_id): novaclient(context).servers.pause(instance_id) @translate_server_exception def server_unpause(self, context, instance_id): novaclient(context).servers.unpause(instance_id) @translate_server_exception def server_suspend(self, context, instance_id): novaclient(context).servers.suspend(instance_id) @translate_server_exception def server_resume(self, context, instance_id): novaclient(context).servers.resume(instance_id) @translate_server_exception def server_reboot(self, context, instance_id, soft_reboot=False): hardness = nova_servers.REBOOT_HARD if soft_reboot: hardness = nova_servers.REBOOT_SOFT novaclient(context).servers.reboot(instance_id, hardness) @translate_server_exception def server_rebuild(self, context, instance_id, image_id, password=None): return _untranslate_server_summary_view( novaclient(context).servers.rebuild(instance_id, image_id, password) ) @translate_server_exception def instance_volume_attach(self, context, instance_id, volume_id, device): return novaclient(context).volumes.create_server_volume(instance_id, volume_id, device) @translate_server_exception def instance_volume_detach(self, context, instance_id, att_id): return novaclient(context).volumes.delete_server_volume(instance_id, att_id) @translate_server_exception def instance_volumes_list(self, context, instance_id): from manila.volume.cinder import cinderclient volumes = novaclient(context).volumes.get_server_volumes(instance_id) for volume in volumes: volume_data = 
cinderclient(context).volumes.get(volume.id) volume.name = volume_data.display_name return volumes @translate_server_exception def server_update(self, context, instance_id, name): return _untranslate_server_summary_view( novaclient(context).servers.update(instance_id, name=name) ) def update_server_volume(self, context, instance_id, attachment_id, new_volume_id): novaclient(context).volumes.update_server_volume(instance_id, attachment_id, new_volume_id) def keypair_create(self, context, name): return novaclient(context).keypairs.create(name) def keypair_import(self, context, name, public_key): return novaclient(context).keypairs.create(name, public_key) def keypair_delete(self, context, keypair_id): novaclient(context).keypairs.delete(keypair_id) def keypair_list(self, context): return novaclient(context).keypairs.list() def image_list(self, context): return novaclient(context).images.list() manila-2013.2.dev175.gbf1a399/manila/__init__.py0000664000175000017500000000240212301410454021060 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila` -- Cloud IaaS Platform =================================== .. automodule:: manila :platform: Unix :synopsis: Infrastructure-as-a-Service Cloud platform. .. moduleauthor:: Jesse Andrews .. 
moduleauthor:: Devin Carlen .. moduleauthor:: Vishvananda Ishaya .. moduleauthor:: Joshua McKenty .. moduleauthor:: Manish Singh .. moduleauthor:: Andy Smith """ manila-2013.2.dev175.gbf1a399/manila/db/0000775000175000017500000000000012301410516017335 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/0000775000175000017500000000000012301410516021477 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/utils.py0000664000175000017500000004425112301410454023220 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me). # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from migrate.changeset import UniqueConstraint, ForeignKeyConstraint from sqlalchemy import Boolean from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy.engine import reflection from sqlalchemy.exc import OperationalError from sqlalchemy.exc import ProgrammingError from sqlalchemy.ext.compiler import compiles from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import schema from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import UpdateBase from sqlalchemy.sql import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.types import NullType from manila.db.sqlalchemy import api as db from manila import exception from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging from manila.openstack.common import timeutils LOG = logging.getLogger(__name__) def get_table(engine, name): """Returns an sqlalchemy table dynamically from db. Needed because the models don't work for us in migrations as models will be far out of sync with the current data. """ metadata = MetaData() metadata.bind = engine return Table(name, metadata, autoload=True) class InsertFromSelect(UpdateBase): def __init__(self, table, select): self.table = table self.select = select @compiles(InsertFromSelect) def visit_insert_from_select(element, compiler, **kw): return "INSERT INTO %s %s" % ( compiler.process(element.table, asfrom=True), compiler.process(element.select)) def _get_not_supported_column(col_name_col_instance, column_name): try: column = col_name_col_instance[column_name] except Exception: msg = _("Please specify column %s in col_name_col_instance " "param. 
It is required because column has unsupported " "type by sqlite).") raise exception.ManilaException(msg % column_name) if not isinstance(column, Column): msg = _("col_name_col_instance param has wrong type of " "column instance for column %s It should be instance " "of sqlalchemy.Column.") raise exception.ManilaException(msg % column_name) return column def _get_unique_constraints_in_sqlite(migrate_engine, table_name): regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" meta = MetaData(bind=migrate_engine) table = Table(table_name, meta, autoload=True) sql_data = migrate_engine.execute( """ SELECT sql FROM sqlite_master WHERE type = 'table' AND name = :table_name; """, table_name=table_name ).fetchone()[0] uniques = set([ schema.UniqueConstraint( *[getattr(table.c, c.strip(' "')) for c in cols.split(",")], name=name ) for name, cols in re.findall(regexp, sql_data) ]) return uniques def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, **col_name_col_instance): insp = reflection.Inspector.from_engine(migrate_engine) meta = MetaData(bind=migrate_engine) table = Table(table_name, meta, autoload=True) columns = [] for column in table.columns: if isinstance(column.type, NullType): new_column = _get_not_supported_column(col_name_col_instance, column.name) columns.append(new_column) else: columns.append(column.copy()) uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name) table.constraints.update(uniques) constraints = [constraint for constraint in table.constraints if not constraint.name == uc_name and not isinstance(constraint, schema.ForeignKeyConstraint)] new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) new_table.create() indexes = [] for index in insp.get_indexes(table_name): column_names = [new_table.c[c] for c in index['column_names']] indexes.append(Index(index["name"], *column_names, unique=index["unique"])) f_keys = [] for fk in insp.get_foreign_keys(table_name): refcolumns = [fk['referred_table'] + '.' 
+ col for col in fk['referred_columns']] f_keys.append(ForeignKeyConstraint(fk['constrained_columns'], refcolumns, table=new_table, name=fk['name'])) ins = InsertFromSelect(new_table, table.select()) migrate_engine.execute(ins) table.drop() [index.create(migrate_engine) for index in indexes] for fkey in f_keys: fkey.create() new_table.rename(table_name) def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, **col_name_col_instance): """ This method drops UC from table and works for mysql, postgresql and sqlite. In mysql and postgresql we are able to use "alter table" constuction. In sqlite is only one way to drop UC: 1) Create new table with same columns, indexes and constraints (except one that we want to drop). 2) Copy data from old table to new. 3) Drop old table. 4) Rename new table to the name of old table. :param migrate_engine: sqlalchemy engine :param table_name: name of table that contains uniq constarint. :param uc_name: name of uniq constraint that will be dropped. :param columns: columns that are in uniq constarint. :param col_name_col_instance: contains pair column_name=column_instance. column_instance is instance of Column. These params are required only for columns that have unsupported types by sqlite. For example BigInteger. """ if migrate_engine.name == "sqlite": _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, **col_name_col_instance) else: meta = MetaData() meta.bind = migrate_engine t = Table(table_name, meta, autoload=True) uc = UniqueConstraint(*columns, table=t, name=uc_name) uc.drop() def drop_old_duplicate_entries_from_table(migrate_engine, table_name, use_soft_delete, *uc_column_names): """ This method is used to drop all old rows that have the same values for columns in uc_columns. 
""" meta = MetaData() meta.bind = migrate_engine table = Table(table_name, meta, autoload=True) columns_for_group_by = [table.c[name] for name in uc_column_names] columns_for_select = [func.max(table.c.id)] columns_for_select.extend(list(columns_for_group_by)) duplicated_rows_select = select(columns_for_select, group_by=columns_for_group_by, having=func.count(table.c.id) > 1) for row in migrate_engine.execute(duplicated_rows_select): # NOTE(boris-42): Do not remove row that has the biggest ID. delete_condition = table.c.id != row[0] for name in uc_column_names: delete_condition &= table.c[name] == row[name] rows_to_delete_select = select([table.c.id]).where(delete_condition) for row in migrate_engine.execute(rows_to_delete_select).fetchall(): LOG.info(_("Deleted duplicated row with id: %(id)s from table: " "%(table)s") % dict(id=row[0], table=table_name)) if use_soft_delete: delete_statement = table.update().\ where(delete_condition).\ values({ 'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow() }) else: delete_statement = table.delete().where(delete_condition) migrate_engine.execute(delete_statement) def _get_default_deleted_value(table): if isinstance(table.c.id.type, Integer): return 0 if isinstance(table.c.id.type, String): return "" raise exception.ManilaException(_("Unsupported id columns type")) def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): table = get_table(migrate_engine, table_name) insp = reflection.Inspector.from_engine(migrate_engine) real_indexes = insp.get_indexes(table_name) existing_index_names = dict([(index['name'], index['column_names']) for index in real_indexes]) # NOTE(boris-42): Restore indexes on `deleted` column for index in indexes: if 'deleted' not in index['column_names']: continue name = index['name'] if name in existing_index_names: column_names = [table.c[c] for c in existing_index_names[name]] old_index = Index(name, *column_names, 
unique=index["unique"]) old_index.drop(migrate_engine) column_names = [table.c[c] for c in index['column_names']] new_index = Index(index["name"], *column_names, unique=index["unique"]) new_index.create(migrate_engine) def change_deleted_column_type_to_boolean(migrate_engine, table_name, **col_name_col_instance): if migrate_engine.name == "sqlite": return _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, **col_name_col_instance) insp = reflection.Inspector.from_engine(migrate_engine) indexes = insp.get_indexes(table_name) table = get_table(migrate_engine, table_name) old_deleted = Column('old_deleted', Boolean, default=False) old_deleted.create(table, populate_default=False) table.update().\ where(table.c.deleted == table.c.id).\ values(old_deleted=True).\ execute() table.c.deleted.drop() table.c.old_deleted.alter(name="deleted") _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, **col_name_col_instance): insp = reflection.Inspector.from_engine(migrate_engine) table = get_table(migrate_engine, table_name) columns = [] for column in table.columns: column_copy = None if column.name != "deleted": if isinstance(column.type, NullType): column_copy = _get_not_supported_column(col_name_col_instance, column.name) else: column_copy = column.copy() else: column_copy = Column('deleted', Boolean, default=0) columns.append(column_copy) constraints = [constraint.copy() for constraint in table.constraints] meta = MetaData(bind=migrate_engine) new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) new_table.create() indexes = [] for index in insp.get_indexes(table_name): column_names = [new_table.c[c] for c in index['column_names']] indexes.append(Index(index["name"], *column_names, unique=index["unique"])) c_select = [] for c in table.c: if c.name != "deleted": c_select.append(c) else: c_select.append(table.c.deleted == table.c.id) ins = 
InsertFromSelect(new_table, select(c_select)) migrate_engine.execute(ins) table.drop() [index.create(migrate_engine) for index in indexes] new_table.rename(table_name) new_table.update().\ where(new_table.c.deleted == new_table.c.id).\ values(deleted=True).\ execute() def change_deleted_column_type_to_id_type(migrate_engine, table_name, **col_name_col_instance): if migrate_engine.name == "sqlite": return _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, **col_name_col_instance) insp = reflection.Inspector.from_engine(migrate_engine) indexes = insp.get_indexes(table_name) table = get_table(migrate_engine, table_name) new_deleted = Column('new_deleted', table.c.id.type, default=_get_default_deleted_value(table)) new_deleted.create(table, populate_default=True) table.update().\ where(table.c.deleted == True).\ values(new_deleted=table.c.id).\ execute() table.c.deleted.drop() table.c.new_deleted.alter(name="deleted") _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, **col_name_col_instance): # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check # constraints in sqlite DB and our `deleted` column has # 2 check constraints. So there is only one way to remove # these constraints: # 1) Create new table with the same columns, constraints # and indexes. (except deleted column). # 2) Copy all data from old to new table. # 3) Drop old table. # 4) Rename new table to old table name. 
insp = reflection.Inspector.from_engine(migrate_engine) meta = MetaData(bind=migrate_engine) table = Table(table_name, meta, autoload=True) default_deleted_value = _get_default_deleted_value(table) columns = [] for column in table.columns: column_copy = None if column.name != "deleted": if isinstance(column.type, NullType): column_copy = _get_not_supported_column(col_name_col_instance, column.name) else: column_copy = column.copy() else: column_copy = Column('deleted', table.c.id.type, default=default_deleted_value) columns.append(column_copy) def is_deleted_column_constraint(constraint): # NOTE(boris-42): There is no other way to check is CheckConstraint # associated with deleted column. if not isinstance(constraint, CheckConstraint): return False sqltext = str(constraint.sqltext) return (sqltext.endswith("deleted in (0, 1)") or sqltext.endswith("deleted IN (:deleted_1, :deleted_2)")) constraints = [] for constraint in table.constraints: if not is_deleted_column_constraint(constraint): constraints.append(constraint.copy()) new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) new_table.create() indexes = [] for index in insp.get_indexes(table_name): column_names = [new_table.c[c] for c in index['column_names']] indexes.append(Index(index["name"], *column_names, unique=index["unique"])) ins = InsertFromSelect(new_table, table.select()) migrate_engine.execute(ins) table.drop() [index.create(migrate_engine) for index in indexes] new_table.rename(table_name) new_table.update().\ where(new_table.c.deleted == True).\ values(deleted=new_table.c.id).\ execute() # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. 
new_table.update().\ where(new_table.c.deleted == False).\ values(deleted=default_deleted_value).\ execute() def _add_index(migrate_engine, table, index_name, idx_columns): index = Index( index_name, *[getattr(table.c, col) for col in idx_columns] ) index.create() def _drop_index(migrate_engine, table, index_name, idx_columns): index = Index( index_name, *[getattr(table.c, col) for col in idx_columns] ) index.drop() def _change_index_columns(migrate_engine, table, index_name, new_columns, old_columns): Index( index_name, *[getattr(table.c, col) for col in old_columns] ).drop(migrate_engine) Index( index_name, *[getattr(table.c, col) for col in new_columns] ).create() def modify_indexes(migrate_engine, data, upgrade=True): if migrate_engine.name == 'sqlite': return meta = MetaData() meta.bind = migrate_engine for table_name, indexes in data.iteritems(): table = Table(table_name, meta, autoload=True) for index_name, old_columns, new_columns in indexes: if not upgrade: new_columns, old_columns = old_columns, new_columns if migrate_engine.name == 'postgresql': if upgrade: _add_index(migrate_engine, table, index_name, new_columns) else: _drop_index(migrate_engine, table, index_name, old_columns) elif migrate_engine.name == 'mysql': _change_index_columns(migrate_engine, table, index_name, new_columns, old_columns) else: raise ValueError('Unsupported DB %s' % migrate_engine.name) manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/session.py0000664000175000017500000001205512301410454023540 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Session Handling for SQLAlchemy backend.""" import time from oslo.config import cfg from sqlalchemy.exc import DisconnectionError, OperationalError import sqlalchemy.interfaces import sqlalchemy.orm from sqlalchemy.pool import NullPool, StaticPool import manila.exception from manila.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) _ENGINE = None _MAKER = None def get_session(autocommit=True, expire_on_commit=False): """Return a SQLAlchemy session.""" global _MAKER if _MAKER is None: engine = get_engine() _MAKER = get_maker(engine, autocommit, expire_on_commit) session = _MAKER() session.query = manila.exception.wrap_db_error(session.query) session.flush = manila.exception.wrap_db_error(session.flush) return session def synchronous_switch_listener(dbapi_conn, connection_rec): """Switch sqlite connections to non-synchronous mode""" dbapi_conn.execute("PRAGMA synchronous = OFF") def ping_listener(dbapi_conn, connection_rec, connection_proxy): """ Ensures that MySQL connections checked out of the pool are alive. Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ try: dbapi_conn.cursor().execute('select 1') except dbapi_conn.OperationalError, ex: if ex.args[0] in (2006, 2013, 2014, 2045, 2055): LOG.warn(_('Got mysql server has gone away: %s'), ex) raise DisconnectionError("Database server went away") else: raise def is_db_connection_error(args): """Return True if error in connecting to db.""" # NOTE(adam_g): This is currently MySQL specific and needs to be extended # to support Postgres and others. 
conn_err_codes = ('2002', '2003', '2006') for err_code in conn_err_codes: if args.find(err_code) != -1: return True return False def get_engine(): """Return a SQLAlchemy engine.""" global _ENGINE if _ENGINE is None: connection_dict = sqlalchemy.engine.url.make_url(CONF.sql_connection) engine_args = { "pool_recycle": CONF.sql_idle_timeout, "echo": False, 'convert_unicode': True, } # Map our SQL debug level to SQLAlchemy's options if CONF.sql_connection_debug >= 100: engine_args['echo'] = 'debug' elif CONF.sql_connection_debug >= 50: engine_args['echo'] = True if "sqlite" in connection_dict.drivername: engine_args["poolclass"] = NullPool if CONF.sql_connection == "sqlite://": engine_args["poolclass"] = StaticPool engine_args["connect_args"] = {'check_same_thread': False} _ENGINE = sqlalchemy.create_engine(CONF.sql_connection, **engine_args) if 'mysql' in connection_dict.drivername: sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener) elif "sqlite" in connection_dict.drivername: if not CONF.sqlite_synchronous: sqlalchemy.event.listen(_ENGINE, 'connect', synchronous_switch_listener) try: _ENGINE.connect() except OperationalError, e: if not is_db_connection_error(e.args[0]): raise remaining = CONF.sql_max_retries if remaining == -1: remaining = 'infinite' while True: msg = _('SQL connection failed. 
%s attempts left.') LOG.warn(msg % remaining) if remaining != 'infinite': remaining -= 1 time.sleep(CONF.sql_retry_interval) try: _ENGINE.connect() break except OperationalError, e: if ((remaining != 'infinite' and remaining == 0) or not is_db_connection_error(e.args[0])): raise return _ENGINE def get_maker(engine, autocommit=True, expire_on_commit=False): """Return a SQLAlchemy sessionmaker using the given engine.""" return sqlalchemy.orm.sessionmaker(bind=engine, autocommit=autocommit, expire_on_commit=expire_on_commit) manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migration.py0000664000175000017500000000756412301410454024057 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import distutils.version as dist_version import os from oslo.config import cfg from manila.db import migration from manila.db.sqlalchemy.session import get_engine from manila import exception from manila.openstack.common import log as logging import migrate from migrate.versioning import util as migrate_util import sqlalchemy LOG = logging.getLogger(__name__) @migrate_util.decorator def patched_with_engine(f, *a, **kw): url = a[0] engine = migrate_util.construct_engine(url, **kw) try: kw['engine'] = engine return f(*a, **kw) finally: if isinstance(engine, migrate_util.Engine) and engine is not url: migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) engine.dispose() # TODO(jkoelker) When migrate 0.7.3 is released and manila depends # on that version or higher, this can be removed MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') if (not hasattr(migrate, '__version__') or dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): migrate_util.with_engine = patched_with_engine # NOTE(jkoelker) Delay importing migrate until we are patched from migrate import exceptions as versioning_exceptions from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository CONF = cfg.CONF _REPOSITORY = None def db_sync(version=None): if version is not None: try: version = int(version) except ValueError: raise exception.Error(_("version should be an integer")) current_version = db_version() repository = _find_migrate_repo() if version is None or version > current_version: return versioning_api.upgrade(get_engine(), repository, version) else: return versioning_api.downgrade(get_engine(), repository, version) def db_version(): repository = _find_migrate_repo() try: return versioning_api.db_version(get_engine(), repository) except versioning_exceptions.DatabaseNotControlledError: # If we aren't version controlled we may already have the database # in the state from before we started version control, check for that # and 
set up version_control appropriately meta = sqlalchemy.MetaData() engine = get_engine() meta.reflect(bind=engine) tables = meta.tables if len(tables) == 0: db_version_control(migration.INIT_VERSION) return versioning_api.db_version(get_engine(), repository) else: raise exception.Error(_("Upgrade DB using Essex release first.")) def db_version_control(version=None): repository = _find_migrate_repo() versioning_api.version_control(get_engine(), repository, version) return version def _find_migrate_repo(): """Get the path for the migrate repository.""" global _REPOSITORY path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'migrate_repo') assert os.path.exists(path) if _REPOSITORY is None: _REPOSITORY = Repository(path) return _REPOSITORY manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/models.py0000664000175000017500000003577212301410454023353 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Manila data. 
""" from sqlalchemy import Column, Index, Integer, String, Text, schema from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey, DateTime, Boolean, Enum from sqlalchemy.orm import relationship, backref, object_mapper from manila.common import constants from manila.db.sqlalchemy.session import get_session from manila import exception from manila.openstack.common import timeutils from oslo.config import cfg CONF = cfg.CONF BASE = declarative_base() class ManilaBase(object): """Base class for Manila Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False created_at = Column(DateTime, default=lambda: timeutils.utcnow()) updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) deleted_at = Column(DateTime) deleted = Column(Integer, default=0) metadata = None def save(self, session=None): """Save this object.""" if not session: session = get_session() session.add(self) try: session.flush() except IntegrityError, e: if str(e).endswith('is not unique'): raise exception.Duplicate(str(e)) else: raise def delete(self, session=None): """Delete this object.""" self.deleted = self.id self.deleted_at = timeutils.utcnow() self.save(session=session) def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default=None): return getattr(self, key, default) def __iter__(self): self._i = iter(object_mapper(self).columns) return self def next(self): n = self._i.next().name return n, getattr(self, n) def update(self, values): """Make the model object behave like a dict.""" for k, v in values.iteritems(): setattr(self, k, v) def iteritems(self): """Make the model object behave like a dict. 
Includes attributes from joins.""" local = dict(self) joined = dict([(k, v) for k, v in self.__dict__.iteritems() if not k[0] == '_']) local.update(joined) return local.iteritems() class Service(BASE, ManilaBase): """Represents a running service on a host.""" __tablename__ = 'services' id = Column(Integer, primary_key=True) host = Column(String(255)) # , ForeignKey('hosts.id')) binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) availability_zone = Column(String(255), default='manila') class ManilaNode(BASE, ManilaBase): """Represents a running manila service on a host.""" __tablename__ = 'manila_nodes' id = Column(Integer, primary_key=True) service_id = Column(Integer, ForeignKey('services.id'), nullable=True) class Quota(BASE, ManilaBase): """Represents a single quota override for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quotas' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class ProjectUserQuota(BASE, ManilaBase): """Represents a single quota override for a user with in a project.""" __tablename__ = 'project_user_quotas' id = Column(Integer, primary_key=True, nullable=False) project_id = Column(String(255), nullable=False) user_id = Column(String(255), nullable=False) resource = Column(String(255), nullable=False) hard_limit = Column(Integer) class QuotaClass(BASE, ManilaBase): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. 
If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quota_classes' id = Column(Integer, primary_key=True) class_name = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class QuotaUsage(BASE, ManilaBase): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) user_id = Column(String(255)) resource = Column(String(255)) in_use = Column(Integer) reserved = Column(Integer) @property def total(self): return self.in_use + self.reserved until_refresh = Column(Integer, nullable=True) class Reservation(BASE, ManilaBase): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' id = Column(Integer, primary_key=True) uuid = Column(String(36), nullable=False) usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) project_id = Column(String(255), index=True) user_id = Column(String(255)) resource = Column(String(255)) delta = Column(Integer) expire = Column(DateTime, nullable=False) # usage = relationship( # "QuotaUsage", # foreign_keys=usage_id, # primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' # 'QuotaUsage.deleted == 0)') class Migration(BASE, ManilaBase): """Represents a running host-to-host migration.""" __tablename__ = 'migrations' id = Column(Integer, primary_key=True, nullable=False) # NOTE(tr3buchet): the ____compute variables are instance['host'] source_compute = Column(String(255)) dest_compute = Column(String(255)) # NOTE(tr3buchet): dest_host, btw, is an ip address dest_host = Column(String(255)) old_instance_type_id = Column(Integer()) new_instance_type_id = Column(Integer()) instance_uuid = Column(String(255), ForeignKey('instances.uuid'), nullable=True) #TODO(_cerberus_): enum status = Column(String(255)) class Share(BASE, ManilaBase): """Represents an NFS and CIFS shares.""" 
__tablename__ = 'shares' @property def name(self): return CONF.share_name_template % self.id id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') user_id = Column(String(255)) project_id = Column(String(255)) host = Column(String(255)) size = Column(Integer) availability_zone = Column(String(255)) status = Column(String(255)) scheduled_at = Column(DateTime) launched_at = Column(DateTime) terminated_at = Column(DateTime) display_name = Column(String(255)) display_description = Column(String(255)) snapshot_id = Column(String(36)) share_proto = Column(String(255)) export_location = Column(String(255)) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=True) class ShareMetadata(BASE, ManilaBase): """Represents a metadata key/value pair for a share.""" __tablename__ = 'share_metadata' id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) share_id = Column(String(36), ForeignKey('shares.id'), nullable=False) share = relationship(Share, backref="share_metadata", foreign_keys=share_id, primaryjoin='and_(' 'ShareMetadata.share_id == Share.id,' 'ShareMetadata.deleted == 0)') class ShareAccessMapping(BASE, ManilaBase): """Represents access to NFS.""" STATE_NEW = 'new' STATE_ACTIVE = 'active' STATE_DELETING = 'deleting' STATE_DELETED = 'deleted' STATE_ERROR = 'error' __tablename__ = 'share_access_map' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_id = Column(String(36), ForeignKey('shares.id')) access_type = Column(String(255)) access_to = Column(String(255)) state = Column(Enum(STATE_NEW, STATE_ACTIVE, STATE_DELETING, STATE_DELETED, STATE_ERROR), default=STATE_NEW) class ShareSnapshot(BASE, ManilaBase): """Represents a snapshot of a share.""" __tablename__ = 'share_snapshots' @property def name(self): return CONF.share_snapshot_name_template % self.id @property def share_name(self): return 
CONF.share_name_template % self.share_id id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') user_id = Column(String(255)) project_id = Column(String(255)) share_id = Column(String(36)) size = Column(Integer) status = Column(String(255)) progress = Column(String(255)) display_name = Column(String(255)) display_description = Column(String(255)) share_size = Column(Integer) share_proto = Column(String(255)) export_location = Column(String(255)) share = relationship(Share, backref="snapshots", foreign_keys=share_id, primaryjoin='and_(' 'ShareSnapshot.share_id == Share.id,' 'ShareSnapshot.deleted == "False")') class SecurityService(BASE, ManilaBase): """Security service information for manila shares""" __tablename__ = 'security_services' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') project_id = Column(String(36), nullable=False) type = Column(String(32), nullable=False) dns_ip = Column(String(64), nullable=True) server = Column(String(255), nullable=True) domain = Column(String(255), nullable=True) sid = Column(String(255), nullable=True) password = Column(String(255), nullable=True) name = Column(String(255), nullable=True) description = Column(String(255), nullable=True) status = Column(Enum(constants.STATUS_NEW, constants.STATUS_ACTIVE, constants.STATUS_ERROR), default=constants.STATUS_NEW) class ShareNetwork(BASE, ManilaBase): "Represents network data used by share." 
__tablename__ = 'share_networks' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') project_id = Column(String(36), nullable=False) neutron_net_id = Column(String(36), nullable=True) neutron_subnet_id = Column(String(36), nullable=True) network_type = Column(String(32), nullable=True) segmentation_id = Column(Integer, nullable=True) cidr = Column(String(64), nullable=True) ip_version = Column(Integer, nullable=True) name = Column(String(255), nullable=True) description = Column(String(255), nullable=True) status = Column(Enum(constants.STATUS_INACTIVE, constants.STATUS_ACTIVE, constants.STATUS_ERROR), default=constants.STATUS_INACTIVE) security_services = relationship("SecurityService", secondary="share_network_security_service_association", backref="share_networks", primaryjoin='and_(' 'ShareNetwork.id == ' 'ShareNetworkSecurityServiceAssociation.share_network_id,' 'ShareNetworkSecurityServiceAssociation.deleted == 0,' 'ShareNetwork.deleted == "False")', secondaryjoin='and_(' 'SecurityService.id == ' 'ShareNetworkSecurityServiceAssociation.security_service_id,' 'SecurityService.deleted == "False")') network_allocations = relationship("NetworkAllocation", primaryjoin='and_(' 'ShareNetwork.id == NetworkAllocation.share_network_id,' 'NetworkAllocation.deleted == "False")') shares = relationship("Share", backref='share_network', primaryjoin='and_(' 'ShareNetwork.id == Share.share_network_id,' 'Share.deleted == "False")') class ShareNetworkSecurityServiceAssociation(BASE, ManilaBase): """" Association table between compute_zones and compute_nodes tables. """ __tablename__ = 'share_network_security_service_association' id = Column(Integer, primary_key=True) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=False) security_service_id = Column(String(36), ForeignKey('security_services.id'), nullable=False) class NetworkAllocation(BASE, ManilaBase): "Represents network allocation data." 
__tablename__ = 'network_allocations' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') ip_address = Column(String(64), nullable=True) mac_address = Column(String(32), nullable=True) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=False) status = Column(Enum(constants.STATUS_NEW, constants.STATUS_ACTIVE, constants.STATUS_ERROR), default=constants.STATUS_NEW) def register_models(): """Register Models and create metadata. Called from manila.db.sqlalchemy.__init__ as part of loading the driver, it will never need to be called explicitly elsewhere unless the connection is lost and needs to be reestablished. """ from sqlalchemy import create_engine models = (Migration, Service, Share, ShareAccessMapping, ShareSnapshot ) engine = create_engine(CONF.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/__init__.py0000664000175000017500000000141012301410454023605 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/0000775000175000017500000000000012301410516024154 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/README0000664000175000017500000000015312301410454025034 0ustar chuckchuck00000000000000This is a database migration repository. More information at http://code.google.com/p/sqlalchemy-migrate/ manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/__init__.py0000664000175000017500000000000012301410454026254 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/versions/0000775000175000017500000000000012301410516026024 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/versions/001_manila_init.py0000664000175000017500000003515612301410454031255 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo.config import cfg from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import Integer, MetaData, String, Table, UniqueConstraint from manila.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine migrations = Table( 'migrations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('source_compute', String(length=255)), Column('dest_compute', String(length=255)), Column('dest_host', String(length=255)), Column('status', String(length=255)), Column('instance_uuid', String(length=255)), Column('old_instance_type_id', Integer), Column('new_instance_type_id', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) services = Table( 'services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('binary', String(length=255)), Column('topic', String(length=255)), Column('report_count', Integer, nullable=False), Column('disabled', Boolean), Column('availability_zone', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) quotas = Table( 'quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('project_id', String(length=255)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) quota_classes = Table( 'quota_classes', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', 
DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('class_name', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), index=True), Column('resource', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False)), Column('hard_limit', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) quota_usages = Table( 'quota_usages', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('user_id', String(length=255)), Column('project_id', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), index=True), Column('resource', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False)), Column('in_use', Integer(), nullable=False), Column('reserved', Integer(), nullable=False), Column('until_refresh', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) reservations = Table( 'reservations', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('user_id', String(length=255)), Column('uuid', String(length=36, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), nullable=False), Column('usage_id', Integer(), ForeignKey('quota_usages.id'), nullable=False), Column('project_id', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), index=True), Column('resource', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False)), Column('delta', Integer(), nullable=False), Column('expire', DateTime(timezone=False)), 
mysql_engine='InnoDB', mysql_charset='utf8', ) project_user_quotas = Table( 'project_user_quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('user_id', String(length=255), nullable=False), Column('project_id', String(length=255), nullable=False), Column('resource', String(length=25), nullable=False), Column('hard_limit', Integer, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) shares = Table( 'shares', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('size', Integer), Column('availability_zone', String(length=255)), Column('status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('snapshot_id', String(length=36)), Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=True), Column('share_proto', String(255)), Column('export_location', String(255)), mysql_engine='InnoDB', mysql_charset='utf8' ) access_map = Table( 'share_access_map', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_id', String(36), ForeignKey('shares.id'), nullable=False), Column('access_type', String(255)), Column('access_to', String(255)), Column('state', String(255)), mysql_engine='InnoDB', mysql_charset='utf8' ) share_snapshots = 
Table( 'share_snapshots', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('share_id', String(36), ForeignKey('shares.id'), nullable=False), Column('size', Integer), Column('status', String(length=255)), Column('progress', String(length=255)), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('share_size', Integer), Column('share_proto', String(length=255)), Column('export_location', String(255)), mysql_engine='InnoDB', mysql_charset='utf8' ) share_metadata = Table( 'share_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_id', String(length=36), ForeignKey('shares.id'), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=1023), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) security_services = Table( 'security_services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('project_id', String(length=36), nullable=False), Column('type', String(length=32), nullable=False), Column('dns_ip', String(length=64), nullable=True), Column('server', String(length=255), nullable=True), Column('domain', String(length=255), nullable=True), Column('sid', String(length=255), nullable=True), Column('password', String(length=255), nullable=True), Column('name', String(length=255), nullable=True), Column('description', String(length=255), nullable=True), 
Column('status', String(length=16)), mysql_engine='InnoDB', mysql_charset='utf8', ) share_networks = Table( 'share_networks', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('project_id', String(length=36), nullable=False), Column('neutron_net_id', String(length=36), nullable=True), Column('neutron_subnet_id', String(length=36), nullable=True), Column('network_type', String(length=32), nullable=True), Column('segmentation_id', Integer, nullable=True), Column('cidr', String(length=64), nullable=True), Column('ip_version', Integer, nullable=True), Column('name', String(length=255), nullable=True), Column('description', String(length=255), nullable=True), Column('status', String(length=32)), UniqueConstraint('neutron_net_id', 'neutron_subnet_id', 'project_id', 'deleted', name='net_subnet_uc'), mysql_engine='InnoDB', mysql_charset='utf8', ) network_allocations = Table( 'network_allocations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('ip_address', String(length=64), nullable=True), Column('mac_address', String(length=32), nullable=True), Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=False), Column('status', String(length=32)), mysql_engine='InnoDB', mysql_charset='utf8', ) ss_nw_association = Table( 'share_network_security_service_association', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=False), Column('security_service_id', 
String(length=36), ForeignKey('security_services.id'), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) # create all tables # Take care on create order for those with FK dependencies tables = [migrations, quotas, services, quota_classes, quota_usages, reservations, project_user_quotas, security_services, share_networks, network_allocations, ss_nw_association, shares, access_map, share_snapshots, share_metadata] for table in tables: try: table.create() except Exception: LOG.info(repr(table)) LOG.exception(_('Exception while creating table.')) raise if migrate_engine.name == "mysql": tables = ["migrate_version", "migrations", "quotas", "services", "quota_classes", "quota_usages", "reservations", "project_user_quotas", "share_access_map", "share_snapshots", "share_metadata", "security_services", "share_networks", "network_allocations", "shares", "share_network_security_service_association"] sql = "SET foreign_key_checks = 0;" for table in tables: sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table sql += "SET foreign_key_checks = 1;" sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \ % migrate_engine.url.database sql += "ALTER TABLE %s Engine=InnoDB;" % table migrate_engine.execute(sql) def downgrade(migrate_engine): raise NotImplementedError('Downgrade from initial Manila install is not' ' supported.') manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/versions/__init__.py0000664000175000017500000000000012301410454030124 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/manage.py0000664000175000017500000000020312301410454025752 0ustar chuckchuck00000000000000#!/usr/bin/env python from migrate.versioning.shell import main if __name__ == '__main__': main(debug='False', repository='.') manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/migrate_repo/migrate.cfg0000664000175000017500000000173512301410454026274 0ustar chuckchuck00000000000000[db_settings] # Used to identify which 
repository this database is versioned under. # You can use the name of your project. repository_id=manila # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] manila-2013.2.dev175.gbf1a399/manila/db/sqlalchemy/api.py0000664000175000017500000016033612301410454022634 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Implementation of SQLAlchemy backend.""" import datetime import functools import time import uuid import warnings from oslo.config import cfg from sqlalchemy.exc import IntegrityError from sqlalchemy import or_ from sqlalchemy.orm import joinedload from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql import func from manila.common import constants from manila.common import sqlalchemyutils from manila import db from manila.db.sqlalchemy import models from manila.db.sqlalchemy.session import get_session from manila import exception from manila.openstack.common import log as logging from manila.openstack.common import timeutils from manila.openstack.common import uuidutils CONF = cfg.CONF LOG = logging.getLogger(__name__) _DEFAULT_QUOTA_NAME = 'default' PER_PROJECT_QUOTAS = [] def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: 
raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_share_exists(f): """Decorator to require the specified share to exist. Requires the wrapped function to use context and share_id as their first two arguments. """ def wrapper(context, share_id, *args, **kwargs): db.share_get(context, share_id) return f(context, share_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. 
""" session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(model, *args) default_deleted_value = model.__mapper__.c.deleted.default.arg if read_deleted == 'no': query = query.filter(model.deleted == default_deleted_value) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter(model.deleted != default_deleted_value) else: raise Exception(_("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): query = query.filter_by(project_id=context.project_id) return query def exact_filter(query, model, filters, legal_keys): """Applies exact match filtering to a query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param model: model object the query applies to, for IN-style filtering :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def _sync_shares(context, project_id, user_id, session): (shares, gigs) = share_data_get_for_project(context, project_id, user_id, session=session) return {'shares': shares} def _sync_snapshots(context, project_id, user_id, session): (snapshots, gigs) = snapshot_data_get_for_project(context, project_id, user_id, session=session) return {'snapshots': snapshots} def _sync_gigabytes(context, project_id, user_id, session): (_junk, share_gigs) = share_data_get_for_project(context, project_id, user_id, session=session) if CONF.no_snapshot_gb_quota: return {'gigabytes': share_gigs} (_junk, snap_gigs) = snapshot_data_get_for_project(context, project_id, user_id, session=session) return {'gigabytes': share_gigs + snap_gigs} QUOTA_SYNC_FUNCTIONS = { '_sync_shares': _sync_shares, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, } ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.delete(session=session) @require_admin_context def service_get(context, service_id, session=None): result = model_query( context, models.Service, session=session).\ filter_by(id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic): 
return model_query( context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): result = model_query( context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() if not result: raise exception.ServiceNotFound(service_id=None) return result @require_admin_context def service_get_all_by_host(context, host): return model_query( context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return model_query(context, models.Service, func.coalesce(sort_value, 0), session=session, read_deleted="no").\ filter_by(topic=topic).\ filter_by(disabled=False).\ outerjoin((subq, models.Service.host == subq.c.host)).\ order_by(sort_value).\ all() @require_admin_context def service_get_all_share_sorted(context): session = get_session() with session.begin(): topic = CONF.share_topic label = 'share_gigabytes' subq = model_query(context, models.Share, models.Share.host, func.sum(models.Share.size).label(label), session=session, read_deleted="no").\ group_by(models.Share.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True service_ref.save() return service_ref @require_admin_context def service_update(context, service_id, values): session = get_session() with 
session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) ################### @require_context def quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project_and_user(context, project_id, user_id): authorize_project_context(context, project_id) user_quotas = model_query(context, models.ProjectUserQuota, models.ProjectUserQuota.resource, models.ProjectUserQuota.hard_limit).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ all() result = {'project_id': project_id, 'user_id': user_id} for quota in user_quotas: result[quota.resource] = quota.hard_limit return result @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_get_all(context, project_id): authorize_project_context(context, project_id) result = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ all() return result @require_admin_context def quota_create(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS if per_user: check = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ filter_by(resource=resource).\ all() else: check = model_query(context, models.Quota).\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ all() if check: raise 
exception.QuotaExists(project_id=project_id, resource=resource) quota_ref = models.ProjectUserQuota() if per_user else models.Quota() if per_user: quota_ref.user_id = user_id quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit quota_ref.save() return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS model = models.ProjectUserQuota if per_user else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if per_user: query = query.filter_by(user_id=user_id) result = query.update({'hard_limit': limit}) if not result: if per_user: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) ################### @require_context def quota_class_get(context, class_name, resource, session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result def quota_class_get_default(context): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).\ all() result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_class_get_all_by_name(context, class_name): authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = 
class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save() return quota_class_ref @require_admin_context def quota_class_update(context, class_name, resource, limit): result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ update({'hard_limit': limit}) if not result: raise exception.QuotaClassNotFound(class_name=class_name) ################### @require_context def quota_usage_get(context, project_id, resource, user_id=None): query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: if resource not in PER_PROJECT_QUOTAS: result = query.filter_by(user_id=user_id).first() else: result = query.filter_by(user_id=None).first() else: result = query.first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result def _quota_usage_get_all(context, project_id, user_id=None): authorize_project_context(context, project_id) query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id) result = {'project_id': project_id} if user_id: query = query.filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == None)) result['user_id'] = user_id rows = query.all() for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved else: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) return result @require_context def quota_usage_get_all_by_project(context, project_id): return _quota_usage_get_all(context, project_id) @require_context def quota_usage_get_all_by_project_and_user(context, project_id, user_id): return _quota_usage_get_all(context, project_id, user_id=user_id) def _quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh, session=None): quota_usage_ref 
= models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.user_id = user_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh # updated_at is needed for judgement of max_age quota_usage_ref.updated_at = timeutils.utcnow() quota_usage_ref.save(session=session) return quota_usage_ref @require_admin_context def quota_usage_update(context, project_id, user_id, resource, **kwargs): updates = {} for key in ['in_use', 'reserved', 'until_refresh']: if key in kwargs: updates[key] = kwargs[key] result = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == None)).\ update(updates) if not result: raise exception.QuotaUsageNotFound(project_id=project_id) ################### @require_context def reservation_get(context, uuid, session=None): result = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(uuid=uuid).first() if not result: raise exception.ReservationNotFound(uuid=uuid) return result @require_admin_context def reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire): return _reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire) def _reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire, session=None): reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage['id'] reservation_ref.project_id = project_id reservation_ref.user_id = user_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.save(session=session) return reservation_ref ################### # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. 
To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. def _get_user_quota_usages(context, session, project_id, user_id): # Broken out for testability rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == None)).\ with_lockmode('update').\ all() return dict((row.resource, row) for row in rows) def _get_project_quota_usages(context, session, project_id): rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ with_lockmode('update').\ all() result = dict() # Get the total count of in_use,reserved for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved result[row.resource]['total'] += (row.in_use + row.reserved) else: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved, total=row.in_use + row.reserved) return result @require_context def quota_reserve(context, resources, project_quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): elevated = context.elevated() session = get_session() with session.begin(): if project_id is None: project_id = context.project_id if user_id is None: user_id = context.user_id # Get the current usages user_usages = _get_user_quota_usages(context, session, project_id, user_id) project_usages = _get_project_quota_usages(context, session, project_id) # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? 
refresh = False if ((resource not in PER_PROJECT_QUOTAS) and (resource not in user_usages)): user_usages[resource] = _quota_usage_create(elevated, project_id, user_id, resource, 0, 0, until_refresh or None, session=session) refresh = True elif ((resource in PER_PROJECT_QUOTAS) and (resource not in user_usages)): user_usages[resource] = _quota_usage_create(elevated, project_id, None, resource, 0, 0, until_refresh or None, session=session) refresh = True elif user_usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... refresh = True elif user_usages[resource].until_refresh is not None: user_usages[resource].until_refresh -= 1 if user_usages[resource].until_refresh <= 0: refresh = True elif max_age and (user_usages[resource].updated_at - timeutils.utcnow()).seconds >= max_age: refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] updates = sync(elevated, project_id, user_id, session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if ((res not in PER_PROJECT_QUOTAS) and (res not in user_usages)): user_usages[res] = _quota_usage_create(elevated, project_id, user_id, res, 0, 0, until_refresh or None, session=session) if ((res in PER_PROJECT_QUOTAS) and (res not in user_usages)): user_usages[res] = _quota_usage_create(elevated, project_id, None, res, 0, 0, until_refresh or None, session=session) if user_usages[res].in_use != in_use: LOG.debug(_('quota_usages out of sync, updating. 
' 'project_id: %(project_id)s, ' 'user_id: %(user_id)s, ' 'resource: %(res)s, ' 'tracked usage: %(tracked_use)s, ' 'actual usage: %(in_use)s'), {'project_id': project_id, 'user_id': user_id, 'res': res, 'tracked_use': user_usages[res].in_use, 'in_use': in_use}) # Update the usage user_usages[res].in_use = in_use user_usages[res].until_refresh = until_refresh or None # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. # Check for deltas that would go negative unders = [res for res, delta in deltas.items() if delta < 0 and delta + user_usages[res].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. for key, value in user_usages.items(): if key not in project_usages: project_usages[key] = value overs = [res for res, delta in deltas.items() if user_quotas[res] >= 0 and delta >= 0 and (project_quotas[res] < delta + project_usages[res]['total'] or user_quotas[res] < delta + user_usages[res].total)] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. 
# Create the reservations if not overs: reservations = [] for res, delta in deltas.items(): reservation = _reservation_create(elevated, str(uuid.uuid4()), user_usages[res], project_id, user_id, res, delta, expire, session=session) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. if delta > 0: user_usages[res].reserved += delta # Apply updates to the usages table for usage_ref in user_usages.values(): session.add(usage_ref) if unders: LOG.warning(_("Change will make usage less than 0 for the following " "resources: %s"), unders) if overs: if project_quotas == user_quotas: usages = project_usages else: usages = user_usages usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) for k, v in usages.items()) raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas, usages=usages) return reservations def _quota_reservations_query(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_lockmode('update') @require_context def reservation_commit(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): usages = _get_user_quota_usages(context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta 
reservation_query.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) @require_context def reservation_rollback(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): usages = _get_user_quota_usages(context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation_query.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) @require_admin_context def quota_destroy_all_by_project_and_user(context, project_id, user_id): session = get_session() with session.begin(): model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 
'updated_at': literal_column('updated_at')}, synchronize_session=False) model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) @require_admin_context def reservation_expire(context): session = get_session() with session.begin(): current_time = timeutils.utcnow() reservation_query = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter(models.Reservation.expire < current_time) for reservation in reservation_query.join(models.QuotaUsage).all(): if reservation.delta >= 0: reservation.usage.reserved -= reservation.delta session.add(reservation.usage) reservation_query.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}, synchronize_session=False) ################ def _share_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.Share, session=session).\ options(joinedload('share_metadata')) def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs @require_context def share_create(context, values): values['share_metadata'] = _metadata_refs(values.get('metadata'), 
models.ShareMetadata) share_ref = models.Share() if not values.get('id'): values['id'] = str(uuid.uuid4()) share_ref.update(values) session = get_session() with session.begin(): share_ref.save(session=session) return share_ref @require_admin_context def share_data_get_for_project(context, project_id, user_id, session=None): query = model_query(context, models.Share, func.count(models.Share.id), func.sum(models.Share.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if user_id: result = query.filter_by(user_id=user_id).first() else: result = query.first() return (result[0] or 0, result[1] or 0) @require_context def share_update(context, share_id, values): session = get_session() with session.begin(): share_ref = share_get(context, share_id, session=session) share_ref.update(values) share_ref.save(session=session) return share_ref @require_context def share_get(context, share_id, session=None): result = _share_get_query(context, session).filter_by(id=share_id).first() if result is None: raise exception.NotFound() return result @require_admin_context def share_get_all(context): return _share_get_query(context).all() @require_admin_context def share_get_all_by_host(context, host): query = _share_get_query(context) return query.filter_by(host=host).all() @require_context def share_get_all_by_project(context, project_id): """Returns list of shares with given project ID.""" return _share_get_query(context).filter_by(project_id=project_id).all() @require_context def share_delete(context, share_id): session = get_session() share_ref = share_get(context, share_id, session) share_ref.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at'), 'status': 'deleted'}) session.query(models.ShareMetadata).\ filter_by(share_id=share_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) share_ref.save(session) ################### def 
_share_access_get_query(context, session, values): """ Get access record. """ query = model_query(context, models.ShareAccessMapping, session=session) return query.filter_by(**values) @require_context def share_access_create(context, values): session = get_session() with session.begin(): access_ref = models.ShareAccessMapping() if not values.get('id'): values['id'] = str(uuid.uuid4()) access_ref.update(values) access_ref.save(session=session) return access_ref @require_context def share_access_get(context, access_id): """ Get access record. """ session = get_session() access = _share_access_get_query(context, session, {'id': access_id}).first() if access: return access else: raise exception.NotFound() @require_context def share_access_get_all_for_share(context, share_id): session = get_session() return _share_access_get_query(context, session, {'share_id': share_id}).all() @require_context def share_access_get_all_by_type_and_access(context, share_id, access_type, access): session = get_session() return _share_access_get_query(context, session, {'share_id': share_id, 'access_type': access_type, 'access_to': access}).all() @require_context def share_access_delete(context, access_id): session = get_session() with session.begin(): session.query(models.ShareAccessMapping).\ filter_by(id=access_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at'), 'state': models.ShareAccessMapping.STATE_DELETED}) @require_context def share_access_update(context, access_id, values): session = get_session() with session.begin(): access = _share_access_get_query(context, session, {'id': access_id}) access = access.one() access.update(values) access.save(session=session) return access ################### @require_context def share_snapshot_create(context, values): snapshot_ref = models.ShareSnapshot() if not values.get('id'): values['id'] = str(uuid.uuid4()) snapshot_ref.update(values) session = get_session() with session.begin(): 
snapshot_ref.save(session=session) return share_snapshot_get(context, values['id'], session=session) @require_admin_context def snapshot_data_get_for_project(context, project_id, user_id, session=None): query = model_query(context, models.ShareSnapshot, func.count(models.ShareSnapshot.id), func.sum(models.ShareSnapshot.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if user_id: result = query.filter_by(user_id=user_id).first() else: result = query.first() return (result[0] or 0, result[1] or 0) @require_context def share_snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): session.query(models.ShareSnapshot).\ filter_by(id=snapshot_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def share_snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.ShareSnapshot, session=session, project_only=True).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.ShareSnapshotNotFound(snapshot_id=snapshot_id) return result @require_admin_context def share_snapshot_get_all(context): return model_query(context, models.ShareSnapshot).all() @require_context def share_snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.ShareSnapshot).\ filter_by(project_id=project_id).\ all() @require_context def share_snapshot_get_all_for_share(context, share_id): return model_query(context, models.ShareSnapshot, read_deleted='no', project_only=True).\ filter_by(share_id=share_id).all() @require_context def share_snapshot_data_get_for_project(context, project_id, session=None): authorize_project_context(context, project_id) result = model_query(context, models.ShareSnapshot, func.count(models.ShareSnapshot.id), func.sum(models.ShareSnapshot.share_size), read_deleted="no", session=session).\ 
filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_context def share_snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = share_snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) snapshot_ref.save(session=session) return snapshot_ref ################################# @require_context @require_share_exists def share_metadata_get(context, share_id): return _share_metadata_get(context, share_id) @require_context @require_share_exists def share_metadata_delete(context, share_id, key): _share_metadata_get_query(context, share_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_share_exists def share_metadata_update(context, share_id, metadata, delete): return _share_metadata_update(context, share_id, metadata, delete) def _share_metadata_get_query(context, share_id, session=None): return model_query(context, models.ShareMetadata, session=session, read_deleted="no").\ filter_by(share_id=share_id) @require_context @require_share_exists def _share_metadata_get(context, share_id, session=None): rows = _share_metadata_get_query(context, share_id, session=session).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_share_exists def _share_metadata_update(context, share_id, metadata, delete, session=None): if not session: session = get_session() with session.begin(): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _share_metadata_get(context, share_id, session=session) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = _share_metadata_get_item(context, share_id, meta_key, session=session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # 
Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _share_metadata_get_item(context, share_id, meta_key, session=session) except exception.ShareMetadataNotFound: meta_ref = models.ShareMetadata() item.update({"key": meta_key, "share_id": share_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata def _share_metadata_get_item(context, share_id, key, session=None): result = _share_metadata_get_query(context, share_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.ShareMetadataNotFound(metadata_key=key, share_id=share_id) return result @require_context def security_service_create(context, values): if not values.get('id'): values['id'] = uuidutils.generate_uuid() security_service_ref = models.SecurityService() security_service_ref.update(values) session = get_session() with session.begin(): security_service_ref.save(session=session) return security_service_ref @require_context def security_service_delete(context, id): session = get_session() with session.begin(): security_service_ref = security_service_get(context, id, session=session) security_service_ref.delete(session=session) @require_context def security_service_update(context, id, values): session = get_session() with session.begin(): security_service_ref = security_service_get(context, id, session=session) security_service_ref.update(values) security_service_ref.save(session=session) return security_service_ref @require_context def security_service_get(context, id, session=None): result = _security_service_get_query(context, session=session).\ filter_by(id=id).first() if result is None: raise exception.SecurityServiceNotFound(security_service_id=id) return result @require_context def security_service_get_all(context): return _security_service_get_query(context).all() @require_context def 
security_service_get_all_by_project(context, project_id): return _security_service_get_query(context).\ filter_by(project_id=project_id).all() def _security_service_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.SecurityService, session=session) ################### def _network_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.ShareNetwork, session=session).\ options(joinedload('shares'), joinedload('network_allocations'), joinedload('security_services')) @require_context def share_network_create(context, values): if not values.get('id'): values['id'] = uuidutils.generate_uuid() network_ref = models.ShareNetwork() network_ref.update(values) session = get_session() with session.begin(): network_ref.save(session=session) return network_ref @require_context def share_network_delete(context, id): session = get_session() with session.begin(): network_ref = share_network_get(context, id, session=session) network_ref.delete(session=session) @require_context def share_network_update(context, id, values): session = get_session() with session.begin(): network_ref = share_network_get(context, id, session=session) network_ref.update(values) network_ref.save(session=session) return network_ref @require_context def share_network_get(context, id, session=None): result = _network_get_query(context, session).filter_by(id=id).first() if result is None: raise exception.ShareNetworkNotFound(share_network_id=id) return result @require_context def share_network_get_all(context): return _network_get_query(context).all() @require_context def share_network_get_all_by_project(context, project_id): return _network_get_query(context).filter_by(project_id=project_id).all() @require_context def share_network_add_security_service(context, id, security_service_id): session = get_session() with session.begin(): assoc_ref = model_query( context, 
models.ShareNetworkSecurityServiceAssociation, session=session).\ filter_by(share_network_id=id).\ filter_by(security_service_id=security_service_id).first() if assoc_ref: msg = "Already associated" raise exception.ShareNetworkSecurityServiceAssociationError( share_network_id=id, security_service_id=security_service_id, reason=msg) share_nw_ref = share_network_get(context, id, session=session) if share_nw_ref['status'] == constants.STATUS_ACTIVE: msg = "Share network is active" raise exception.ShareNetworkSecurityServiceAssociationError( share_network_id=id, security_service_id=security_service_id, reason=msg) security_service_ref = security_service_get(context, security_service_id, session=session) share_nw_ref.security_services += [security_service_ref] share_nw_ref.save(session=session) return share_nw_ref @require_context def share_network_remove_security_service(context, id, security_service_id): session = get_session() with session.begin(): share_nw_ref = share_network_get(context, id, session=session) security_service_get(context, security_service_id, session=session) assoc_ref = model_query( context, models.ShareNetworkSecurityServiceAssociation, session=session).\ filter_by(share_network_id=id).\ filter_by(security_service_id=security_service_id).first() if assoc_ref: assoc_ref.delete(session=session) else: msg = "No association defined" raise exception.ShareNetworkSecurityServiceDissociationError( share_network_id=id, security_service_id=security_service_id, reason=msg) return share_nw_ref ################### @require_context def network_allocation_create(context, values): alloc_ref = models.NetworkAllocation() alloc_ref.update(values) session = get_session() with session.begin(): alloc_ref.save(session=session) return alloc_ref @require_context def network_allocation_delete(context, id): session = get_session() with session.begin(): alloc_ref = network_allocation_get(context, id, session=session) alloc_ref.delete(session=session) @require_context def 
network_allocation_get(context, id, session=None): if session is None: session = get_session() result = model_query(context, models.NetworkAllocation, session=session).\ filter_by(id=id).first() if result is None: raise exception.NotFound() return result @require_context def network_allocation_update(context, id, values): session = get_session() with session.begin(): alloc_ref = network_allocation_get(context, id, session=session) alloc_ref.update(values) alloc_ref.save(session=session) return alloc_ref manila-2013.2.dev175.gbf1a399/manila/db/migration.py0000664000175000017500000000230712301410454021703 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Database setup and migration commands.""" from manila import utils IMPL = utils.LazyPluggable('db_backend', sqlalchemy='manila.db.sqlalchemy.migration') INIT_VERSION = 000 def db_sync(version=None): """Migrate the database to `version` or the most recent version.""" return IMPL.db_sync(version=version) def db_version(): """Display the current database version.""" return IMPL.db_version() manila-2013.2.dev175.gbf1a399/manila/db/__init__.py0000664000175000017500000000156312301410454021454 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ DB abstraction for Manila """ from manila.db.api import * manila-2013.2.dev175.gbf1a399/manila/db/base.py0000664000175000017500000000255412301410454020630 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for classes that need modular database access.""" from oslo.config import cfg from manila.openstack.common import importutils db_driver_opt = cfg.StrOpt('db_driver', default='manila.db', help='driver to use for database access') CONF = cfg.CONF CONF.register_opt(db_driver_opt) class Base(object): """DB driver is injected in the init method.""" def __init__(self, db_driver=None): if not db_driver: db_driver = CONF.db_driver self.db = importutils.import_module(db_driver) # pylint: disable=C0103 manila-2013.2.dev175.gbf1a399/manila/db/api.py0000664000175000017500000004262012301410454020465 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. 
Functions in this module are imported into the manila.db namespace. Call these functions from manila.db namespace, not the manila.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/manila/manila.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from oslo.config import cfg from manila import exception from manila import utils db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for db'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('share_name_template', default='share-%s', help='Template string to be used to generate share names'), cfg.StrOpt('share_snapshot_name_template', default='share-snapshot-%s', help='Template string to be used to generate share snapshot ' 'names'), ] CONF = cfg.CONF CONF.register_opts(db_opts) IMPL = utils.LazyPluggable('db_backend', sqlalchemy='manila.db.sqlalchemy.api') ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get 
all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def service_get_all_share_sorted(context): """Get all share services sorted by share count. :returns: a list of (Service, share_count) tuples. """ return IMPL.service_get_all_share_sorted(context) def service_get_by_args(context, host, binary): """Get the state of an service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) ################### def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def migration_get_by_instance_and_status(context, instance_uuid, status): """Finds a migration by the instance uuid its migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) def migration_get_all_unconfirmed(context, confirm_window): """Finds all unconfirmed migrations within the confirmation window.""" return IMPL.migration_get_all_unconfirmed(context, confirm_window) #################### def quota_create(context, project_id, resource, limit, user_id=None): """Create a quota for the given project and resource.""" 
return IMPL.quota_create(context, project_id, resource, limit, user_id=user_id) def quota_get(context, project_id, resource, user_id=None): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource, user_id=user_id) def quota_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all quotas associated with a given project and user.""" return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_get_all(context, project_id): """Retrieve all user quotas associated with a given project.""" return IMPL.quota_get_all(context, project_id) def quota_update(context, project_id, resource, limit, user_id=None): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit, user_id=user_id) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_default(context): """Retrieve all default quotas.""" return IMPL.quota_class_get_default(context) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) ################### def quota_usage_get(context, project_id, resource, user_id=None): """Retrieve a quota usage or raise if it does 
not exist.""" return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id) def quota_usage_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project_and_user(context, project_id, user_id) def quota_usage_get_all_by_project(context, project_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project(context, project_id) def quota_usage_update(context, project_id, user_id, resource, **kwargs): """Update a quota usage or raise if it does not exist.""" return IMPL.quota_usage_update(context, project_id, user_id, resource, **kwargs) ################### def reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire): """Create a reservation for the given project and resource.""" return IMPL.reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire) def reservation_get(context, uuid): """Retrieve a reservation or raise if it does not exist.""" return IMPL.reservation_get(context, uuid) ################### def quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): """Check quotas and create appropriate reservations.""" return IMPL.quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=project_id, user_id=user_id) def reservation_commit(context, reservations, project_id=None, user_id=None): """Commit quota reservations.""" return IMPL.reservation_commit(context, reservations, project_id=project_id, user_id=user_id) def reservation_rollback(context, reservations, project_id=None, user_id=None): """Roll back quota reservations.""" return IMPL.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id) def quota_destroy_all_by_project_and_user(context, project_id, user_id): """Destroy all quotas associated with a 
given project and user.""" return IMPL.quota_destroy_all_by_project_and_user(context, project_id, user_id) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_all_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) ################### def share_create(context, values): """Create new share.""" return IMPL.share_create(context, values) def share_data_get_for_project(context, project_id, session=None): """Get (share_count, gigabytes) for project.""" return IMPL.share_data_get_for_project(context, project_id) def share_update(context, share_id, values): """Update share fields.""" return IMPL.share_update(context, share_id, values) def share_get(context, share_id): """Get share by id.""" return IMPL.share_get(context, share_id) def share_get_all(context): """Get all shares.""" return IMPL.share_get_all(context) def share_get_all_by_host(context, host): """Returns all shares with given host.""" return IMPL.share_get_all_by_host(context, host) def share_get_all_by_project(context, project_id): """Returns all shares with given project ID.""" return IMPL.share_get_all_by_project(context, project_id) def share_delete(context, share_id): """Delete share.""" return IMPL.share_delete(context, share_id) ################### def share_access_create(context, values): """Allow access to share.""" return IMPL.share_access_create(context, values) def share_access_get(context, access_id): """Allow access to share.""" return IMPL.share_access_get(context, access_id) def share_access_get_all_for_share(context, share_id): """Allow access to share.""" return IMPL.share_access_get_all_for_share(context, share_id) def share_access_get_all_by_type_and_access(context, share_id, access_type, access): """Returns share access by given type and access""" return IMPL.share_access_get_all_by_type_and_access( context, 
share_id, access_type, access) def share_access_delete(context, access_id): """Deny access to share.""" return IMPL.share_access_delete(context, access_id) def share_access_update(context, access_id, values): """Update access record.""" return IMPL.share_access_update(context, access_id, values) #################### def share_snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.share_snapshot_create(context, values) def snapshot_data_get_for_project(context, project_id, session=None): """Get (snapshot_count, gigabytes) for project.""" return IMPL.snapshot_data_get_for_project(context, project_id) def share_snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.share_snapshot_destroy(context, snapshot_id) def share_snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.share_snapshot_get(context, snapshot_id) def share_snapshot_get_all(context): """Get all snapshots.""" return IMPL.share_snapshot_get_all(context) def share_snapshot_get_all_by_project(context, project_id): """Get all snapshots belonging to a project.""" return IMPL.share_snapshot_get_all_by_project(context, project_id) def share_snapshot_get_all_for_share(context, share_id): """Get all snapshots for a share.""" return IMPL.share_snapshot_get_all_for_share(context, share_id) def share_snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. 
""" return IMPL.share_snapshot_update(context, snapshot_id, values) def share_snapshot_data_get_for_project(context, project_id, session=None): """Get count and gigabytes used for snapshots for specified project.""" return IMPL.share_snapshot_data_get_for_project(context, project_id, session=None) ################### def security_service_create(context, values): """ Create security service DB record.""" return IMPL.security_service_create(context, values) def security_service_delete(context, id): """ Delete security service DB record.""" return IMPL.security_service_delete(context, id) def security_service_update(context, id, values): """ Update security service DB record.""" return IMPL.security_service_update(context, id, values) def security_service_get(context, id): """ Get security service DB record.""" return IMPL.security_service_get(context, id) def security_service_get_all(context): """ Get all security service DB records.""" return IMPL.security_service_get_all(context) def security_service_get_all_by_project(context, project_id): """ Get all security service DB records for the given project.""" return IMPL.security_service_get_all_by_project(context, project_id) #################### def share_metadata_get(context, share_id): """Get all metadata for a share.""" return IMPL.share_metadata_get(context, share_id) def share_metadata_delete(context, share_id, key): """Delete the given metadata item.""" IMPL.share_metadata_delete(context, share_id, key) def share_metadata_update(context, share, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.share_metadata_update(context, share, metadata, delete) ################### def share_network_create(context, values): """Create a share network DB record.""" return IMPL.share_network_create(context, values) def share_network_delete(context, id): """Delete a share network DB record.""" return IMPL.share_network_delete(context, id) def share_network_update(context, id, values): """Update a 
share network DB record.""" return IMPL.share_network_update(context, id, values) def share_network_get(context, id): """Get requested share network DB record.""" return IMPL.share_network_get(context, id) def share_network_get_all(context): """Get all share network DB records.""" return IMPL.share_network_get_all(context) def share_network_get_all_by_project(context, project_id): """Get all share network DB records for the given project.""" return IMPL.share_network_get_all_by_project(context, project_id) def share_network_add_security_service(context, id, security_service_id): return IMPL.share_network_add_security_service(context, id, security_service_id) def share_network_remove_security_service(context, id, security_service_id): return IMPL.share_network_remove_security_service(context, id, security_service_id) def network_allocation_create(context, values): """Create a network allocation DB record.""" return IMPL.network_allocation_create(context, values) def network_allocation_delete(context, id): """Delete a network allocation DB record.""" return IMPL.network_allocation_delete(context, id) def network_allocation_update(context, id, values): """Update a network allocation DB record.""" return IMPL.network_allocation_update(context, id, values) manila-2013.2.dev175.gbf1a399/manila/tests/0000775000175000017500000000000012301410516020112 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/policy.json0000664000175000017500000000120012301410454022276 0ustar chuckchuck00000000000000{ "context_is_admin": [["role:admin"]], "admin_api": [["is_admin:True"]], "share:create": [], "share:get": [], "share:get_all": [], "share:delete": [], "share:update": [], "share:snapshot_update": [], "share:create_snapshot": [], "share:delete_snapshot": [], "share:get_snapshot": [], "share:get_all_snapshots": [], "share:get_share_metadata": [], "share:delete_share_metadata": [], "share:update_share_metadata": [], 
"share_extension:share_admin_actions:reset_status": [["rule:admin_api"]], "share_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]] } manila-2013.2.dev175.gbf1a399/manila/tests/fake_compute.py0000664000175000017500000000476012301410454023136 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # vim: tabstop=4 shiftwidth=4 softtabstop=4 from oslo.config import cfg from manila.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class FakeServer(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_id') self.status = kwargs.pop('status', 'ACTIVE') self.networks = kwargs.pop('networks', {}) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) def get(self, attr, default): return getattr(self, attr, default) def update(self, *args, **kwargs): pass class FakeKeypair(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_keypair_id') for key, value in kwargs.items(): setattr(self, key, value) class FakeImage(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_image_id') for key, value in kwargs.items(): setattr(self, key, value) class API(object): """Fake Compute API""" def instance_volume_attach(self, ctx, server_id, volume_id, mount_path): pass def 
instance_volume_detach(self, ctx, server_id, volume_id): pass def instance_volumes_list(self, ctx, server_id): pass def server_list(self, ctx, search_opts, all_tenants): pass def server_create(self, *args, **kwargs): pass def server_delete(self, *args, **kwargs): pass def server_get(self, *args, **kwargs): pass def keypair_list(self, *args, **kwargs): pass def keypair_import(self, *args, **kwargs): pass def keypair_delete(self, *args, **kwargs): pass def image_list(self, *args, **kwargs): pass manila-2013.2.dev175.gbf1a399/manila/tests/api/0000775000175000017500000000000012301410516020663 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/api/openstack/0000775000175000017500000000000012301410516022652 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/api/openstack/__init__.py0000664000175000017500000000141212301410454024762 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import * manila-2013.2.dev175.gbf1a399/manila/tests/api/openstack/test_wsgi.py0000664000175000017500000007151612301410454025247 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 import inspect import webob from manila.api.openstack import wsgi from manila import exception from manila import test from manila.tests.api import fakes class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = "" self.assertEqual(None, request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = "asdf
    " self.assertRaises(exception.InvalidContentType, request.get_content_type) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual(result, "application/json") def test_content_type_from_accept(self): for content_type in ('application/xml', 'application/vnd.openstack.volume+xml', 'application/json', 'application/vnd.openstack.volume+json'): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(result, content_type) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual(result, "application/xml") def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') result = request.best_match_content_type() self.assertEqual(result, "application/xml") request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual(result, "application/json") request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual(result, "application/json") def test_content_type_accept_and_query_extension(self): request = wsgi.Request.blank('/tests/123.xml') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual(result, "application/xml") def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = 
request.best_match_content_type() self.assertEqual(result, "application/json") class ActionDispatcherTest(test.TestCase): def test_dispatch(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' self.assertEqual(serializer.dispatch({}, action='create'), 'pants') def test_dispatch_action_None(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual(serializer.dispatch({}, action=None), 'trousers') def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual(serializer.dispatch({}, action='update'), 'trousers') class DictSerializerTest(test.TestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual(serializer.serialize({}, 'update'), '') class XMLDictSerializerTest(test.TestCase): def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '(2,3)' serializer = wsgi.XMLDictSerializer(xmlns="asdf") result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_xml) class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace('\n', '').replace(' ', '') self.assertEqual(result, expected_json) class TextDeserializerTest(test.TestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual(deserializer.deserialize({}, 'update'), {}) class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 
'f': '1', }, }, } deserializer = wsgi.JSONDeserializer() self.assertEqual(deserializer.deserialize(data), as_dict) class XMLDeserializerTest(test.TestCase): def test_xml(self): xml = """ 123 1 1 """.strip() as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1', }, }, } metadata = {'plurals': {'bs': 'b', 'ts': 't'}} deserializer = wsgi.XMLDeserializer(metadata=metadata) self.assertEqual(deserializer.deserialize(xml), as_dict) def test_xml_empty(self): xml = """""" as_dict = {"body": {"a": {}}} deserializer = wsgi.XMLDeserializer() self.assertEqual(deserializer.deserialize(xml), as_dict) class ResourceTest(test.TestCase): def test_resource_call(self): class Controller(object): def index(self, req): return 'off' req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(response.body, 'off') self.assertEqual(response.status_int, 200) def test_resource_not_authorized(self): class Controller(object): def index(self, req): raise exception.NotAuthorized() req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(response.status_int, 403) def test_dispatch(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'index', None, '') actual = resource.dispatch(method, None, {'pants': 'off'}) expected = 'off' self.assertEqual(actual, expected) def test_get_method_undefined_controller_action(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(AttributeError, resource.get_method, None, 'create', None, '') def test_get_method_action_json(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return 
body controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/json', '{"fooAction": true}') self.assertEqual(controller._action_foo, method) def test_get_method_action_xml(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/xml', 'true') self.assertEqual(controller._action_foo, method) def test_get_method_action_bad_body(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(exception.MalformedRequestBody, resource.get_method, None, 'action', 'application/json', '{}') def test_get_method_unknown_controller_action(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(KeyError, resource.get_method, None, 'action', 'application/json', '{"barAction": true}') def test_get_method_action_method(self): class Controller(): def action(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/xml', 'true") self.assertTrue(has_dec) def test_index(self): serializer = limits.LimitsTemplate() fixture = { "limits": { "rate": [{ "uri": "*", "regex": ".*", "limit": [{ "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z"}]}, {"uri": "*/servers", "regex": "^/servers", "limit": [{ "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": "2011-12-15T22:42:45Z"}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, 
"maxPersonalitySize": 10240}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') #verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 4) for limit in absolutes: name = limit.get('name') value = limit.get('value') self.assertEqual(value, str(fixture['limits']['absolute'][name])) #verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 2) for i, rate in enumerate(rates): for key in ['uri', 'regex']: self.assertEqual(rate.get(key), str(fixture['limits']['rate'][i][key])) rate_limits = rate.xpath('ns:limit', namespaces=NS) self.assertEqual(len(rate_limits), 1) for j, limit in enumerate(rate_limits): for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: self.assertEqual( limit.get(key), str(fixture['limits']['rate'][i]['limit'][j][key])) def test_index_no_limits(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') #verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 0) #verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 0) manila-2013.2.dev175.gbf1a399/manila/tests/api/v1/test_security_service.py0000664000175000017500000001334212301410454026215 0ustar chuckchuck00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import webob from manila.api.v1 import security_service from manila.common import constants from manila import db from manila import exception from manila import test from manila.tests.api import fakes class ShareApiTest(test.TestCase): """Share Api Test.""" def setUp(self): super(ShareApiTest, self).setUp() self.controller = security_service.SecurityServiceController() self.maxDiff = None self.security_service = { "created_at": "fake-time", "updated_at": "fake-time-2", "id": 1, "name": "fake-name", "description": "Fake Security Service Desc", "type": constants.SECURITY_SERVICES_ALLOWED_TYPES[0], "dns_ip": "1.1.1.1", "server": "fake-server", "domain": "fake-domain", "sid": "fake-sid", "password": "fake-password", "status": "new" } security_service.policy.check_policy = mock.Mock() def test_security_service_show(self): db.security_service_get = mock.Mock(return_value=self.security_service) req = fakes.HTTPRequest.blank('/security-services/1') res_dict = self.controller.show(req, '1') expected = self.security_service.copy() expected.update() self.assertEqual(res_dict, {'security_service': self.security_service}) def test_security_service_show_not_found(self): db.security_service_get = mock.Mock(side_effect=exception.NotFound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_security_service_create(self): sec_service = self.security_service.copy() db.security_service_create = mock.Mock( return_value=sec_service) req = fakes.HTTPRequest.blank('/security-services') res_dict = 
self.controller.create( req, {"security_service": sec_service}) expected = self.security_service.copy() self.assertEqual(res_dict, {'security_service': expected}) def test_security_service_create_invalid_types(self): sec_service = self.security_service.copy() sec_service['type'] = 'invalid' req = fakes.HTTPRequest.blank('/security-services') self.assertRaises(exception.InvalidInput, self.controller.create, req, {"security_service": sec_service}) def test_create_security_service_no_body(self): body = {} req = fakes.HTTPRequest.blank('/security-services') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_security_service_delete(self): db.security_service_delete = mock.Mock() db.security_service_get = mock.Mock() req = fakes.HTTPRequest.blank('/shares/1') resp = self.controller.delete(req, 1) db.security_service_delete.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(resp.status_int, 202) def test_security_service_delete_not_found(self): db.security_service_get = mock.Mock(side_effect=exception.NotFound) req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def test_security_service_update_name(self): new = self.security_service.copy() updated = self.security_service.copy() updated['name'] = 'new' db.security_service_get = mock.Mock(return_value=new) db.security_service_update = mock.Mock(return_value=updated) body = {"security_service": {"name": "new"}} req = fakes.HTTPRequest.blank('/security_service/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(res_dict['name'], updated['name']) def test_security_service_update_description(self): new = self.security_service.copy() updated = self.security_service.copy() updated['description'] = 'new' db.security_service_get = mock.Mock(return_value=new) db.security_service_update = mock.Mock(return_value=updated) body = {"security_service": 
{"description": "new"}} req = fakes.HTTPRequest.blank('/security_service/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(res_dict['description'], updated['description']) def test_security_service_list(self): db.security_service_get_all_by_project = mock.Mock( return_value=[self.security_service.copy()]) req = fakes.HTTPRequest.blank('/security_services') res_dict = self.controller.index(req) expected = {'security_services': [ {'id': self.security_service['id'], 'name': self.security_service['name'], 'status': self.security_service['status'] } ]} self.assertEqual(res_dict, expected) manila-2013.2.dev175.gbf1a399/manila/tests/api/v1/test_share_networks.py0000664000175000017500000002375512301410454025675 0ustar chuckchuck00000000000000# Copyright 2014 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import unittest from webob import exc as webob_exc from manila.api.v1 import share_networks from manila.common import constants from manila.db import api as db_api from manila import exception from manila.tests.api import fakes fake_share_network = {'id': 'fake network id', 'project_id': 'fake project', 'created_at': None, 'updated_at': None, 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'fake name', 'description': 'fake description', 'status': constants.STATUS_INACTIVE, 'shares': [], 'network_allocations': [], 'security_services': [] } class ShareNetworkAPITest(unittest.TestCase): def __init__(self, *args, **kwargs): super(ShareNetworkAPITest, self).__init__(*args, **kwargs) self.controller = share_networks.ShareNetworkController() self.req = fakes.HTTPRequest.blank('/share-networks') self.context = self.req.environ['manila.context'] self.body = {share_networks.RESOURCE_NAME: {'name': 'fake name'}} def _check_share_network_view(self, view, share_nw): self.assertEqual(view['id'], share_nw['id']) self.assertEqual(view['project_id'], share_nw['project_id']) self.assertEqual(view['created_at'], share_nw['created_at']) self.assertEqual(view['updated_at'], share_nw['updated_at']) self.assertEqual(view['neutron_net_id'], share_nw['neutron_net_id']) self.assertEqual(view['neutron_subnet_id'], share_nw['neutron_subnet_id']) self.assertEqual(view['network_type'], share_nw['network_type']) self.assertEqual(view['segmentation_id'], share_nw['segmentation_id']) self.assertEqual(view['cidr'], share_nw['cidr']) self.assertEqual(view['ip_version'], share_nw['ip_version']) self.assertEqual(view['name'], share_nw['name']) self.assertEqual(view['description'], share_nw['description']) self.assertEqual(view['status'], share_nw['status']) self.assertEqual(view['created_at'], None) self.assertEqual(view['updated_at'], None) self.assertFalse('shares' in 
view) self.assertFalse('network_allocations' in view) self.assertFalse('security_services' in view) def test_create_nominal(self): with mock.patch.object(db_api, 'share_network_create', mock.Mock(return_value=fake_share_network)): result = self.controller.create(self.req, self.body) db_api.share_network_create.assert_called_once_with( self.req.environ['manila.context'], self.body[share_networks.RESOURCE_NAME]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) def test_create_db_api_exception(self): with mock.patch.object(db_api, 'share_network_create', mock.Mock(side_effect=exception.DBError)): self.assertRaises(webob_exc.HTTPBadRequest, self.controller.create, self.req, self.body) def test_create_wrong_body(self): body = None self.assertRaises(webob_exc.HTTPUnprocessableEntity, self.controller.create, self.req, body) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) def test_delete_nominal(self): share_nw = 'fake network id' with mock.patch.object(db_api, 'share_network_delete'): self.controller.delete(self.req, share_nw) db_api.share_network_delete.assert_called_once_with( self.req.environ['manila.context'], share_nw) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_delete_not_found(self): share_nw = 'fake network id' db_api.share_network_get.side_effect = exception.ShareNetworkNotFound( share_network_id=share_nw) self.assertRaises(webob_exc.HTTPNotFound, self.controller.delete, self.req, share_nw) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_delete_in_use(self): share_nw = fake_share_network.copy() share_nw['status'] = constants.STATUS_ACTIVE db_api.share_network_get.return_value = share_nw self.assertRaises(webob_exc.HTTPBadRequest, self.controller.delete, self.req, share_nw['id']) def test_show_nominal(self): share_nw = 'fake network id' with mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)): 
result = self.controller.show(self.req, share_nw) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) def test_show_not_found(self): share_nw = 'fake network id' test_exception = exception.ShareNetworkNotFound() with mock.patch.object(db_api, 'share_network_get', mock.Mock(side_effect=test_exception)): self.assertRaises(webob_exc.HTTPNotFound, self.controller.show, self.req, share_nw) def test_index_no_filters(self): networks = [fake_share_network] with mock.patch.object(db_api, 'share_network_get_all_by_project', mock.Mock(return_value=networks)): result = self.controller.index(self.req) db_api.share_network_get_all_by_project.assert_called_once_with( self.context, self.context.project_id) self.assertEqual(len(result[share_networks.RESOURCES_NAME]), 1) self._check_share_network_view( result[share_networks.RESOURCES_NAME][0], fake_share_network) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_nominal(self): share_nw = 'fake network id' db_api.share_network_get.return_value = fake_share_network body = {share_networks.RESOURCE_NAME: {'name': 'new name'}} with mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)): result = self.controller.update(self.req, share_nw, body) db_api.share_network_update.assert_called_once_with( self.req.environ['manila.context'], share_nw, body[share_networks.RESOURCE_NAME]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_not_found(self): share_nw = 'fake network id' db_api.share_network_get.side_effect = exception.ShareNetworkNotFound( share_network_id=share_nw) self.assertRaises(webob_exc.HTTPNotFound, self.controller.update, self.req, share_nw, self.body) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def 
test_update_in_use(self): share_nw = fake_share_network.copy() share_nw['status'] = constants.STATUS_ACTIVE db_api.share_network_get.return_value = share_nw self.assertRaises(webob_exc.HTTPBadRequest, self.controller.update, self.req, share_nw['id'], self.body) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_db_api_exception(self): share_nw = 'fake network id' db_api.share_network_get.return_value = fake_share_network body = {share_networks.RESOURCE_NAME: {'neutron_subnet_id': 'new subnet'}} with mock.patch.object(db_api, 'share_network_update', mock.Mock(side_effect=exception.DBError)): self.assertRaises(webob_exc.HTTPBadRequest, self.controller.update, self.req, share_nw, body) manila-2013.2.dev175.gbf1a399/manila/tests/api/v1/test_share_metadata.py0000664000175000017500000004117412301410454025574 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from oslo.config import cfg import webob from manila.api import extensions from manila.api.v1 import share_metadata from manila.api.v1 import shares import manila.db from manila import exception from manila.openstack.common import jsonutils from manila import test from manila.tests.api import fakes CONF = cfg.CONF def return_create_share_metadata_max(context, share_id, metadata, delete): return stub_max_share_metadata() def return_create_share_metadata(context, share_id, metadata, delete): return stub_share_metadata() def return_share_metadata(context, share_id): if not isinstance(share_id, str) or not len(share_id) == 36: msg = 'id %s must be a uuid in return share metadata' % share_id raise Exception(msg) return stub_share_metadata() def return_empty_share_metadata(context, share_id): return {} def delete_share_metadata(context, share_id, key): pass def stub_share_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def stub_max_share_metadata(): metadata = {"metadata": {}} for num in range(CONF.quota_metadata_items): metadata['metadata']['key%i' % num] = "blah" return metadata def return_share(context, share_id): return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', 'name': 'fake', 'metadata': {}} def return_share_nonexistent(context, share_id): raise exception.NotFound('bogus test message') def fake_update_share_metadata(self, context, share, diff): pass class ShareMetaDataTest(test.TestCase): def setUp(self): super(ShareMetaDataTest, self).setUp() self.share_api = manila.share.api.API() fakes.stub_out_key_pair_funcs(self.stubs) self.stubs.Set(manila.db, 'share_get', return_share) self.stubs.Set(manila.db, 'share_metadata_get', return_share_metadata) self.stubs.Set(self.share_api, 'update_share_metadata', fake_update_share_metadata) self.share_controller = shares.ShareController() self.controller = share_metadata.ShareMetadataController() self.req_id = str(uuid.uuid4()) self.url = '/shares/%s/metadata' % 
self.req_id sh = {"size": 1, "name": "Share Test Name", "share_proto": "nfs", "display_name": "Updated Desc", "display_description": "Share Test Desc", "metadata": {}} body = {"share": sh} req = fakes.HTTPRequest.blank('/shares') self.share_controller.create(req, body) def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) def test_index_nonexistent_share(self): self.stubs.Set(manila.db, 'share_metadata_get', return_share_nonexistent) req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) def test_index_no_data(self): self.stubs.Set(manila.db, 'share_metadata_get', return_empty_share_metadata) req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) def test_show(self): req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) def test_show_nonexistent_share(self): self.stubs.Set(manila.db, 'share_metadata_get', return_share_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key2') def test_show_meta_not_found(self): self.stubs.Set(manila.db, 'share_metadata_get', return_empty_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.req_id, 'key6') def test_delete(self): self.stubs.Set(manila.db, 'share_metadata_get', return_share_metadata) self.stubs.Set(manila.db, 'share_metadata_delete', delete_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' res = self.controller.delete(req, 
self.req_id, 'key2') self.assertEqual(200, res.status_int) def test_delete_nonexistent_share(self): self.stubs.Set(manila.db, 'share_get', return_share_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key1') def test_delete_meta_not_found(self): self.stubs.Set(manila.db, 'share_metadata_get', return_empty_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.req_id, 'key6') def test_create(self): self.stubs.Set(manila.db, 'share_metadata_get', return_empty_share_metadata) self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank('/v1/share_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dumps(body) res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(body, res_dict) def test_create_empty_body(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, None) def test_create_item_empty_key(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_item_key_too_long(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): 
"value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_nonexistent_share(self): self.stubs.Set(manila.db, 'share_get', return_share_nonexistent) self.stubs.Set(manila.db, 'share_metadata_get', return_share_metadata) self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank('/v1/share_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dumps(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, self.req_id, body) def test_update_all(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', }, } req.body = jsonutils.dumps(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) def test_update_all_empty_container(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dumps(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dumps(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_malformed_data(self): self.stubs.Set(manila.db, 
'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dumps(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_nonexistent_share(self): self.stubs.Set(manila.db, 'share_get', return_share_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dumps(body) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) def test_update_item(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" res_dict = self.controller.update(req, self.req_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) def test_update_item_nonexistent_share(self): self.stubs.Set(manila.db, 'share_get', return_share_nonexistent) req = fakes.HTTPRequest.blank('/v1.1/fake/shares/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_empty_body(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', None) def test_update_item_empty_key(self): self.stubs.Set(manila.db, 'share_metadata_update', 
return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, '', body) def test_update_item_key_too_long(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, ("a" * 260), body) def test_update_item_value_too_long(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 1025)}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, "key1", body) def test_update_item_too_many_keys(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_body_uri_mismatch(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body) def 
test_invalid_metadata_items_on_create(self): self.stubs.Set(manila.db, 'share_metadata_update', return_create_share_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" #test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) #test for long value data = {"metadata": {"key": "v" * 1025}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) #test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, data) manila-2013.2.dev175.gbf1a399/manila/tests/api/__init__.py0000664000175000017500000000141212301410454022773 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import * manila-2013.2.dev175.gbf1a399/manila/tests/api/common.py0000664000175000017500000000233312301410454022527 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def compare_links(actual, expected): """Compare xml atom links.""" return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) def compare_media_types(actual, expected): """Compare xml media types.""" return compare_tree_to_dict(actual, expected, ('base', 'type')) def compare_tree_to_dict(actual, expected, keys): """Compare parts of lxml.etree objects to dicts.""" for elem, data in zip(actual, expected): for key in keys: if elem.get(key) != data.get(key): return False return True manila-2013.2.dev175.gbf1a399/manila/tests/api/test_wsgi.py0000664000175000017500000000427212301410454023253 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Test WSGI basics and provide some helper functions for other WSGI tests. """ from manila import test import routes import webob from manila import wsgi class Test(test.TestCase): def test_debug(self): class Application(wsgi.Application): """Dummy application to test debug.""" def __call__(self, environ, start_response): start_response("200", [("X-Test", "checking")]) return ['Test result'] application = wsgi.Debug(Application()) result = webob.Request.blank('/').get_response(application) self.assertEqual(result.body, "Test result") def test_router(self): class Application(wsgi.Application): """Test application to call from router.""" def __call__(self, environ, start_response): start_response("200", []) return ['Router result'] class Router(wsgi.Router): """Test router.""" def __init__(self): mapper = routes.Mapper() mapper.connect("/test", controller=Application()) super(Router, self).__init__(mapper) result = webob.Request.blank('/test').get_response(Router()) self.assertEqual(result.body, "Router result") result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") manila-2013.2.dev175.gbf1a399/manila/tests/api/fakes.py0000664000175000017500000001304312301410454022330 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import routes import webob import webob.dec import webob.request from manila.api.middleware import auth from manila.api.middleware import fault from manila.api.openstack import wsgi as os_wsgi from manila.api import urlmap from manila.api.v1 import limits from manila.api.v1 import router from manila.api import versions from manila import context from manila import exception as exc from manila.openstack.common import timeutils from manila import wsgi FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} class Context(object): pass class FakeRouter(wsgi.Router): def __init__(self, ext_mgr=None): pass @webob.dec.wsgify def __call__(self, req): res = webob.Response() res.status = '200' res.headers['X-Test-Success'] = 'True' return res @webob.dec.wsgify def fake_wsgi(self, req): return self.application def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, use_no_auth=False, ext_mgr=None): if not inner_app_v2: inner_app_v2 = router.APIRouter(ext_mgr) if fake_auth: if fake_auth_context is not None: ctxt = fake_auth_context else: ctxt = context.RequestContext('fake', 'fake', auth_token=True) api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt, inner_app_v2)) elif use_no_auth: api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware( limits.RateLimitingMiddleware(inner_app_v2))) else: api_v2 = fault.FaultWrapper(auth.AuthMiddleware( limits.RateLimitingMiddleware(inner_app_v2))) mapper = urlmap.URLMap() mapper['/v2'] = api_v2 mapper['/'] = fault.FaultWrapper(versions.Versions()) return mapper def stub_out_rate_limiting(stubs): def fake_rate_init(self, app): # super(limits.RateLimitingMiddleware, self).__init__(app) self.application = app # FIXME(ja): unsure about limits in volumes # stubs.Set(manila.api.openstack.compute.limits.RateLimitingMiddleware, # '__init__', fake_rate_init) # stubs.Set(manila.api.openstack.compute.limits.RateLimitingMiddleware, # '__call__', fake_wsgi) def stub_out_key_pair_funcs(stubs, have_key_pair=True): def 
key_pair(context, user_id): return [dict(name='key', public_key='public_key')] def one_key_pair(context, user_id, name): if name == 'key': return dict(name='key', public_key='public_key') else: raise exc.KeypairNotFound(user_id=user_id, name=name) def no_key_pair(context, user_id): return [] class FakeToken(object): id_count = 0 def __getitem__(self, key): return getattr(self, key) def __init__(self, **kwargs): FakeToken.id_count += 1 self.id = FakeToken.id_count for k, v in kwargs.iteritems(): setattr(self, k, v) class FakeRequestContext(context.RequestContext): def __init__(self, *args, **kwargs): kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') return super(FakeRequestContext, self).__init__(*args, **kwargs) class HTTPRequest(webob.Request): @classmethod def blank(cls, *args, **kwargs): kwargs['base_url'] = 'http://localhost/v1' use_admin_context = kwargs.pop('use_admin_context', False) out = webob.Request.blank(*args, **kwargs) out.environ['manila.context'] = FakeRequestContext( 'fake_user', 'fake', is_admin=use_admin_context) return out class TestRouter(wsgi.Router): def __init__(self, controller): mapper = routes.Mapper() mapper.resource("test", "tests", controller=os_wsgi.Resource(controller)) super(TestRouter, self).__init__(mapper) class FakeAuthDatabase(object): data = {} @staticmethod def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod def auth_token_create(context, token): fake_token = FakeToken(created_at=timeutils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod def auth_token_destroy(context, token_id): token = FakeAuthDatabase.data.get('id_%i' % token_id) if token and token.token_hash in FakeAuthDatabase.data: del FakeAuthDatabase.data[token.token_hash] del FakeAuthDatabase.data['id_%i' % token_id] class FakeRateLimiter(object): def __init__(self, application): 
self.application = application @webob.dec.wsgify def __call__(self, req): return self.application def get_fake_uuid(token=0): if token not in FAKE_UUIDS: FAKE_UUIDS[token] = str(uuid.uuid4()) return FAKE_UUIDS[token] manila-2013.2.dev175.gbf1a399/manila/tests/api/test_xmlutil.py0000664000175000017500000006173512301410454024007 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from manila.api import xmlutil from manila import test class SelectorTest(test.TestCase): obj_for_test = {'test': {'name': 'test', 'values': [1, 2, 3], 'attrs': {'foo': 1, 'bar': 2, 'baz': 3, }, }, } def test_empty_selector(self): sel = xmlutil.Selector() self.assertEqual(len(sel.chain), 0) self.assertEqual(sel(self.obj_for_test), self.obj_for_test) def test_dict_selector(self): sel = xmlutil.Selector('test') self.assertEqual(len(sel.chain), 1) self.assertEqual(sel.chain[0], 'test') self.assertEqual(sel(self.obj_for_test), self.obj_for_test['test']) def test_datum_selector(self): sel = xmlutil.Selector('test', 'name') self.assertEqual(len(sel.chain), 2) self.assertEqual(sel.chain[0], 'test') self.assertEqual(sel.chain[1], 'name') self.assertEqual(sel(self.obj_for_test), 'test') def test_list_selector(self): sel = xmlutil.Selector('test', 'values', 0) self.assertEqual(len(sel.chain), 3) self.assertEqual(sel.chain[0], 'test') self.assertEqual(sel.chain[1], 
'values') self.assertEqual(sel.chain[2], 0) self.assertEqual(sel(self.obj_for_test), 1) def test_items_selector(self): sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items) self.assertEqual(len(sel.chain), 3) self.assertEqual(sel.chain[2], xmlutil.get_items) for key, val in sel(self.obj_for_test): self.assertEqual(self.obj_for_test['test']['attrs'][key], val) def test_missing_key_selector(self): sel = xmlutil.Selector('test2', 'attrs') self.assertEqual(sel(self.obj_for_test), None) self.assertRaises(KeyError, sel, self.obj_for_test, True) def test_constant_selector(self): sel = xmlutil.ConstantSelector('Foobar') self.assertEqual(sel.value, 'Foobar') self.assertEqual(sel(self.obj_for_test), 'Foobar') class TemplateElementTest(test.TestCase): def test_element_initial_attributes(self): # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3), c=4, d=5, e=6) # Verify all the attributes are as expected expected = dict(a=1, b=2, c=4, d=5, e=6) for k, v in expected.items(): self.assertEqual(elem.attrib[k].chain[0], v) def test_element_get_attributes(self): expected = dict(a=1, b=2, c=3) # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=expected) # Verify that get() retrieves the attributes for k, v in expected.items(): self.assertEqual(elem.get(k).chain[0], v) def test_element_set_attributes(self): attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar')) # Create a bare template element with no attributes elem = xmlutil.TemplateElement('test') # Set the attribute values for k, v in attrs.items(): elem.set(k, v) # Now verify what got set self.assertEqual(len(elem.attrib['a'].chain), 1) self.assertEqual(elem.attrib['a'].chain[0], 'a') self.assertEqual(len(elem.attrib['b'].chain), 1) self.assertEqual(elem.attrib['b'].chain[0], 'foo') self.assertEqual(elem.attrib['c'], attrs['c']) def test_element_attribute_keys(self): attrs = dict(a=1, b=2, c=3, d=4) expected = 
set(attrs.keys()) # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=attrs) # Now verify keys self.assertEqual(set(elem.keys()), expected) def test_element_attribute_items(self): expected = dict(a=xmlutil.Selector(1), b=xmlutil.Selector(2), c=xmlutil.Selector(3)) keys = set(expected.keys()) # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=expected) # Now verify items for k, v in elem.items(): self.assertEqual(expected[k], v) keys.remove(k) # Did we visit all keys? self.assertEqual(len(keys), 0) def test_element_selector_none(self): # Create a template element with no selector elem = xmlutil.TemplateElement('test') self.assertEqual(len(elem.selector.chain), 0) def test_element_selector_string(self): # Create a template element with a string selector elem = xmlutil.TemplateElement('test', selector='test') self.assertEqual(len(elem.selector.chain), 1) self.assertEqual(elem.selector.chain[0], 'test') def test_element_selector(self): sel = xmlutil.Selector('a', 'b') # Create a template element with an explicit selector elem = xmlutil.TemplateElement('test', selector=sel) self.assertEqual(elem.selector, sel) def test_element_subselector_none(self): # Create a template element with no subselector elem = xmlutil.TemplateElement('test') self.assertEqual(elem.subselector, None) def test_element_subselector_string(self): # Create a template element with a string subselector elem = xmlutil.TemplateElement('test', subselector='test') self.assertEqual(len(elem.subselector.chain), 1) self.assertEqual(elem.subselector.chain[0], 'test') def test_element_subselector(self): sel = xmlutil.Selector('a', 'b') # Create a template element with an explicit subselector elem = xmlutil.TemplateElement('test', subselector=sel) self.assertEqual(elem.subselector, sel) def test_element_append_child(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty 
self.assertEqual(len(elem), 0) # Create a child element child = xmlutil.TemplateElement('child') # Append the child to the parent elem.append(child) # Verify that the child was added self.assertEqual(len(elem), 1) self.assertEqual(elem[0], child) self.assertEqual('child' in elem, True) self.assertEqual(elem['child'], child) # Ensure that multiple children of the same name are rejected child2 = xmlutil.TemplateElement('child') self.assertRaises(KeyError, elem.append, child2) def test_element_extend_children(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty self.assertEqual(len(elem), 0) # Create a few children children = [xmlutil.TemplateElement('child1'), xmlutil.TemplateElement('child2'), xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) # Verify that the children were added self.assertEqual(len(elem), 3) for idx in range(len(elem)): self.assertEqual(children[idx], elem[idx]) self.assertEqual(children[idx].tag in elem, True) self.assertEqual(elem[children[idx].tag], children[idx]) # Ensure that multiple children of the same name are rejected children2 = [xmlutil.TemplateElement('child4'), xmlutil.TemplateElement('child1'), ] self.assertRaises(KeyError, elem.extend, children2) # Also ensure that child4 was not added self.assertEqual(len(elem), 3) self.assertEqual(elem[-1].tag, 'child3') def test_element_insert_child(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty self.assertEqual(len(elem), 0) # Create a few children children = [xmlutil.TemplateElement('child1'), xmlutil.TemplateElement('child2'), xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) # Create a child to insert child = xmlutil.TemplateElement('child4') # Insert it elem.insert(1, child) # Ensure the child was inserted in the right place self.assertEqual(len(elem), 4) children.insert(1, child) for 
idx in range(len(elem)): self.assertEqual(children[idx], elem[idx]) self.assertEqual(children[idx].tag in elem, True) self.assertEqual(elem[children[idx].tag], children[idx]) # Ensure that multiple children of the same name are rejected child2 = xmlutil.TemplateElement('child2') self.assertRaises(KeyError, elem.insert, 2, child2) def test_element_remove_child(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty self.assertEqual(len(elem), 0) # Create a few children children = [xmlutil.TemplateElement('child1'), xmlutil.TemplateElement('child2'), xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) # Create a test child to remove child = xmlutil.TemplateElement('child2') # Try to remove it self.assertRaises(ValueError, elem.remove, child) # Ensure that no child was removed self.assertEqual(len(elem), 3) # Now remove a legitimate child elem.remove(children[1]) # Ensure that the child was removed self.assertEqual(len(elem), 2) self.assertEqual(elem[0], children[0]) self.assertEqual(elem[1], children[2]) self.assertEqual('child2' in elem, False) # Ensure the child cannot be retrieved by name def get_key(elem, key): return elem[key] self.assertRaises(KeyError, get_key, elem, 'child2') def test_element_text(self): # Create an element elem = xmlutil.TemplateElement('test') # Ensure that it has no text self.assertEqual(elem.text, None) # Try setting it to a string and ensure it becomes a selector elem.text = 'test' self.assertEqual(hasattr(elem.text, 'chain'), True) self.assertEqual(len(elem.text.chain), 1) self.assertEqual(elem.text.chain[0], 'test') # Try resetting the text to None elem.text = None self.assertEqual(elem.text, None) # Now make up a selector and try setting the text to that sel = xmlutil.Selector() elem.text = sel self.assertEqual(elem.text, sel) # Finally, try deleting the text and see what happens del elem.text self.assertEqual(elem.text, None) def 
test_apply_attrs(self): # Create a template element attrs = dict(attr1=xmlutil.ConstantSelector(1), attr2=xmlutil.ConstantSelector(2)) tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs) # Create an etree element elem = etree.Element('test') # Apply the template to the element tmpl_elem.apply(elem, None) # Now, verify the correct attributes were set for k, v in elem.items(): self.assertEqual(str(attrs[k].value), v) def test_apply_text(self): # Create a template element tmpl_elem = xmlutil.TemplateElement('test') tmpl_elem.text = xmlutil.ConstantSelector(1) # Create an etree element elem = etree.Element('test') # Apply the template to the element tmpl_elem.apply(elem, None) # Now, verify the text was set self.assertEqual(str(tmpl_elem.text.value), elem.text) def test__render(self): attrs = dict(attr1=xmlutil.ConstantSelector(1), attr2=xmlutil.ConstantSelector(2), attr3=xmlutil.ConstantSelector(3)) # Create a master template element master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) # Create a couple of slave template element slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']), xmlutil.TemplateElement('test', attr3=attrs['attr3']), ] # Try the render elem = master_elem._render(None, None, slave_elems, None) # Verify the particulars of the render self.assertEqual(elem.tag, 'test') self.assertEqual(len(elem.nsmap), 0) for k, v in elem.items(): self.assertEqual(str(attrs[k].value), v) # Create a parent for the element to be rendered parent = etree.Element('parent') # Try the render again... 
elem = master_elem._render(parent, None, slave_elems, dict(a='foo')) # Verify the particulars of the render self.assertEqual(len(parent), 1) self.assertEqual(parent[0], elem) self.assertEqual(len(elem.nsmap), 1) self.assertEqual(elem.nsmap['a'], 'foo') def test_render(self): # Create a template element tmpl_elem = xmlutil.TemplateElement('test') tmpl_elem.text = xmlutil.Selector() # Create the object we're going to render obj = ['elem1', 'elem2', 'elem3', 'elem4'] # Try a render with no object elems = tmpl_elem.render(None, None) self.assertEqual(len(elems), 0) # Try a render with one object elems = tmpl_elem.render(None, 'foo') self.assertEqual(len(elems), 1) self.assertEqual(elems[0][0].text, 'foo') self.assertEqual(elems[0][1], 'foo') # Now, try rendering an object with multiple entries parent = etree.Element('parent') elems = tmpl_elem.render(parent, obj) self.assertEqual(len(elems), 4) # Check the results for idx in range(len(obj)): self.assertEqual(elems[idx][0].text, obj[idx]) self.assertEqual(elems[idx][1], obj[idx]) def test_subelement(self): # Try the SubTemplateElement constructor parent = xmlutil.SubTemplateElement(None, 'parent') self.assertEqual(parent.tag, 'parent') self.assertEqual(len(parent), 0) # Now try it with a parent element child = xmlutil.SubTemplateElement(parent, 'child') self.assertEqual(child.tag, 'child') self.assertEqual(len(parent), 1) self.assertEqual(parent[0], child) def test_wrap(self): # These are strange methods, but they make things easier elem = xmlutil.TemplateElement('test') self.assertEqual(elem.unwrap(), elem) self.assertEqual(elem.wrap().root, elem) def test_dyntag(self): obj = ['a', 'b', 'c'] # Create a template element with a dynamic tag tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector()) # Try the render parent = etree.Element('parent') elems = tmpl_elem.render(parent, obj) # Verify the particulars of the render self.assertEqual(len(elems), len(obj)) for idx in range(len(obj)): self.assertEqual(elems[idx][0].tag, 
obj[idx]) class TemplateTest(test.TestCase): def test_wrap(self): # These are strange methods, but they make things easier elem = xmlutil.TemplateElement('test') tmpl = xmlutil.Template(elem) self.assertEqual(tmpl.unwrap(), elem) self.assertEqual(tmpl.wrap(), tmpl) def test__siblings(self): # Set up a basic template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.Template(elem) # Check that we get the right siblings siblings = tmpl._siblings() self.assertEqual(len(siblings), 1) self.assertEqual(siblings[0], elem) def test__nsmap(self): # Set up a basic template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.Template(elem, nsmap=dict(a="foo")) # Check out that we get the right namespace dictionary nsmap = tmpl._nsmap() self.assertNotEqual(id(nsmap), id(tmpl.nsmap)) self.assertEqual(len(nsmap), 1) self.assertEqual(nsmap['a'], 'foo') def test_master_attach(self): # Set up a master template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.MasterTemplate(elem, 1) # Make sure it has a root but no slaves self.assertEqual(tmpl.root, elem) self.assertEqual(len(tmpl.slaves), 0) # Try to attach an invalid slave bad_elem = xmlutil.TemplateElement('test2') self.assertRaises(ValueError, tmpl.attach, bad_elem) self.assertEqual(len(tmpl.slaves), 0) # Try to attach an invalid and a valid slave good_elem = xmlutil.TemplateElement('test') self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem) self.assertEqual(len(tmpl.slaves), 0) # Try to attach an inapplicable template class InapplicableTemplate(xmlutil.Template): def apply(self, master): return False inapp_tmpl = InapplicableTemplate(good_elem) tmpl.attach(inapp_tmpl) self.assertEqual(len(tmpl.slaves), 0) # Now try attaching an applicable template tmpl.attach(good_elem) self.assertEqual(len(tmpl.slaves), 1) self.assertEqual(tmpl.slaves[0].root, good_elem) def test_master_copy(self): # Construct a master template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.MasterTemplate(elem, 1, 
nsmap=dict(a='foo')) # Give it a slave slave = xmlutil.TemplateElement('test') tmpl.attach(slave) # Construct a copy copy = tmpl.copy() # Check to see if we actually managed a copy self.assertNotEqual(tmpl, copy) self.assertEqual(tmpl.root, copy.root) self.assertEqual(tmpl.version, copy.version) self.assertEqual(id(tmpl.nsmap), id(copy.nsmap)) self.assertNotEqual(id(tmpl.slaves), id(copy.slaves)) self.assertEqual(len(tmpl.slaves), len(copy.slaves)) self.assertEqual(tmpl.slaves[0], copy.slaves[0]) def test_slave_apply(self): # Construct a master template elem = xmlutil.TemplateElement('test') master = xmlutil.MasterTemplate(elem, 3) # Construct a slave template with applicable minimum version slave = xmlutil.SlaveTemplate(elem, 2) self.assertEqual(slave.apply(master), True) # Construct a slave template with equal minimum version slave = xmlutil.SlaveTemplate(elem, 3) self.assertEqual(slave.apply(master), True) # Construct a slave template with inapplicable minimum version slave = xmlutil.SlaveTemplate(elem, 4) self.assertEqual(slave.apply(master), False) # Construct a slave template with applicable version range slave = xmlutil.SlaveTemplate(elem, 2, 4) self.assertEqual(slave.apply(master), True) # Construct a slave template with low version range slave = xmlutil.SlaveTemplate(elem, 1, 2) self.assertEqual(slave.apply(master), False) # Construct a slave template with high version range slave = xmlutil.SlaveTemplate(elem, 4, 5) self.assertEqual(slave.apply(master), False) # Construct a slave template with matching version range slave = xmlutil.SlaveTemplate(elem, 3, 3) self.assertEqual(slave.apply(master), True) def test__serialize(self): # Our test object to serialize obj = {'test': {'name': 'foobar', 'values': [1, 2, 3, 4], 'attrs': {'a': 1, 'b': 2, 'c': 3, 'd': 4, }, 'image': {'name': 'image_foobar', 'id': 42, }, }, } # Set up our master template root = xmlutil.TemplateElement('test', selector='test', name='name') value = xmlutil.SubTemplateElement(root, 'value', 
selector='values') value.text = xmlutil.Selector() attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs') xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items, key=0, value=1) master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo')) # Set up our slave template root_slave = xmlutil.TemplateElement('test', selector='test') image = xmlutil.SubTemplateElement(root_slave, 'image', selector='image', id='id') image.text = xmlutil.Selector('name') slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar')) # Attach the slave to the master... master.attach(slave) # Try serializing our object siblings = master._siblings() nsmap = master._nsmap() result = master._serialize(None, obj, siblings, nsmap) # Now we get to manually walk the element tree... self.assertEqual(result.tag, 'test') self.assertEqual(len(result.nsmap), 2) self.assertEqual(result.nsmap['f'], 'foo') self.assertEqual(result.nsmap['b'], 'bar') self.assertEqual(result.get('name'), obj['test']['name']) for idx, val in enumerate(obj['test']['values']): self.assertEqual(result[idx].tag, 'value') self.assertEqual(result[idx].text, str(val)) idx += 1 self.assertEqual(result[idx].tag, 'attrs') for attr in result[idx]: self.assertEqual(attr.tag, 'attr') self.assertEqual(attr.get('value'), str(obj['test']['attrs'][attr.get('key')])) idx += 1 self.assertEqual(result[idx].tag, 'image') self.assertEqual(result[idx].get('id'), str(obj['test']['image']['id'])) self.assertEqual(result[idx].text, obj['test']['image']['name']) class MasterTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') return xmlutil.MasterTemplate(elem, 1) class SlaveTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') return xmlutil.SlaveTemplate(elem, 1) class TemplateBuilderTest(test.TestCase): def test_master_template_builder(self): # Make sure the template hasn't been built yet 
self.assertEqual(MasterTemplateBuilder._tmpl, None) # Now, construct the template tmpl1 = MasterTemplateBuilder() # Make sure that there is a template cached... self.assertNotEqual(MasterTemplateBuilder._tmpl, None) # Make sure it wasn't what was returned... self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt cached = MasterTemplateBuilder._tmpl tmpl2 = MasterTemplateBuilder() self.assertEqual(MasterTemplateBuilder._tmpl, cached) # Make sure we're always getting fresh copies self.assertNotEqual(tmpl1, tmpl2) # Make sure we can override the copying behavior tmpl3 = MasterTemplateBuilder(False) self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3) def test_slave_template_builder(self): # Make sure the template hasn't been built yet self.assertEqual(SlaveTemplateBuilder._tmpl, None) # Now, construct the template tmpl1 = SlaveTemplateBuilder() # Make sure there is a template cached... self.assertNotEqual(SlaveTemplateBuilder._tmpl, None) # Make sure it was what was returned... self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt tmpl2 = SlaveTemplateBuilder() self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) # Make sure we're always getting the cached copy self.assertEqual(tmpl1, tmpl2) class MiscellaneousXMLUtilTests(test.TestCase): def test_make_flat_dict(self): expected_xml = ("\n" 'foobar') root = xmlutil.make_flat_dict('wrapper') tmpl = xmlutil.MasterTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) manila-2013.2.dev175.gbf1a399/manila/tests/api/middleware/0000775000175000017500000000000012301410516023000 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/api/middleware/test_faults.py0000664000175000017500000001635712301410454025724 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from xml.dom import minidom import webob import webob.dec import webob.exc from manila.api import common from manila.api.openstack import wsgi from manila.openstack.common import jsonutils from manila import test class TestFaults(test.TestCase): """Tests covering `manila.api.openstack.faults:Fault` class.""" def _prepare_xml(self, xml_string): """Remove characters from string which hinder XML equality testing.""" xml_string = xml_string.replace(" ", "") xml_string = xml_string.replace("\n", "") xml_string = xml_string.replace("\t", "") return xml_string def test_400_fault_json(self): """Test fault serialized to JSON via file-extension and/or header.""" requests = [ webob.Request.blank('/.json'), webob.Request.blank('/', headers={"Accept": "application/json"}), ] for request in requests: fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) response = request.get_response(fault) expected = { "badRequest": { "message": "scram", "code": 400, }, } actual = jsonutils.loads(response.body) self.assertEqual(response.content_type, "application/json") self.assertEqual(expected, actual) def test_413_fault_json(self): """Test fault serialized to JSON via file-extension and/or header.""" requests = [ webob.Request.blank('/.json'), webob.Request.blank('/', headers={"Accept": "application/json"}), ] for request in requests: exc = webob.exc.HTTPRequestEntityTooLarge fault = wsgi.Fault(exc(explanation='sorry', headers={'Retry-After': 4})) 
response = request.get_response(fault) expected = { "overLimit": { "message": "sorry", "code": 413, "retryAfter": 4, }, } actual = jsonutils.loads(response.body) self.assertEqual(response.content_type, "application/json") self.assertEqual(expected, actual) def test_raise(self): """Ensure the ability to raise :class:`Fault` in WSGI-ified methods.""" @webob.dec.wsgify def raiser(req): raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?')) req = webob.Request.blank('/.xml') resp = req.get_response(raiser) self.assertEqual(resp.content_type, "application/xml") self.assertEqual(resp.status_int, 404) self.assertTrue('whut?' in resp.body) def test_raise_403(self): """Ensure the ability to raise :class:`Fault` in WSGI-ified methods.""" @webob.dec.wsgify def raiser(req): raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?')) req = webob.Request.blank('/.xml') resp = req.get_response(raiser) self.assertEqual(resp.content_type, "application/xml") self.assertEqual(resp.status_int, 403) self.assertTrue('resizeNotAllowed' not in resp.body) self.assertTrue('forbidden' in resp.body) def test_fault_has_status_int(self): """Ensure the status_int is set correctly on faults""" fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) self.assertEqual(fault.status_int, 400) def test_xml_serializer(self): """Ensure that a v1.1 request responds with a v1 xmlns""" request = webob.Request.blank('/v1', headers={"Accept": "application/xml"}) fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) response = request.get_response(fault) self.assertTrue(common.XML_NS_V1 in response.body) self.assertEqual(response.content_type, "application/xml") self.assertEqual(response.status_int, 400) class FaultsXMLSerializationTestV11(test.TestCase): """Tests covering `manila.api.openstack.faults:Fault` class.""" def _prepare_xml(self, xml_string): xml_string = xml_string.replace(" ", "") xml_string = xml_string.replace("\n", "") xml_string = xml_string.replace("\t", "") 
    def test_404_fault(self):
        # The metadata maps the 'code' key of an itemNotFound fault to an
        # XML attribute on the serialized element.
        metadata = {'attributes': {"itemNotFound": 'code'}}
        serializer = wsgi.XMLDictSerializer(metadata=metadata,
                                            xmlns=common.XML_NS_V1)

        fixture = {
            "itemNotFound": {
                "message": "sorry",
                "code": 404,
            },
        }

        output = serializer.serialize(fixture)
        actual = minidom.parseString(self._prepare_xml(output))

        # NOTE(review): the expected literal's XML tags appear to have been
        # stripped by text extraction; upstream this is an <itemNotFound>
        # element (with xmlns="%s" filled by common.XML_NS_V1) containing a
        # <message> child. Confirm against the original before editing.
        expected = minidom.parseString(self._prepare_xml(""" sorry """) %
                                       common.XML_NS_V1)

        self.assertEqual(expected.toxml(), actual.toxml())
class TestLimitingReader(test.TestCase):
    """Tests for sizelimit.LimitingReader byte-count enforcement."""

    def test_limiting_reader(self):
        """Reading exactly the limit succeeds via iteration and read()."""
        BYTES = 1024
        bytes_read = 0
        data = StringIO.StringIO("*" * BYTES)
        for chunk in sizelimit.LimitingReader(data, BYTES):
            bytes_read += len(chunk)

        # Fixed: assertEquals is a deprecated alias; the rest of this test
        # package consistently uses assertEqual.
        self.assertEqual(bytes_read, BYTES)

        bytes_read = 0
        data = StringIO.StringIO("*" * BYTES)
        reader = sizelimit.LimitingReader(data, BYTES)
        byte = reader.read(1)
        while len(byte) != 0:
            bytes_read += 1
            byte = reader.read(1)

        self.assertEqual(bytes_read, BYTES)

    def test_limiting_reader_fails(self):
        """Reading past the limit raises HTTPRequestEntityTooLarge."""
        BYTES = 1024

        def _consume_all_iter():
            bytes_read = 0
            data = StringIO.StringIO("*" * BYTES)
            for chunk in sizelimit.LimitingReader(data, BYTES - 1):
                bytes_read += len(chunk)

        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          _consume_all_iter)

        def _consume_all_read():
            bytes_read = 0
            data = StringIO.StringIO("*" * BYTES)
            reader = sizelimit.LimitingReader(data, BYTES - 1)
            byte = reader.read(1)
            while len(byte) != 0:
                bytes_read += 1
                byte = reader.read(1)

        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          _consume_all_read)
    def test_request_too_large_no_content_length(self):
        # Even without a usable Content-Length header the limiter must
        # reject an oversized body while it is being read.
        self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
        self.request.headers['Content-Length'] = None
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status_int, 413)
class TestManilaKeystoneContextMiddleware(test.TestCase):
    """Verify ManilaKeystoneContext turns auth headers into a context."""

    def setUp(self):
        super(TestManilaKeystoneContextMiddleware, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            # Record the context the middleware injected so that the test
            # methods can inspect it after the request completes.
            self.context = req.environ['manila.context']
            return webob.Response()

        self.context = None
        self.middleware = (manila.api.middleware.auth
                           .ManilaKeystoneContext(fake_app))
        self.request = webob.Request.blank('/')
        self.request.headers['X_TENANT_ID'] = 'testtenantid'
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'

    def test_no_user_or_user_id(self):
        """Missing both identity headers yields a 401 response."""
        resp = self.request.get_response(self.middleware)
        self.assertEqual(resp.status, '401 Unauthorized')

    def test_user_only(self):
        # NOTE(review): the method name and the header set here look
        # swapped relative to test_user_id_only; behavior kept as-is.
        self.request.headers['X_USER_ID'] = 'testuserid'
        resp = self.request.get_response(self.middleware)
        self.assertEqual(resp.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuserid')

    def test_user_id_only(self):
        self.request.headers['X_USER'] = 'testuser'
        resp = self.request.get_response(self.middleware)
        self.assertEqual(resp.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuser')

    def test_user_id_trumps_user(self):
        """X_USER_ID wins when both identity headers are present."""
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER'] = 'testuser'
        resp = self.request.get_response(self.middleware)
        self.assertEqual(resp.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuserid')
def stub_share(id, **kwargs):
    """Return a fake share dict; keyword args override the defaults."""
    defaults = {
        'id': id,
        'share_proto': 'fakeproto',
        'export_location': 'fake_location',
        'user_id': 'fakeuser',
        'project_id': 'fakeproject',
        'host': 'fakehost',
        'size': 1,
        'availability_zone': 'fakeaz',
        'status': 'fakestatus',
        'name': 'vol name',
        'display_name': 'displayname',
        'display_description': 'displaydesc',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'snapshot_id': '2',
        'share_network_id': None,
    }
    defaults.update(kwargs)
    return defaults


def stub_snapshot(id, **kwargs):
    """Return a fake snapshot dict; keyword args override the defaults."""
    defaults = {
        'id': id,
        'share_id': 'fakeshareid',
        'share_proto': 'fakesnapproto',
        'export_location': 'fakesnaplocation',
        'user_id': 'fakesnapuser',
        'project_id': 'fakesnapproject',
        'host': 'fakesnaphost',
        'share_size': 1,
        'size': 1,
        'status': 'fakesnapstatus',
        'share_name': 'fakesharename',
        'display_name': 'displaysnapname',
        'display_description': 'displaysnapdesc',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
    }
    defaults.update(kwargs)
    return defaults
def stub_snapshot_update(self, context, *args, **param):
    # Ignores its arguments and returns a canned share record.
    # NOTE(review): despite the name, this returns a share stub, not a
    # snapshot stub -- presumably intentional for the tests using it.
    share = stub_share('1')
    return share


def stub_share_get_all_by_project(self, context, search_opts=None):
    # Single-element listing backed by stub_share_get.
    return [stub_share_get(self, context, '1')]


def stub_get_all_shares(self, context):
    # Three shares spread across distinct projects, for admin listings.
    return [stub_share(100, project_id='fake'),
            stub_share(101, project_id='superfake'),
            stub_share(102, project_id='superduperfake')]


def stub_snapshot_get(self, context, snapshot_id):
    # Return a canned snapshot with the requested id.
    return stub_snapshot(snapshot_id)


def stub_snapshot_get_notfound(self, context, snapshot_id):
    # Simulate a missing snapshot.
    raise exc.NotFound


def stub_snapshot_create(self, context, share, display_name,
                         display_description):
    # Fabricate a snapshot (fixed id 200) tied to the given share.
    return stub_snapshot(200,
                         share_id=share['id'],
                         display_name=display_name,
                         display_description=display_description)


def stub_snapshot_delete(self, context, *args, **param):
    # Deletion stub: succeed silently.
    pass


def stub_snapshot_get_all_by_project(self, context, search_opts=None):
    # Single-element snapshot listing.
    return [stub_snapshot_get(self, context, 2)]
def app():
    """Build a minimal WSGI app for the tests: router behind a URL map.

    No auth middleware is installed; environ['manila.context'] set by the
    test passes straight through to the API.
    """
    api_router = fakes.router.APIRouter()
    url_map = fakes.urlmap.URLMap()
    url_map['/v2'] = api_router
    return url_map
    def test_malformed_reset_status_body(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        share = db.share_create(ctx, {'status': 'available'})
        req = webob.Request.blank('/v2/fake/shares/%s/action' % share['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # malformed request body: 'x-status' is not the expected 'status' key
        req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}})
        # attach admin context to request
        req.environ['manila.context'] = ctx
        resp = req.get_response(app())
        # bad request
        self.assertEqual(resp.status_int, 400)
        share = db.share_get(ctx, share['id'])
        # status is still 'available' -- a malformed body must not change it
        self.assertEqual(share['status'], 'available')
jsonutils.dumps({'os-reset_status': {'status': 'available'}}) # attach admin context to request req.environ['manila.context'] = ctx resp = req.get_response(app()) # not found self.assertEqual(resp.status_int, 404) self.assertRaises(exception.NotFound, db.share_get, ctx, 'missing-share-id') def test_snapshot_reset_status(self): # admin context ctx = context.RequestContext('admin', 'fake', True) # snapshot in 'error_deleting' share = db.share_create(ctx, {}) snapshot = db.share_snapshot_create(ctx, {'status': 'error_deleting', 'share_id': share['id']}) req = webob.Request.blank('/v2/fake/snapshots/%s/action' % snapshot['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) # attach admin context to request req.environ['manila.context'] = ctx resp = req.get_response(app()) # request is accepted self.assertEqual(resp.status_int, 202) snapshot = db.share_snapshot_get(ctx, snapshot['id']) # status changed to 'error' self.assertEqual(snapshot['status'], 'error') def test_invalid_status_for_snapshot(self): # admin context ctx = context.RequestContext('admin', 'fake', True) # snapshot in 'available' share = db.share_create(ctx, {}) snapshot = db.share_snapshot_create(ctx, {'status': 'available', 'share_id': share['id']}) req = webob.Request.blank('/v2/fake/snapshots/%s/action' % snapshot['id']) req.method = 'POST' req.headers['content-type'] = 'application/json' # 'attaching' is not a valid status for snapshots req.body = jsonutils.dumps({'os-reset_status': {'status': 'attaching'}}) # attach admin context to request req.environ['manila.context'] = ctx resp = req.get_response(app()) # request is accepted self.assertEqual(resp.status_int, 400) snapshot = db.share_snapshot_get(ctx, snapshot['id']) # status is still 'available' self.assertEqual(snapshot['status'], 'available') 
manila-2013.2.dev175.gbf1a399/manila/tests/api/contrib/__init__.py0000664000175000017500000000141212301410454024433 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import * manila-2013.2.dev175.gbf1a399/manila/tests/api/contrib/test_share_actions.py0000664000175000017500000001421012301410454026555 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid import webob from manila.api.contrib import share_actions from manila import exception from manila.openstack.common import jsonutils from manila.openstack.common.rpc import common as rpc_common from manila import share from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from oslo.config import cfg CONF = cfg.CONF def _fake_access_get(self, ctxt, access_id): class Access(object): def __init__(self, **kwargs): self.STATE_NEW = 'fake_new' self.STATE_ACTIVE = 'fake_active' self.STATE_ERROR = 'fake_error' self.params = kwargs self.params['state'] = self.STATE_NEW self.share_id = kwargs.get('share_id') self.id = access_id def __getitem__(self, item): return self.params[item] access = Access(access_id=access_id, share_id='fake_share_id') return access class ShareActionsTest(test.TestCase): def setUp(self): super(ShareActionsTest, self).setUp() self.controller = share_actions.ShareActionsController() self.stubs.Set(share_api.API, 'get', stubs.stub_share_get) def test_allow_access(self): def _stub_allow_access(*args, **kwargs): return {'fake': 'fake'} self.stubs.Set(share_api.API, "allow_access", _stub_allow_access) id = 'fake_share_id' body = {"os-allow_access": {"access_type": 'ip', "access_to": '127.0.0.1'}} expected = {'access': {'fake': 'fake'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res = self.controller._allow_access(req, id, body) self.assertEqual(res, expected) def test_allow_access_error(self): id = 'fake_share_id' body = {"os-allow_access": {"access_type": 'error_type', "access_to": '127.0.0.1'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._allow_access, req, id, body) body = {"os-allow_access": {"access_type": 'ip', "access_to": '127.0.0.*'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) 
    def test_deny_access(self):
        """A valid deny request is accepted with HTTP 202."""
        def _stub_deny_access(*args, **kwargs):
            pass

        self.stubs.Set(share_api.API, "deny_access", _stub_deny_access)
        self.stubs.Set(share_api.API, "access_get", _fake_access_get)

        id = 'fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        res = self.controller._deny_access(req, id, body)
        self.assertEqual(res.status_int, 202)
"fakestatus", "id": "fake_share_id", "access_type": "fakeip", "access_to": "127.0.0.1"}] self.stubs.Set(share_api.API, "access_get_all", _fake_access_get_all) id = 'fake_share_id' body = {"os-access_list": None} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res_dict = self.controller._access_list(req, id, body) expected = _fake_access_get_all() self.assertEqual(res_dict['access_list'], expected) manila-2013.2.dev175.gbf1a399/manila/tests/api/contrib/test_services.py0000664000175000017500000002063412301410454025565 0ustar chuckchuck00000000000000# Copyright 2012 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from datetime import datetime from manila.api.contrib import services from manila import context from manila import db from manila import exception from manila.openstack.common import timeutils from manila import policy from manila import test from manila.tests.api import fakes fake_services_list = [{'binary': 'manila-scheduler', 'host': 'host1', 'availability_zone': 'manila', 'id': 1, 'disabled': True, 'updated_at': datetime(2012, 10, 29, 13, 42, 2), 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, {'binary': 'manila-volume', 'host': 'host1', 'availability_zone': 'manila', 'id': 2, 'disabled': True, 'updated_at': datetime(2012, 10, 29, 13, 42, 5), 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, {'binary': 'manila-scheduler', 'host': 'host2', 'availability_zone': 'manila', 'id': 3, 'disabled': False, 'updated_at': datetime(2012, 9, 19, 6, 55, 34), 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, {'binary': 'manila-volume', 'host': 'host2', 'availability_zone': 'manila', 'id': 4, 'disabled': True, 'updated_at': datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, ] class FakeRequest(object): environ = {"manila.context": context.get_admin_context()} GET = {} class FakeRequestWithSevice(object): environ = {"manila.context": context.get_admin_context()} GET = {"service": "manila-volume"} class FakeRequestWithHost(object): environ = {"manila.context": context.get_admin_context()} GET = {"host": "host1"} class FakeRequestWithHostService(object): environ = {"manila.context": context.get_admin_context()} GET = {"host": "host1", "service": "manila-volume"} def fake_servcie_get_all(context): return fake_services_list def fake_service_get_by_host_binary(context, host, binary): for service in fake_services_list: if service['host'] == host and service['binary'] == binary: return service return None def fake_service_get_by_id(value): for service in fake_services_list: if service['id'] == value: return service return None def 
fake_service_update(context, service_id, values): service = fake_service_get_by_id(service_id) if service is None: raise exception.ServiceNotFound(service_id=service_id) else: {'host': 'host1', 'service': 'manila-volume', 'disabled': values['disabled']} def fake_policy_enforce(context, action, target): pass def fake_utcnow(): return datetime(2012, 10, 29, 13, 42, 11) class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() self.stubs.Set(db, "service_get_all", fake_servcie_get_all) self.stubs.Set(timeutils, "utcnow", fake_utcnow) self.stubs.Set(db, "service_get_by_args", fake_service_get_by_host_binary) self.stubs.Set(db, "service_update", fake_service_update) self.stubs.Set(policy, "enforce", fake_policy_enforce) self.context = context.get_admin_context() self.controller = services.ServiceController() def tearDown(self): super(ServicesTest, self).tearDown() def test_services_list(self): req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [{'binary': 'manila-scheduler', 'host': 'host1', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'manila-volume', 'host': 'host1', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'manila-scheduler', 'host': 'host2', 'zone': 'manila', 'status': 'enabled', 'state': 'up', 'updated_at': datetime(2012, 9, 19, 6, 55, 34)}, {'binary': 'manila-volume', 'host': 'host2', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(res_dict, response) def test_services_list_with_host(self): req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [{'binary': 'manila-scheduler', 'host': 'host1', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'manila-volume', 'host': 'host1', 'zone': 'manila', 
'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(res_dict, response) def test_services_list_with_service(self): req = FakeRequestWithSevice() res_dict = self.controller.index(req) response = {'services': [{'binary': 'manila-volume', 'host': 'host1', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'manila-volume', 'host': 'host2', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(res_dict, response) def test_services_list_with_host_service(self): req = FakeRequestWithHostService() res_dict = self.controller.index(req) response = {'services': [{'binary': 'manila-volume', 'host': 'host1', 'zone': 'manila', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(res_dict, response) def test_services_enable(self): body = {'host': 'host1', 'service': 'manila-volume'} req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable') res_dict = self.controller.update(req, "enable", body) self.assertEqual(res_dict['disabled'], False) def test_services_disable(self): req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable') body = {'host': 'host1', 'service': 'manila-volume'} res_dict = self.controller.update(req, "disable", body) self.assertEqual(res_dict['disabled'], True) manila-2013.2.dev175.gbf1a399/manila/tests/api/test_common.py0000664000175000017500000002372412301410454023575 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suites for 'common' code used throughout the OpenStack HTTP API. """ import webob import webob.exc from manila.api import common from manila import test NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" class LimiterTest(test.TestCase): """ Unit tests for the `manila.api.common.limited` method which takes in a list of items and, depending on the 'offset' and 'limit' GET params, returns a subset or complete set of the given items. """ def setUp(self): """ Run before each test. """ super(LimiterTest, self).setUp() self.tiny = range(1) self.small = range(10) self.medium = range(1000) self.large = range(10000) def test_limiter_offset_zero(self): """ Test offset key works with 0. """ req = webob.Request.blank('/?offset=0') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_offset_medium(self): """ Test offset key works with a medium sized number. """ req = webob.Request.blank('/?offset=10') self.assertEqual(common.limited(self.tiny, req), []) self.assertEqual(common.limited(self.small, req), self.small[10:]) self.assertEqual(common.limited(self.medium, req), self.medium[10:]) self.assertEqual(common.limited(self.large, req), self.large[10:1010]) def test_limiter_offset_over_max(self): """ Test offset key works with a number over 1000 (max_limit). 
""" req = webob.Request.blank('/?offset=1001') self.assertEqual(common.limited(self.tiny, req), []) self.assertEqual(common.limited(self.small, req), []) self.assertEqual(common.limited(self.medium, req), []) self.assertEqual( common.limited(self.large, req), self.large[1001:2001]) def test_limiter_offset_blank(self): """ Test offset key works with a blank offset. """ req = webob.Request.blank('/?offset=') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_bad(self): """ Test offset key works with a BAD offset. """ req = webob.Request.blank(u'/?offset=\u0020aa') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_nothing(self): """ Test request with no offset or limit """ req = webob.Request.blank('/') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_zero(self): """ Test limit of zero. """ req = webob.Request.blank('/?limit=0') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_medium(self): """ Test limit of 10. """ req = webob.Request.blank('/?limit=10') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium[:10]) self.assertEqual(common.limited(self.large, req), self.large[:10]) def test_limiter_limit_over_max(self): """ Test limit of 3000. 
""" req = webob.Request.blank('/?limit=3000') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_and_offset(self): """ Test request with both limit and offset. """ items = range(2000) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual(common.limited(items, req), items[1:4]) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual(common.limited(items, req), items[3:1003]) req = webob.Request.blank('/?offset=3&limit=1500') self.assertEqual(common.limited(items, req), items[3:1003]) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual(common.limited(items, req), []) def test_limiter_custom_max_limit(self): """ Test a max_limit other than 1000. """ items = range(2000) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual( common.limited(items, req, max_limit=2000), items[1:4]) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual( common.limited(items, req, max_limit=2000), items[3:]) req = webob.Request.blank('/?offset=3&limit=2500') self.assertEqual( common.limited(items, req, max_limit=2000), items[3:]) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual(common.limited(items, req, max_limit=2000), []) def test_limiter_negative_limit(self): """ Test a negative limit. """ req = webob.Request.blank('/?limit=-3000') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_negative_offset(self): """ Test a negative offset. """ req = webob.Request.blank('/?offset=-30') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) class PaginationParamsTest(test.TestCase): """ Unit tests for the `manila.api.common.get_pagination_params` method which takes in a request object and returns 'marker' and 'limit' GET params. 
""" def test_no_params(self): """ Test no params. """ req = webob.Request.blank('/') self.assertEqual(common.get_pagination_params(req), {}) def test_valid_marker(self): """ Test valid marker param. """ req = webob.Request.blank( '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2') self.assertEqual(common.get_pagination_params(req), {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'}) def test_valid_limit(self): """ Test valid limit param. """ req = webob.Request.blank('/?limit=10') self.assertEqual(common.get_pagination_params(req), {'limit': 10}) def test_invalid_limit(self): """ Test invalid limit param. """ req = webob.Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req) def test_valid_limit_and_marker(self): """ Test valid limit and marker parameters. """ marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?limit=20&marker=%s' % marker) self.assertEqual(common.get_pagination_params(req), {'marker': marker, 'limit': 20}) class MiscFunctionsTest(test.TestCase): def test_remove_major_version_from_href(self): fixture = 'http://www.testsite.com/v1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_2(self): fixture = 'http://www.testsite.com/v1.1/' expected = 'http://www.testsite.com/' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_3(self): fixture = 'http://www.testsite.com/v10.10' expected = 'http://www.testsite.com' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_4(self): fixture = 
'http://www.testsite.com/v1.1/images/v10.5' expected = 'http://www.testsite.com/images/v10.5' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_bad_request(self): fixture = 'http://www.testsite.com/1.1/images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_remove_version_from_href_bad_request_2(self): fixture = 'http://www.testsite.com/v/images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_remove_version_from_href_bad_request_3(self): fixture = 'http://www.testsite.com/v1.1images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) manila-2013.2.dev175.gbf1a399/manila/tests/api/test_extensions.py0000664000175000017500000001345312301410454024502 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import iso8601 from lxml import etree import webob from manila.api.v1 import router from manila.api import xmlutil from manila.openstack.common import jsonutils from manila import test from oslo.config import cfg CONF = cfg.CONF NS = "{http://docs.openstack.org/common/api/v1.0}" class ExtensionTestCase(test.TestCase): def setUp(self): super(ExtensionTestCase, self).setUp() ext_list = CONF.osapi_share_extension[:] fox = ('manila.tests.api.extensions.foxinsocks.Foxinsocks') if fox not in ext_list: ext_list.append(fox) self.flags(osapi_share_extension=ext_list) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = [] self.ext_list.sort() def test_list_extensions_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(names, self.ext_list) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: iso8601.parse_date(ext['updated']) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual( fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', 'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension', 'alias': 'FOXNSOX', 'links': []}, ) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(output['extension']['alias'], ext['alias']) def test_get_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) self.assertEqual( data['extension'], {"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", "name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension", "alias": "FOXNSOX", "links": []}) def test_get_non_existing_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) def test_list_extensions_xml(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") request.accept = "application/xml" response = request.get_response(app) self.assertEqual(200, response.status_int) root = etree.XML(response.body) self.assertEqual(root.tag.split('extensions')[0], NS) # Make sure we have all the extensions, extras extensions being OK. exts = root.findall('{0}extension'.format(NS)) self.assert_(len(exts) >= len(self.ext_list)) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual(fox_ext.get('name'), 'Fox In Socks') self.assertEqual( fox_ext.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') self.assertEqual( fox_ext.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension') xmlutil.validate_schema(root, 'extensions') def test_get_extension_xml(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") request.accept = "application/xml" response = request.get_response(app) self.assertEqual(200, response.status_int) xml = response.body root = etree.XML(xml) self.assertEqual(root.tag.split('extension')[0], NS) self.assertEqual(root.get('alias'), 'FOXNSOX') self.assertEqual(root.get('name'), 'Fox In Socks') self.assertEqual( root.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') self.assertEqual( root.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension') xmlutil.validate_schema(root, 'extension') manila-2013.2.dev175.gbf1a399/manila/tests/api/test_router.py0000664000175000017500000001020012301410454023606 0ustar chuckchuck00000000000000# Copyright 2011 Denali Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api.openstack import wsgi from manila.api.v1 import router from manila.api import versions from manila.openstack.common import log as logging from manila import test from manila.tests.api import fakes from oslo.config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) class FakeController(object): def __init__(self, ext_mgr=None): self.ext_mgr = ext_mgr def index(self, req): return {} def detail(self, req): return {} def create_resource(ext_mgr): return wsgi.Resource(FakeController(ext_mgr)) class VolumeRouterTestCase(test.TestCase): def setUp(self): super(VolumeRouterTestCase, self).setUp() # NOTE(vish): versions is just returning text so, no need to stub. self.app = router.APIRouter() def test_versions(self): req = fakes.HTTPRequest.blank('') req.method = 'GET' req.content_type = 'application/json' response = req.get_response(self.app) self.assertEqual(302, response.status_int) req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' response = req.get_response(self.app) self.assertEqual(200, response.status_int) def test_versions_multi(self): req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' resource = versions.Versions() result = resource.dispatch(resource.multi, req, {}) ids = [v['id'] for v in result['choices']] self.assertEqual(set(ids), set(['v1.0', 'v2.0'])) def test_versions_multi_disable_v1(self): self.flags(enable_v1_api=False) req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' resource = versions.Versions() result = resource.dispatch(resource.multi, req, {}) ids = [v['id'] for v in result['choices']] self.assertEqual(set(ids), set(['v2.0'])) def test_versions_multi_disable_v2(self): self.flags(enable_v2_api=False) req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' resource = versions.Versions() result = resource.dispatch(resource.multi, req, {}) ids = [v['id'] for v in 
result['choices']] self.assertEqual(set(ids), set(['v1.0'])) def test_versions_index(self): req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' resource = versions.Versions() result = resource.dispatch(resource.index, req, {}) ids = [v['id'] for v in result['versions']] self.assertEqual(set(ids), set(['v1.0', 'v2.0'])) def test_versions_index_disable_v1(self): self.flags(enable_v1_api=False) req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' resource = versions.Versions() result = resource.dispatch(resource.index, req, {}) ids = [v['id'] for v in result['versions']] self.assertEqual(set(ids), set(['v2.0'])) def test_versions_index_disable_v2(self): self.flags(enable_v2_api=False) req = fakes.HTTPRequest.blank('/') req.method = 'GET' req.content_type = 'application/json' resource = versions.Versions() result = resource.dispatch(resource.index, req, {}) ids = [v['id'] for v in result['versions']] self.assertEqual(set(ids), set(['v1.0'])) manila-2013.2.dev175.gbf1a399/manila/tests/volume/0000775000175000017500000000000012301410516021421 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/volume/__init__.py0000664000175000017500000000000012301410454023521 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/volume/test_cinder.py0000664000175000017500000002073212301410454024303 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from cinderclient import exceptions as cinder_exception from manila import context from manila import exception from manila import test from manila.volume import cinder class FakeCinderClient(object): class Volumes(object): def get(self, volume_id): return {'id': volume_id} def list(self, detailed, search_opts={}): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None def __init__(self): self.volumes = self.Volumes() self.volume_snapshots = self.volumes class CinderApiTestCase(test.TestCase): def setUp(self): super(CinderApiTestCase, self).setUp() self.api = cinder.API() self.cinderclient = FakeCinderClient() self.ctx = context.get_admin_context() self.stubs.Set(cinder, 'cinderclient', mock.Mock(return_value=self.cinderclient)) self.stubs.Set(cinder, '_untranslate_volume_summary_view', lambda ctx, vol: vol) self.stubs.Set(cinder, '_untranslate_snapshot_summary_view', lambda ctx, snap: snap) def test_get(self): volume_id = 'volume_id1' result = self.api.get(self.ctx, volume_id) self.assertEqual(result['id'], volume_id) def test_get_failed(self): cinder.cinderclient.side_effect = cinder_exception.NotFound(404) volume_id = 'volume_id' self.assertRaises(exception.VolumeNotFound, self.api.get, self.ctx, volume_id) def test_create(self): result = self.api.create(self.ctx, 1, '', '') self.assertEqual(result['id'], 'created_id') def test_create_failed(self): cinder.cinderclient.side_effect = cinder_exception.BadRequest(400) self.assertRaises(exception.InvalidInput, self.api.create, self.ctx, 1, '', '') def test_get_all(self): cinder._untranslate_volume_summary_view.return_value = ['id1', 'id2'] self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.get_all(self.ctx)) def test_check_attach_volume_status_error(self): volume = {'status': 'error'} 
self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_volume_already_attached(self): volume = {'status': 'available'} volume['attach_status'] = "attached" self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_availability_zone_differs(self): volume = {'status': 'available'} volume['attach_status'] = "detached" instance = {'availability_zone': 'zone1'} volume['availability_zone'] = 'zone2' cinder.CONF.set_override('cinder_cross_az_attach', False) self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) cinder.CONF.reset() def test_check_attach(self): volume = {'status': 'available'} volume['attach_status'] = "detached" volume['availability_zone'] = 'zone1' instance = {'availability_zone': 'zone1'} cinder.CONF.set_override('cinder_cross_az_attach', False) self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) cinder.CONF.reset() def test_check_detach(self): volume = {'status': 'available'} self.assertRaises(exception.InvalidVolume, self.api.check_detach, self.ctx, volume) volume['status'] = 'non-available' self.assertIsNone(self.api.check_detach(self.ctx, volume)) def test_update(self): self.assertRaises(NotImplementedError, self.api.update, self.ctx, '', '') def test_reserve_volume(self): self.stubs.Set(self.cinderclient.volumes, 'reserve', mock.Mock()) self.api.reserve_volume(self.ctx, 'id1') self.cinderclient.volumes.reserve.assert_called_once_with('id1') def test_unreserve_volume(self): self.stubs.Set(self.cinderclient.volumes, 'unreserve', mock.Mock()) self.api.unreserve_volume(self.ctx, 'id1') self.cinderclient.volumes.unreserve.assert_called_once_with('id1') def test_begin_detaching(self): self.stubs.Set(self.cinderclient.volumes, 'begin_detaching', mock.Mock()) self.api.begin_detaching(self.ctx, 'id1') 
self.cinderclient.volumes.begin_detaching.\ assert_called_once_with('id1') def test_roll_detaching(self): self.stubs.Set(self.cinderclient.volumes, 'roll_detaching', mock.Mock()) self.api.roll_detaching(self.ctx, 'id1') self.cinderclient.volumes.roll_detaching.\ assert_called_once_with('id1') def test_attach(self): self.stubs.Set(self.cinderclient.volumes, 'attach', mock.Mock()) self.api.attach(self.ctx, 'id1', 'uuid', 'point') self.cinderclient.volumes.attach.assert_called_once_with('id1', 'uuid', 'point') def test_detach(self): self.stubs.Set(self.cinderclient.volumes, 'detach', mock.Mock()) self.api.detach(self.ctx, 'id1') self.cinderclient.volumes.detach.assert_called_once_with('id1') def test_initialize_connection(self): self.stubs.Set(self.cinderclient.volumes, 'initialize_connection', mock.Mock()) self.api.initialize_connection(self.ctx, 'id1', 'connector') self.cinderclient.volumes.initialize_connection.\ assert_called_once_with('id1', 'connector') def test_terminate_connection(self): self.stubs.Set(self.cinderclient.volumes, 'terminate_connection', mock.Mock()) self.api.terminate_connection(self.ctx, 'id1', 'connector') self.cinderclient.volumes.terminate_connection.\ assert_called_once_with('id1', 'connector') def test_delete(self): self.stubs.Set(self.cinderclient.volumes, 'delete', mock.Mock()) self.api.delete(self.ctx, 'id1') self.cinderclient.volumes.delete.assert_called_once_with('id1') def test_get_snapshot(self): snapshot_id = 'snapshot_id1' result = self.api.get_snapshot(self.ctx, snapshot_id) self.assertEqual(result['id'], snapshot_id) def test_get_snapshot_failed(self): cinder.cinderclient.side_effect = cinder_exception.NotFound(404) snapshot_id = 'snapshot_id' self.assertRaises(exception.VolumeSnapshotNotFound, self.api.get_snapshot, self.ctx, snapshot_id) def test_get_all_snapshots(self): cinder._untranslate_snapshot_summary_view.return_value = ['id1', 'id2'] self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], 
self.api.get_all_snapshots(self.ctx)) def test_create_snapshot(self): result = self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '') self.assertEqual(result['id'], 'created_id') def test_create_force(self): result = self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '') self.assertEqual(result['id'], 'created_id') def test_delete_snapshot(self): self.stubs.Set(self.cinderclient.volume_snapshots, 'delete', mock.Mock()) self.api.delete_snapshot(self.ctx, 'id1') self.cinderclient.volume_snapshots.delete.\ assert_called_once_with('id1') manila-2013.2.dev175.gbf1a399/manila/tests/utils.py0000664000175000017500000000163112301410454021626 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2011 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import manila.context def get_test_admin_context(): return manila.context.get_admin_context() def is_manila_installed(): if os.path.exists('../../manila.manila.egg-info'): return True else: return False manila-2013.2.dev175.gbf1a399/manila/tests/test_service.py0000664000175000017500000001725512301410454023176 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for remote procedure calls using queue """ import mox from oslo.config import cfg from manila import context from manila import db from manila import exception from manila import manager from manila import service from manila import test from manila import wsgi test_service_opts = [ cfg.StrOpt("fake_manager", default="manila.tests.test_service.FakeManager", help="Manager for testing"), cfg.StrOpt("test_service_listen", default=None, help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, help="Port number to bind test service to"), ] CONF = cfg.CONF CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests""" def __init__(self, host=None, db_driver=None, service_name=None): super(FakeManager, self).__init__(host=host, db_driver=db_driver) def test_method(self): return 'manager' class ExtendedService(service.Service): def test_method(self): return 'service' class ServiceManagerTestCase(test.TestCase): """Test cases for Services""" def test_message_gets_to_manager(self): serv = service.Service('test', 'test', 'test', 'manila.tests.test_service.FakeManager') serv.start() self.assertEqual(serv.test_method(), 'manager') def test_override_manager_method(self): serv = ExtendedService('test', 'test', 'test', 'manila.tests.test_service.FakeManager') serv.start() self.assertEqual(serv.test_method(), 'service') class ServiceFlagsTestCase(test.TestCase): def 
test_service_enabled_on_create_based_on_flag(self): self.flags(enable_new_services=True) host = 'foo' binary = 'manila-fake' app = service.Service.create(host=host, binary=binary) app.start() app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) self.assert_(not ref['disabled']) def test_service_disabled_on_create_based_on_flag(self): self.flags(enable_new_services=False) host = 'foo' binary = 'manila-fake' app = service.Service.create(host=host, binary=binary) app.start() app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) self.assert_(ref['disabled']) class ServiceTestCase(test.TestCase): """Test cases for Services""" def setUp(self): super(ServiceTestCase, self).setUp() self.mox.StubOutWithMock(service, 'db') def test_create(self): host = 'foo' binary = 'manila-fake' topic = 'fake' # NOTE(vish): Create was moved out of mox replay to make sure that # the looping calls are created in StartService. 
app = service.Service.create(host=host, binary=binary, topic=topic) self.assert_(app) def test_report_state_newly_disconnected(self): host = 'foo' binary = 'bar' topic = 'test' service_create = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, 'availability_zone': 'nova'} service_ref = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, 'availability_zone': 'nova', 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), host, binary).AndRaise(exception.NotFound()) service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) service.db.service_get(mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(Exception()) self.mox.ReplayAll() serv = service.Service(host, binary, topic, 'manila.tests.test_service.FakeManager') serv.start() serv.report_state() self.assert_(serv.model_disconnected) def test_report_state_newly_connected(self): host = 'foo' binary = 'bar' topic = 'test' service_create = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, 'availability_zone': 'nova'} service_ref = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, 'availability_zone': 'nova', 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), host, binary).AndRaise(exception.NotFound()) service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) service.db.service_get(mox.IgnoreArg(), service_ref['id']).AndReturn(service_ref) service.db.service_update(mox.IgnoreArg(), service_ref['id'], mox.ContainsKeyValue('report_count', 1)) self.mox.ReplayAll() serv = service.Service(host, binary, topic, 'manila.tests.test_service.FakeManager') serv.start() serv.model_disconnected = True serv.report_state() self.assert_(not serv.model_disconnected) class TestWSGIService(test.TestCase): def setUp(self): super(TestWSGIService, self).setUp() self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) def test_service_random_port(self): test_service = service.WSGIService("test_service") self.assertEquals(0, 
test_service.port) test_service.start() self.assertNotEqual(0, test_service.port) test_service.stop() class TestLauncher(test.TestCase): def setUp(self): super(TestLauncher, self).setUp() self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) self.service = service.WSGIService("test_service") def test_launch_app(self): self.assertEquals(0, self.service.port) launcher = service.Launcher() launcher.launch_server(self.service) self.assertEquals(0, self.service.port) launcher.stop() manila-2013.2.dev175.gbf1a399/manila/tests/test_context.py0000664000175000017500000000511012301410454023205 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila import context from manila import test class ContextTestCase(test.TestCase): def test_request_context_sets_is_admin(self): ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) self.assertEquals(ctxt.is_admin, True) def test_request_context_sets_is_admin_upcase(self): ctxt = context.RequestContext('111', '222', roles=['Admin', 'weasel']) self.assertEquals(ctxt.is_admin, True) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEquals(ctxt.read_deleted, 'yes') ctxt.read_deleted = 'no' self.assertEquals(ctxt.read_deleted, 'no') def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) def test_extra_args_to_context_get_logged(self): info = {} def fake_warn(log_msg): info['log_msg'] = log_msg self.stubs.Set(context.LOG, 'warn', fake_warn) c = context.RequestContext('user', 'project', extra_arg1='meow', extra_arg2='wuff') self.assertTrue(c) self.assertIn("'extra_arg1': 'meow'", info['log_msg']) self.assertIn("'extra_arg2': 'wuff'", info['log_msg']) manila-2013.2.dev175.gbf1a399/manila/tests/test_migrations.conf0000664000175000017500000000047512301410454024203 0ustar chuckchuck00000000000000[DEFAULT] # Set up any number of migration data stores you want, one # The "name" used in the test is the config variable key. 
#sqlite=sqlite:///test_migrations.db sqlite=sqlite:// #mysql=mysql://root:@localhost/test_migrations #postgresql=postgresql://user:pass@localhost/test_migrations [walk_style] snake_walk=yes manila-2013.2.dev175.gbf1a399/manila/tests/glance/0000775000175000017500000000000012301410516021343 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/glance/stubs.py0000664000175000017500000000707512301410454023067 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glanceclient.exc NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" class StubGlanceClient(object): def __init__(self, images=None): self._images = [] _images = images or [] map(lambda image: self.create(**image), _images) #NOTE(bcwaldon): HACK to get client.images.* to work self.images = lambda: None for fn in ('list', 'get', 'data', 'create', 'update', 'delete'): setattr(self.images, fn, getattr(self, fn)) #TODO(bcwaldon): implement filters def list(self, filters=None, marker=None, limit=30): if marker is None: index = 0 else: for index, image in enumerate(self._images): if image.id == str(marker): index += 1 break else: raise glanceclient.exc.BadRequest('Marker not found') return self._images[index:index + limit] def get(self, image_id): for image in self._images: if image.id == str(image_id): return image raise glanceclient.exc.NotFound(image_id) def data(self, image_id): self.get(image_id) return [] def create(self, **metadata): metadata['created_at'] = NOW_GLANCE_FORMAT metadata['updated_at'] = NOW_GLANCE_FORMAT self._images.append(FakeImage(metadata)) try: image_id = str(metadata['id']) except KeyError: # auto-generate an id if one wasn't provided image_id = str(len(self._images)) self._images[-1].id = image_id return self._images[-1] def update(self, image_id, **metadata): for i, image in enumerate(self._images): if image.id == str(image_id): for k, v in metadata.items(): setattr(self._images[i], k, v) return self._images[i] raise glanceclient.exc.NotFound(image_id) def delete(self, image_id): for i, image in enumerate(self._images): if image.id == image_id: del self._images[i] return raise glanceclient.exc.NotFound(image_id) class FakeImage(object): def __init__(self, metadata): IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'checksum', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'status', 'min_disk', 'min_ram', 'is_public'] raw = dict.fromkeys(IMAGE_ATTRIBUTES) raw.update(metadata) self.__dict__['raw'] = raw def 
__getattr__(self, key): try: return self.__dict__['raw'][key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): try: self.__dict__['raw'][key] = value except KeyError: raise AttributeError(key) manila-2013.2.dev175.gbf1a399/manila/tests/glance/__init__.py0000664000175000017500000000134512301410454023460 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`glance` -- Stubs for Glance ================================= """ manila-2013.2.dev175.gbf1a399/manila/tests/var/0000775000175000017500000000000012301410516020702 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/var/privatekey.key0000664000175000017500000000625312301410454023606 0ustar chuckchuck00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS 
L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB 
utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- manila-2013.2.dev175.gbf1a399/manila/tests/var/ca.crt0000664000175000017500000000415712301410454022007 0ustar chuckchuck00000000000000-----BEGIN CERTIFICATE----- MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX /l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ 4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID 
AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm 2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ +C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY 9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA WoRMgEwjGJWqzhJZUYpUAQ== -----END CERTIFICATE----- manila-2013.2.dev175.gbf1a399/manila/tests/var/certificate.crt0000664000175000017500000000350212301410454023677 0ustar chuckchuck00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG /64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 
iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/0000775000175000017500000000000012301410516022070 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_host_manager.py0000664000175000017500000002343412301410454026157 0ustar chuckchuck00000000000000# Copyright (c) 2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For HostManager """ from manila import db from manila import exception from manila.openstack.common.scheduler import filters from manila.openstack.common import timeutils from manila.scheduler import host_manager from manila import test from manila.tests.scheduler import fakes from oslo.config import cfg CONF = cfg.CONF class FakeFilterClass1(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class FakeFilterClass2(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class HostManagerTestCase(test.TestCase): """Test case for HostManager class""" def setUp(self): super(HostManagerTestCase, self).setUp() self.host_manager = host_manager.HostManager() self.fake_hosts = [host_manager.HostState('fake_host%s' % x) for x in xrange(1, 5)] def test_choose_host_filters_not_found(self): self.flags(scheduler_default_filters='FakeFilterClass3') self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] self.assertRaises(exception.SchedulerHostFilterNotFound, self.host_manager._choose_host_filters, None) def test_choose_host_filters(self): self.flags(scheduler_default_filters=['FakeFilterClass2']) self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] # Test 'volume' returns 1 correct function filter_classes = self.host_manager._choose_host_filters(None) self.assertEqual(len(filter_classes), 1) self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2') def _mock_get_filtered_hosts(self, info, specified_filters=None): self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters') info['got_objs'] = [] info['got_fprops'] = [] def fake_filter_one(_self, obj, filter_props): info['got_objs'].append(obj) info['got_fprops'].append(filter_props) return True self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one) self.host_manager._choose_host_filters(specified_filters).AndReturn( [FakeFilterClass1]) def _verify_result(self, info, result): for x in 
info['got_fprops']: self.assertEqual(x, info['expected_fprops']) self.assertEqual(set(info['expected_objs']), set(info['got_objs'])) self.assertEqual(set(result), set(info['got_objs'])) def test_get_filtered_hosts(self): fake_properties = {'moo': 1, 'cow': 2} info = {'expected_objs': self.fake_hosts, 'expected_fprops': fake_properties} self._mock_get_filtered_hosts(info) self.mox.ReplayAll() result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result) def test_update_service_capabilities_for_shares(self): service_states = self.host_manager.service_states self.assertDictMatch(service_states, {}) self.mox.StubOutWithMock(timeutils, 'utcnow') timeutils.utcnow().AndReturn(31337) timeutils.utcnow().AndReturn(31338) timeutils.utcnow().AndReturn(31339) host1_share_capabs = dict(free_capacity_gb=4321, timestamp=1) host2_share_capabs = dict(free_capacity_gb=5432, timestamp=1) host3_share_capabs = dict(free_capacity_gb=6543, timestamp=1) self.mox.ReplayAll() service_name = 'share' self.host_manager.update_service_capabilities(service_name, 'host1', host1_share_capabs) self.host_manager.update_service_capabilities(service_name, 'host2', host2_share_capabs) self.host_manager.update_service_capabilities(service_name, 'host3', host3_share_capabs) # Make sure dictionary isn't re-assigned self.assertEqual(self.host_manager.service_states, service_states) # Make sure original dictionary wasn't copied self.assertEqual(host1_share_capabs['timestamp'], 1) host1_share_capabs['timestamp'] = 31337 host2_share_capabs['timestamp'] = 31338 host3_share_capabs['timestamp'] = 31339 expected = {'host1': host1_share_capabs, 'host2': host2_share_capabs, 'host3': host3_share_capabs} self.assertDictMatch(service_states, expected) def test_get_all_host_states_share(self): context = 'fake_context' topic = CONF.share_topic self.mox.StubOutWithMock(db, 'service_get_all_by_topic') self.mox.StubOutWithMock(host_manager.LOG, 'warn') ret_services = 
fakes.SHARE_SERVICES db.service_get_all_by_topic(context, topic).AndReturn(ret_services) # Disabled service host_manager.LOG.warn("service is down or disabled.") self.mox.ReplayAll() self.host_manager.get_all_host_states_share(context) host_state_map = self.host_manager.host_state_map self.assertEqual(len(host_state_map), 4) # Check that service is up for i in xrange(4): share_node = fakes.SHARE_SERVICES[i] host = share_node['host'] self.assertEqual(host_state_map[host].service, share_node) class HostStateTestCase(test.TestCase): """Test case for HostState class.""" def test_update_from_share_capability(self): fake_host = host_manager.HostState('host1') self.assertEqual(fake_host.free_capacity_gb, None) share_capability = {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None} fake_host.update_from_share_capability(share_capability) self.assertEqual(fake_host.free_capacity_gb, 512) def test_update_from_share_infinite_capability(self): fake_host = host_manager.HostState('host1') self.assertEqual(fake_host.free_capacity_gb, None) share_capability = {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite', 'reserved_percentage': 0, 'timestamp': None} fake_host.update_from_share_capability(share_capability) self.assertEqual(fake_host.total_capacity_gb, 'infinite') self.assertEqual(fake_host.free_capacity_gb, 'infinite') def test_update_from_share_unknown_capability(self): fake_host = host_manager.HostState('host1') self.assertEqual(fake_host.free_capacity_gb, None) share_capability = {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'timestamp': None} fake_host.update_from_share_capability(share_capability) self.assertEqual(fake_host.total_capacity_gb, 'infinite') self.assertEqual(fake_host.free_capacity_gb, 'unknown') def test_consume_from_share_capability(self): fake_host = host_manager.HostState('host1') share_size = 10 free_capacity = 100 fake_share = {'id': 'foo', 'size': 
share_size} share_capability = {'total_capacity_gb': free_capacity * 2, 'free_capacity_gb': free_capacity, 'reserved_percentage': 0, 'timestamp': None} fake_host.update_from_share_capability(share_capability) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.free_capacity_gb, free_capacity - share_size) def test_consume_from_share_infinite_capability(self): fake_host = host_manager.HostState('host1') share_size = 1000 fake_share = {'id': 'foo', 'size': share_size} share_capability = {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite', 'reserved_percentage': 0, 'timestamp': None} fake_host.update_from_share_capability(share_capability) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.total_capacity_gb, 'infinite') self.assertEqual(fake_host.free_capacity_gb, 'infinite') def test_consume_from_share_unknown_capability(self): fake_host = host_manager.HostState('host1') share_size = 1000 fake_share = {'id': 'foo', 'size': share_size} share_capability = {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'timestamp': None} fake_host.update_from_share_capability(share_capability) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.total_capacity_gb, 'infinite') self.assertEqual(fake_host.free_capacity_gb, 'unknown') manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_filter_scheduler.py0000664000175000017500000001363012301410454027030 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Filter Scheduler. """ from manila import context from manila import exception from manila import test from manila.openstack.common.scheduler import weights from manila.scheduler import filter_scheduler from manila.scheduler import host_manager from manila.tests.scheduler import fakes from manila.tests.scheduler import test_scheduler from manila.tests import utils as test_utils def fake_get_filtered_hosts(hosts, filter_properties): return list(hosts) class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter_scheduler.FilterScheduler @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed (try setup.py develop') def test_create_share_no_hosts(self): """ Ensure empty hosts & child_zones result in NoValidHosts exception. """ def _fake_empty_call_zone_method(*args, **kwargs): return [] sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'share_properties': {'project_id': 1, 'size': 1}, 'share_type': {'name': 'LVM_NFS'}, 'share_id': ['fake-id1']} self.assertRaises(exception.NoValidHost, sched.schedule_create_share, fake_context, request_spec, {}) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed (try setup.py develop') def test_create_share_non_admin(self): """Test creating share passing a non-admin context. 
DB actions should work.""" self.was_admin = False def fake_get(context, *args, **kwargs): # make sure this is called with admin context, even though # we're using user context below self.was_admin = context.is_admin return {} sched = fakes.FakeFilterScheduler() self.stubs.Set(sched.host_manager, 'get_all_host_states_share', fake_get) fake_context = context.RequestContext('user', 'project') request_spec = {'share_properties': {'project_id': 1, 'size': 1}, 'share_type': {'name': 'LVM_NFS'}, 'share_id': ['fake-id1']} self.assertRaises(exception.NoValidHost, sched.schedule_create_share, fake_context, request_spec, {}) self.assertTrue(self.was_admin) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed (try setup.py develop') def test_schedule_happy_day_share(self): """Make sure there's nothing glaringly wrong with _schedule_share() by doing a happy day pass through.""" self.next_weight = 1.0 def _fake_weigh_objects(_self, functions, hosts, options): self.next_weight += 2.0 host_state = hosts[0] return [weights.WeighedHost(host_state, self.next_weight)] sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) self.stubs.Set(sched.host_manager, 'get_filtered_hosts', fake_get_filtered_hosts) self.stubs.Set(weights.HostWeightHandler, 'get_weighed_objects', _fake_weigh_objects) fakes.mox_host_manager_db_calls_share(self.mox, fake_context) request_spec = {'share_type': {'name': 'LVM_NFS'}, 'sharee_properties': {'project_id': 1, 'size': 1}} self.mox.ReplayAll() weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertTrue(weighed_host.obj is not None) def test_max_attempts(self): self.flags(scheduler_max_attempts=4) sched = fakes.FakeFilterScheduler() self.assertEqual(4, sched._max_attempts()) def test_invalid_max_attempts(self): self.flags(scheduler_max_attempts=0) self.assertRaises(exception.InvalidParameterValue, fakes.FakeFilterScheduler) def test_add_retry_host(self): 
retry = dict(num_attempts=1, hosts=[]) filter_properties = dict(retry=retry) host = "fakehost" sched = fakes.FakeFilterScheduler() sched._add_retry_host(filter_properties, host) hosts = filter_properties['retry']['hosts'] self.assertEqual(1, len(hosts)) self.assertEqual(host, hosts[0]) def test_post_select_populate(self): # Test addition of certain filter props after a node is selected. retry = {'hosts': [], 'num_attempts': 1} filter_properties = {'retry': retry} sched = fakes.FakeFilterScheduler() host_state = host_manager.HostState('host') host_state.total_capacity_gb = 1024 sched._post_select_populate_filter_properties(filter_properties, host_state) self.assertEqual('host', filter_properties['retry']['hosts'][0]) self.assertEqual(1024, host_state.total_capacity_gb) manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_scheduler_options.py0000664000175000017500000001176112301410454027241 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For PickledScheduler. """ import datetime import StringIO from manila.openstack.common import jsonutils from manila.scheduler import scheduler_options from manila import test class FakeSchedulerOptions(scheduler_options.SchedulerOptions): def __init__(self, last_checked, now, file_old, file_now, data, filedata): super(FakeSchedulerOptions, self).__init__() # Change internals ... 
self.last_modified = file_old self.last_checked = last_checked self.data = data # For overrides ... self._time_now = now self._file_now = file_now self._file_data = filedata self.file_was_loaded = False def _get_file_timestamp(self, filename): return self._file_now def _get_file_handle(self, filename): self.file_was_loaded = True return StringIO.StringIO(self._file_data) def _get_time_now(self): return self._time_now class SchedulerOptionsTestCase(test.TestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEquals({}, fake.get_configuration()) self.assertFalse(fake.file_was_loaded) def test_get_configuration_first_time_empty_file(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = "" fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEquals({}, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_first_time_happy_day(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEquals(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_second_time_no_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = 
jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, data, jdata) self.assertEquals(data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_too_fast(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2011, 1, 1, 1, 1, 2) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEquals(old_data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEquals(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_scheduler.py0000664000175000017500000003230412301410454025462 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler """ from mox import IsA from manila import context from manila import db from manila import exception from manila.openstack.common import timeutils from manila.scheduler import driver from manila.scheduler import manager from manila.scheduler import simple from manila import test from manila import utils from oslo.config import cfg CONF = cfg.CONF class SchedulerManagerTestCase(test.TestCase): """Test case for scheduler manager.""" manager_cls = manager.SchedulerManager driver_cls = driver.Scheduler driver_cls_name = 'manila.scheduler.driver.Scheduler' class AnException(Exception): pass def setUp(self): super(SchedulerManagerTestCase, self).setUp() self.flags(scheduler_driver=self.driver_cls_name) self.manager = self.manager_cls() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' self.fake_args = (1, 2, 3) self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} def test_1_correct_init(self): # Correct scheduler driver manager = self.manager self.assertTrue(isinstance(manager.driver, self.driver_cls)) def test_update_service_capabilities(self): service_name = 'fake_service' host = 'fake_host' self.mox.StubOutWithMock(self.manager.driver, 'update_service_capabilities') # Test no capabilities passes empty dictionary self.manager.driver.update_service_capabilities(service_name, host, {}) self.mox.ReplayAll() result = self.manager.update_service_capabilities( self.context, service_name=service_name, host=host) self.mox.VerifyAll() self.mox.ResetAll() # Test capabilities passes correctly capabilities = 
{'fake_capability': 'fake_value'} self.manager.driver.update_service_capabilities(service_name, host, capabilities) self.mox.ReplayAll() result = self.manager.update_service_capabilities( self.context, service_name=service_name, host=host, capabilities=capabilities) def test_create_share_exception_puts_share_in_error_state(self): """Test that a NoValideHost exception for create_share. Puts the share in 'error' state and eats the exception. """ fake_share_id = 1 self._mox_schedule_method_helper('schedule_create_share') self.mox.StubOutWithMock(db, 'share_update') topic = 'fake_topic' share_id = fake_share_id request_spec = {'share_id': fake_share_id} self.manager.driver.schedule_create_share( self.context, request_spec, {}).AndRaise(exception.NoValidHost(reason="")) db.share_update(self.context, fake_share_id, {'status': 'error'}) self.mox.ReplayAll() self.manager.create_share(self.context, topic, share_id, request_spec=request_spec, filter_properties={}) def _mox_schedule_method_helper(self, method_name): # Make sure the method exists that we're going to test call def stub_method(*args, **kwargs): pass setattr(self.manager.driver, method_name, stub_method) self.mox.StubOutWithMock(self.manager.driver, method_name) class SchedulerTestCase(test.TestCase): """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. 
driver_cls = driver.Scheduler def setUp(self): super(SchedulerTestCase, self).setUp() self.driver = self.driver_cls() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' def test_update_service_capabilities(self): service_name = 'fake_service' host = 'fake_host' self.mox.StubOutWithMock(self.driver.host_manager, 'update_service_capabilities') capabilities = {'fake_capability': 'fake_value'} self.driver.host_manager.update_service_capabilities(service_name, host, capabilities) self.mox.ReplayAll() result = self.driver.update_service_capabilities(service_name, host, capabilities) def test_hosts_up(self): service1 = {'host': 'host1'} service2 = {'host': 'host2'} services = [service1, service2] self.mox.StubOutWithMock(db, 'service_get_all_by_topic') self.mox.StubOutWithMock(utils, 'service_is_up') db.service_get_all_by_topic(self.context, self.topic).AndReturn(services) utils.service_is_up(service1).AndReturn(False) utils.service_is_up(service2).AndReturn(True) self.mox.ReplayAll() result = self.driver.hosts_up(self.context, self.topic) self.assertEqual(result, ['host2']) class SchedulerDriverBaseTestCase(SchedulerTestCase): """Test cases for base scheduler driver class methods that can't will fail if the driver is changed""" def test_unimplemented_schedule(self): fake_args = (1, 2, 3) fake_kwargs = {'cat': 'meow'} self.assertRaises(NotImplementedError, self.driver.schedule, self.context, self.topic, 'schedule_something', *fake_args, **fake_kwargs) class SchedulerDriverModuleTestCase(test.TestCase): """Test case for scheduler driver module methods.""" def setUp(self): super(SchedulerDriverModuleTestCase, self).setUp() self.context = context.RequestContext('fake_user', 'fake_project') def test_share_host_update_db(self): self.mox.StubOutWithMock(timeutils, 'utcnow') self.mox.StubOutWithMock(db, 'share_update') timeutils.utcnow().AndReturn('fake-now') db.share_update(self.context, 31337, {'host': 'fake_host', 'scheduled_at': 
'fake-now'}) self.mox.ReplayAll() driver.share_update_db(self.context, 31337, 'fake_host') class SimpleSchedulerSharesTestCase(test.TestCase): """Test case for simple scheduler create share method.""" driver = simple.SimpleScheduler() def setUp(self): super(SimpleSchedulerSharesTestCase, self).setUp() self.context = context.RequestContext('fake_user', 'fake_project') self.admin_context = context.RequestContext('fake_admin_user', 'fake_project') self.admin_context.is_admin = True def test_create_share_if_two_services_up(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 1} fake_service_1 = {'disabled': False, 'host': 'fake_host1'} fake_service_2 = {'disabled': False, 'host': 'fake_host2'} fake_result = [(fake_service_1, 2), (fake_service_2, 1)] self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') self.mox.StubOutWithMock(utils, 'service_is_up') self.mox.StubOutWithMock(driver, 'share_update_db') fake_request_spec = {'share_id': share_id, 'share_properties': fake_share} db.service_get_all_share_sorted(IsA(context.RequestContext))\ .AndReturn(fake_result) utils.service_is_up(IsA(dict)).AndReturn(True) driver.share_update_db(IsA(context.RequestContext), share_id, 'fake_host1').AndReturn(fake_share) self.mox.ReplayAll() self.driver.schedule_create_share(self.context, fake_request_spec, {}) def test_create_share_if_services_not_available(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 1} fake_result = [] fake_request_spec = {'share_id': share_id, 'share_properties': fake_share} self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') db.service_get_all_share_sorted(IsA(context.RequestContext))\ .AndReturn(fake_result) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.driver.schedule_create_share, self.context, fake_request_spec, {}) def test_create_share_if_max_gigabytes_exceeded(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 10001} fake_service_1 = {'disabled': False, 'host': 'fake_host1'} 
fake_service_2 = {'disabled': False, 'host': 'fake_host2'} fake_result = [(fake_service_1, 5), (fake_service_2, 7)] fake_request_spec = {'share_id': share_id, 'share_properties': fake_share} self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') db.service_get_all_share_sorted(IsA(context.RequestContext))\ .AndReturn(fake_result) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.driver.schedule_create_share, self.context, fake_request_spec, {}) def test_create_share_availability_zone(self): share_id = 'fake' fake_share = {'id': share_id, 'availability_zone': 'fake:fake', 'size': 1} fake_service_1 = {'disabled': False, 'host': 'fake_host1', 'availability_zone': 'fake'} fake_service_2 = {'disabled': False, 'host': 'fake_host2', 'availability_zone': 'super_fake'} fake_result = [(fake_service_1, 0), (fake_service_2, 1)] fake_request_spec = {'share_id': share_id, 'share_properties': fake_share} self.mox.StubOutWithMock(utils, 'service_is_up') self.mox.StubOutWithMock(driver, 'share_update_db') self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') db.service_get_all_share_sorted(IsA(context.RequestContext))\ .AndReturn(fake_result) utils.service_is_up(fake_service_1).AndReturn(True) driver.share_update_db(IsA(context.RequestContext), share_id, fake_service_1['host']).AndReturn(fake_share) self.mox.ReplayAll() self.driver.schedule_create_share(self.context, fake_request_spec, {}) def test_create_share_availability_zone_on_host(self): share_id = 'fake' fake_share = {'id': share_id, 'availability_zone': 'fake:fake', 'size': 1} fake_request_spec = {'share_id': share_id, 'share_properties': fake_share} self.mox.StubOutWithMock(utils, 'service_is_up') self.mox.StubOutWithMock(db, 'service_get_by_args') self.mox.StubOutWithMock(driver, 'share_update_db') db.service_get_by_args(IsA(context.RequestContext), 'fake', 'manila-share').AndReturn('fake_service') utils.service_is_up('fake_service').AndReturn(True) 
driver.share_update_db(IsA(context.RequestContext), share_id, 'fake').AndReturn(fake_share) self.mox.ReplayAll() self.driver.schedule_create_share(self.admin_context, fake_request_spec, {}) def test_create_share_availability_zone_if_service_down(self): share_id = 'fake' fake_share = {'id': share_id, 'availability_zone': 'fake:fake', 'size': 1} fake_request_spec = {'share_id': share_id, 'share_properties': fake_share} self.mox.StubOutWithMock(utils, 'service_is_up') self.mox.StubOutWithMock(db, 'service_get_by_args') db.service_get_by_args(IsA(context.RequestContext), 'fake', 'manila-share').AndReturn('fake_service') utils.service_is_up('fake_service').AndReturn(False) self.mox.ReplayAll() self.assertRaises(exception.WillNotSchedule, self.driver.schedule_create_share, self.admin_context, fake_request_spec, {}) manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_rpcapi.py0000664000175000017500000000562212301410454024765 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for manila.scheduler.rpcapi """ from manila import context from manila.openstack.common import rpc from manila.scheduler import rpcapi as scheduler_rpcapi from manila import test from oslo.config import cfg CONF = cfg.CONF class SchedulerRpcAPITestCase(test.TestCase): def setUp(self): super(SchedulerRpcAPITestCase, self).setUp() def tearDown(self): super(SchedulerRpcAPITestCase, self).tearDown() def _test_scheduler_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = scheduler_rpcapi.SchedulerAPI() expected_retval = 'foo' if method == 'call' else None expected_version = kwargs.pop('version', rpcapi.RPC_API_VERSION) expected_msg = rpcapi.make_msg(method, **kwargs) expected_msg['version'] = expected_version self.fake_args = None self.fake_kwargs = None def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval self.stubs.Set(rpc, rpc_method, _fake_rpc_method) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, expected_retval) expected_args = [ctxt, CONF.scheduler_topic, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(arg, expected_arg) def test_update_service_capabilities(self): self._test_scheduler_api('update_service_capabilities', rpc_method='fanout_cast', service_name='fake_name', host='fake_host', capabilities='fake_capabilities') def test_create_share(self): self._test_scheduler_api('create_share', rpc_method='cast', topic='topic', share_id='share_id', snapshot_id='snapshot_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.3') manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_capacity_weigher.py0000664000175000017500000000742412301410454027020 0ustar chuckchuck00000000000000# Copyright 2011-2012 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Capacity Weigher. """ from manila import context from manila.openstack.common.scheduler.weights import HostWeightHandler from manila import test from manila.tests.scheduler import fakes from manila.tests import utils as test_utils class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = HostWeightHandler('manila.scheduler.weights') self.weight_classes = self.weight_handler.get_all_classes() def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {} return self.weight_handler.get_weighed_objects(self.weight_classes, hosts, weight_properties)[0] def _get_all_hosts(self): ctxt = context.get_admin_context() fakes.mox_host_manager_db_calls(self.mox, ctxt) self.mox.ReplayAll() host_states = self.host_manager.get_all_host_states(ctxt) self.mox.VerifyAll() self.mox.ResetAll() return host_states @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_default_of_spreading_first(self): hostinfo_list = self._get_all_hosts() # host1: free_capacity_gb=1024, free=1024*(1-0.1) # host2: free_capacity_gb=300, free=300*(1-0.1) # host3: free_capacity_gb=512, free=512 # host4: free_capacity_gb=200, free=200*(1-0.05) # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(weighed_host.weight, 
921.0) self.assertEqual(weighed_host.obj.host, 'host1') @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_capacity_weight_multiplier1(self): self.flags(capacity_weight_multiplier=-1.0) hostinfo_list = self._get_all_hosts() # host1: free_capacity_gb=1024, free=-1024*(1-0.1) # host2: free_capacity_gb=300, free=-300*(1-0.1) # host3: free_capacity_gb=512, free=-512 # host4: free_capacity_gb=200, free=-200*(1-0.05) # so, host4 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(weighed_host.weight, -190.0) self.assertEqual(weighed_host.obj.host, 'host4') @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_capacity_weight_multiplier2(self): self.flags(capacity_weight_multiplier=2.0) hostinfo_list = self._get_all_hosts() # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2 # host2: free_capacity_gb=300, free=300*(1-0.1)*2 # host3: free_capacity_gb=512, free=512*2 # host4: free_capacity_gb=200, free=200*(1-0.05)*2 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(weighed_host.weight, 921.0 * 2) self.assertEqual(weighed_host.obj.host, 'host1') manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/__init__.py0000664000175000017500000000141212301410454024200 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import * manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/fakes.py0000664000175000017500000000774612301410454023552 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. """ import mox from manila import db from manila.openstack.common import timeutils from manila.scheduler import filter_scheduler from manila.scheduler import host_manager VOLUME_SERVICES = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host4', topic='volume', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), # service on host5 is disabled dict(id=5, host='host5', topic='volume', disabled=True, availability_zone='zone4', updated_at=timeutils.utcnow()), ] SHARE_SERVICES = [ dict(id=1, host='host1', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3', topic='share', disabled=False, 
availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host4', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), # service on host5 is disabled dict(id=5, host='host5', topic='share', disabled=True, availability_zone='zone4', updated_at=timeutils.utcnow()), ] class FakeFilterScheduler(filter_scheduler.FilterScheduler): def __init__(self, *args, **kwargs): super(FakeFilterScheduler, self).__init__(*args, **kwargs) self.host_manager = host_manager.HostManager() class FakeHostManager(host_manager.HostManager): def __init__(self): super(FakeHostManager, self).__init__() self.service_states = { 'host1': {'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'reserved_percentage': 10, 'timestamp': None}, 'host2': {'total_capacity_gb': 2048, 'free_capacity_gb': 300, 'reserved_percentage': 10, 'timestamp': None}, 'host3': {'total_capacity_gb': 512, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None}, 'host4': {'total_capacity_gb': 2048, 'free_capacity_gb': 200, 'reserved_percentage': 5, 'timestamp': None}, } class FakeHostState(host_manager.HostState): def __init__(self, host, attribute_dict): super(FakeHostState, self).__init__(host) for (key, val) in attribute_dict.iteritems(): setattr(self, key, val) def mox_host_manager_db_calls(mock, context): mock.StubOutWithMock(db, 'service_get_all_by_topic') db.service_get_all_by_topic(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(VOLUME_SERVICES) def mox_host_manager_db_calls_share(mock, context): mock.StubOutWithMock(db, 'service_get_all_by_topic') db.service_get_all_by_topic(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(SHARE_SERVICES) manila-2013.2.dev175.gbf1a399/manila/tests/scheduler/test_host_filters.py0000664000175000017500000001443012301410454026211 0ustar chuckchuck00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. """ import httplib import stubout from manila import context from manila import db from manila import exception from manila.openstack.common import jsonutils from manila.openstack.common.scheduler import filters from manila import test from manila.tests.scheduler import fakes from manila.tests import utils as test_utils from manila import utils DATA = '' def stub_out_https_backend(stubs): """ Stubs out the httplib.HTTPRequest.getresponse to return faked-out data instead of grabbing actual contents of a resource The stubbed getresponse() returns an iterator over the data "I am a teapot, short and stout\n" :param stubs: Set of stubout stubs """ class FakeHTTPResponse(object): def read(self): return DATA def fake_do_request(self, *args, **kwargs): return httplib.OK, FakeHTTPResponse() class HostFiltersTestCase(test.TestCase): """Test case for host filters.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() stub_out_https_backend(self.stubs) self.context = context.RequestContext('fake', 'fake') self.json_query = jsonutils.dumps( ['and', ['>=', '$free_capacity_gb', 1024], ['>=', '$total_capacity_gb', 10 * 1024]]) # This has a side effect of testing 'get_filter_classes' # when specifying a method (in this case, our standard filters) filter_handler = filters.HostFilterHandler('manila.scheduler.filters') classes = filter_handler.get_all_classes() 
self.class_map = {} for cls in classes: self.class_map[cls.__name__] = cls def _stub_service_is_up(self, ret_value): def fake_service_is_up(service): return ret_value self.stubs.Set(utils, 'service_is_up', fake_service_is_up) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_capacity_filter_passes(self): self._stub_service_is_up(True) filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_capacity_filter_fails(self): self._stub_service_is_up(True) filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 120, 'reserved_percentage': 20, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_capacity_filter_passes_infinite(self): self._stub_service_is_up(True) filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'infinite', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_capacity_filter_passes_unknown(self): self._stub_service_is_up(True) filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'unknown', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, 
filter_properties)) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_retry_filter_disabled(self): # Test case where retry/re-scheduling is disabled. filt_cls = self.class_map['RetryFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_retry_filter_pass(self): # Node not previously tried. filt_cls = self.class_map['RetryFilter']() host = fakes.FakeHostState('host1', {}) retry = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=retry) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @test.skip_if(not test_utils.is_manila_installed(), 'Test requires Manila installed') def test_retry_filter_fail(self): # Node was already tried. filt_cls = self.class_map['RetryFilter']() host = fakes.FakeHostState('host1', {}) retry = dict(num_attempts=1, hosts=['host1']) filter_properties = dict(retry=retry) self.assertFalse(filt_cls.host_passes(host, filter_properties)) manila-2013.2.dev175.gbf1a399/manila/tests/test_skip_examples.py0000664000175000017500000000345512301410454024377 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from manila import test class ExampleSkipTestCase(test.TestCase): test_counter = 0 @test.skip_test("Example usage of @test.skip_test()") def test_skip_test_example(self): self.fail("skip_test failed to work properly.") @test.skip_if(True, "Example usage of @test.skip_if()") def test_skip_if_example(self): self.fail("skip_if failed to work properly.") @test.skip_unless(False, "Example usage of @test.skip_unless()") def test_skip_unless_example(self): self.fail("skip_unless failed to work properly.") @test.skip_if(False, "This test case should never be skipped.") def test_001_increase_test_counter(self): ExampleSkipTestCase.test_counter += 1 @test.skip_unless(True, "This test case should never be skipped.") def test_002_increase_test_counter(self): ExampleSkipTestCase.test_counter += 1 def test_003_verify_test_counter(self): self.assertEquals(ExampleSkipTestCase.test_counter, 2, "Tests were not skipped appropriately") manila-2013.2.dev175.gbf1a399/manila/tests/test_policy.py0000664000175000017500000002121412301410454023023 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test of Policy Engine For Manila.""" import os.path import StringIO import urllib2 from manila import context from manila import exception import manila.openstack.common.policy from manila.openstack.common import policy as common_policy from manila import policy from manila import test from manila import utils from oslo.config import cfg CONF = cfg.CONF class PolicyFileTestCase(test.TestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() # since is_admin is defined by policy, create context before reset self.context = context.RequestContext('fake', 'fake') policy.reset() self.target = {} def tearDown(self): super(PolicyFileTestCase, self).tearDown() policy.reset() def test_modified_policy_reloads(self): with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, 'policy') self.flags(policy_file=tmpfilename) action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": []}""") policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ["false:false"]}""") # NOTE(vish): reset stored policy cache so we don't have to # sleep(1) policy._POLICY_CACHE = {} self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(test.TestCase): def setUp(self): super(PolicyTestCase, self).setUp() policy.reset() # NOTE(vish): preload rules to circumvent reloading from file policy.init() rules = { "true": [], "example:allowed": [], "example:denied": [["false:false"]], "example:get_http": [["http:http://www.example.com"]], "example:my_file": [["role:compute_admin"], ["project_id:%(project_id)s"]], "example:early_and_fail": [["false:false", "rule:true"]], "example:early_or_success": [["rule:true"], ["false:false"]], "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } # NOTE(vish): then overload underlying brain 
common_policy.set_brain(common_policy.HttpBrain(rules)) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} def tearDown(self): policy.reset() super(PolicyTestCase, self).tearDown() def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_good_action(self): action = "example:allowed" policy.enforce(self.context, action, self.target) def test_enforce_http_true(self): def fakeurlopen(url, post_data): return StringIO.StringIO("True") self.stubs.Set(urllib2, 'urlopen', fakeurlopen) action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertEqual(result, None) def test_enforce_http_false(self): def fakeurlopen(url, post_data): return StringIO.StringIO("False") self.stubs.Set(urllib2, 'urlopen', fakeurlopen) action = "example:get_http" target = {} self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the 
Admin role here to ensure # case is ignored admin_context = context.RequestContext('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(test.TestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() policy.reset() policy.init() self.rules = { "default": [], "example:exist": [["false:false"]] } self._set_brain('default') self.context = context.RequestContext('fake', 'fake') def _set_brain(self, default_rule): brain = manila.openstack.common.policy.HttpBrain(self.rules, default_rule) manila.openstack.common.policy.set_brain(brain) def tearDown(self): super(DefaultPolicyTestCase, self).tearDown() policy.reset() def test_policy_called(self): self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) def test_default_not_found(self): self._set_brain("default_noexist") self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:noexist", {}) class ContextIsAdminPolicyTestCase(test.TestCase): def setUp(self): super(ContextIsAdminPolicyTestCase, self).setUp() policy.reset() policy.init() def test_default_admin_role_is_admin(self): ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assert_(ctx.is_admin) def test_custom_admin_role_is_admin(self): # define explict rules for context_is_admin rules = { 'context_is_admin': [["role:administrator"], ["role:johnny-admin"]] } brain = common_policy.Brain(rules, CONF.policy_default_rule) common_policy.set_brain(brain) ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assert_(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['administrator']) self.assert_(ctx.is_admin) # 
default rule no longer applies ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assertFalse(ctx.is_admin) def test_context_is_admin_undefined(self): rules = { "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]], "default": [["rule:admin_or_owner"]], } brain = common_policy.Brain(rules, CONF.policy_default_rule) common_policy.set_brain(brain) ctx = context.RequestContext('fake', 'fake') self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assert_(ctx.is_admin) manila-2013.2.dev175.gbf1a399/manila/tests/test_share_glusterfs.py0000664000175000017500000004633512301410454024737 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import errno from mock import Mock from mock import patch import os import subprocess from manila import context from manila.db.sqlalchemy import models from manila import exception from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.share import configuration as config from manila.share.drivers import glusterfs from manila import test from manila.tests.db import fakes as db_fakes from manila.tests import fake_utils from oslo.config import cfg CONF = cfg.CONF gluster_address_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'remote_user': 'testuser', 'volume': 'testvol', } def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/testvol', } share.update(kwargs) return db_fakes.FakeModel(share) class GlusterAddressTestCase(test.TestCase): """Tests GlusterAddress.""" _gluster_args = ('foo', 'bar', "b'a'z") def test_gluster_address_init(self): self._gluster_address = glusterfs.GlusterAddress( 'testuser@127.0.0.1:/testvol') self.assertEqual(self._gluster_address.remote_user, gluster_address_attrs['remote_user']) self.assertEqual(self._gluster_address.host, gluster_address_attrs['host']) self.assertEqual(self._gluster_address.volume, gluster_address_attrs['volume']) self.assertEqual(self._gluster_address.qualified, gluster_address_attrs['qualified']) self.assertEqual(self._gluster_address.export, gluster_address_attrs['export']) def test_gluster_address_invalid(self): self.assertRaises(exception.GlusterfsException, glusterfs.GlusterAddress, '127.0.0.1:vol') def test_gluster_address_make_gluster_args_local(self): self._gluster_address = glusterfs.GlusterAddress( '127.0.0.1:/testvol') ret = self._gluster_address.make_gluster_args(*self._gluster_args) self.assertEqual(ret, (('gluster',) + self._gluster_args, {'run_as_root': True})) def 
test_gluster_address_make_gluster_args_remote(self): self._gluster_address = glusterfs.GlusterAddress( 'testuser@127.0.0.1:/testvol') ret = self._gluster_address.make_gluster_args(*self._gluster_args) self.assertEqual(len(ret), 2) self.assertEqual(len(ret[0]), 3) # python 2.6 compat thingy check_output = lambda cmd:\ subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).\ communicate()[0] # shell unescaping thru echo(1) self.assertEqual(check_output('echo ' + ' '.join(ret[0]),)[:-1], 'ssh testuser@127.0.0.1 gluster ' + ' '.join(self._gluster_args)) self.assertEqual(ret[1], {}) class GlusterfsShareDriverTestCase(test.TestCase): """Tests GlusterfsShareDriver.""" def setUp(self): super(GlusterfsShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self.stubs) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() CONF.set_default('glusterfs_mount_point_base', '/mnt/nfs') self.fake_conf = config.Configuration(None) self._db = Mock() self._driver = glusterfs.GlusterfsShareDriver( self._db, execute=self._execute, configuration=self.fake_conf) self._driver.gluster_address = Mock(**gluster_address_attrs) self.share = fake_share() def tearDown(self): super(GlusterfsShareDriverTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_do_setup(self): self._driver._read_gluster_vol_from_config =\ Mock(return_value='testuser@127.0.0.1:/testvol/fakename') self._driver._ensure_gluster_vol_mounted = Mock() expected_exec = ['mount.glusterfs'] self._driver.do_setup(self._context) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self._driver._ensure_gluster_vol_mounted.assert_called_once_with() def test_do_setup_mount_glusterfs_not_installed(self): self._driver._read_gluster_vol_from_config =\ Mock(return_value='testuser@127.0.0.1:/testvol/fakename') def exec_runner(*ignore_args, **ignore_kw): raise OSError(errno.ENOENT, os.strerror(errno.ENOENT)) expected_exec = 
['mount.glusterfs'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.GlusterfsException, self._driver.do_setup, self._context) def test_do_mount(self): expected_exec = ['true'] ret = self._driver._do_mount(expected_exec, False) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, None) def test_do_mount_mounted_noensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.GlusterfsException, self._driver._do_mount, expected_exec, False) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_do_mount_mounted_ensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') expected_exec = ['true'] glusterfs.LOG.warn = Mock() fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) ret = self._driver._do_mount(expected_exec, True) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, None) glusterfs.LOG.warn.assert_called_with( "%s is already mounted", self._driver.gluster_address.export) def test_do_mount_fail_noensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise RuntimeError('fake error') expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(RuntimeError, self._driver._do_mount, expected_exec, False) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_do_mount_fail_ensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise RuntimeError('fake error') expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(RuntimeError, self._driver._do_mount, expected_exec, True) 
self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_mount_gluster_vol(self): mount_path = '/mnt/nfs/testvol' self._driver._do_mount = Mock() cmd = ['mount', '-t', 'glusterfs', gluster_address_attrs['export'], mount_path] expected_exec = ['mkdir -p %s' % (mount_path)] self._driver._mount_gluster_vol(mount_path) self._driver._do_mount.assert_called_with(cmd, False) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_ensure_gluster_vol_mounted(self): mount_path = 'mnt/nfs/testvol' self._driver._mount_gluster_vol = Mock() self._driver._ensure_gluster_vol_mounted() self.assertTrue(self._driver._mount_gluster_vol.called) def test_ensure_gluster_vol_mounted_error(self): self._driver._mount_gluster_vol =\ Mock(side_effect=exception.GlusterfsException) self.assertRaises(exception.GlusterfsException, self._driver._ensure_gluster_vol_mounted) def test_get_export_dir_list_empty_volinfo(self): self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) expected_exec = ['true'] self.assertRaises(exception.GlusterfsException, self._driver._get_export_dir_list) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_get_export_dir_list_failing_volinfo(self): self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) def exec_runner(*ignore_args, **ignore_kwargs): raise RuntimeError('fake error') expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(RuntimeError, self._driver._get_export_dir_list) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_get_export_dir_list_ambiguous_volinfo(self): self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) def exec_runner(*ignore_args, **ignore_kwargs): return """\ 0 """, '' expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) 
self.assertRaises(exception.InvalidShare, self._driver._get_export_dir_list) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_get_export_dir_list_trivial_volinfo(self): self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) def exec_runner(*ignore_args, **ignore_kwargs): return """\ 1 """, '' expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) ret = self._driver._get_export_dir_list() self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, []) def test_get_export_dir_list(self): self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) def exec_runner(*ignore_args, **ignore_kwargs): return """\ 1 """, '' expected_exec = ['true'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) ret = self._driver._get_export_dir_list() self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, ['foo', 'bar']) def test_get_local_share_path(self): with patch.object(os, 'access', return_value=True): expected_ret = '/mnt/nfs/testvol/fakename' ret = self._driver._get_local_share_path(self.share) self.assertEqual(ret, expected_ret) def test_local_share_path_not_exists(self): with patch.object(os, 'access', return_value=False): self.assertRaises(exception.GlusterfsException, self._driver._get_local_share_path, self.share) def test_create_share(self): self._driver._get_local_share_path =\ Mock(return_value='/mnt/nfs/testvol/fakename') expected_exec = ['mkdir /mnt/nfs/testvol/fakename', ] expected_ret = 'testuser@127.0.0.1:/testvol/fakename' ret = self._driver.create_share(self._context, self.share) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, expected_ret) def test_cannot_create_share(self): self._driver._get_local_share_path =\ Mock(return_value='/mnt/nfs/testvol/fakename') def exec_runner(*ignore_args, **ignore_kw): raise 
exception.ProcessExecutionError expected_exec = ['mkdir %s' % (self._driver._get_local_share_path())] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ProcessExecutionError, self._driver.create_share, self._context, self.share) def test_delete_share(self): self._driver._get_local_share_path =\ Mock(return_value='/mnt/nfs/testvol/fakename') expected_exec = ['rm -rf /mnt/nfs/testvol/fakename'] self._driver.delete_share(self._context, self.share) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_cannot_delete_share(self): self._driver._get_local_share_path =\ Mock(return_value='/mnt/nfs/testvol/fakename') def exec_runner(*ignore_args, **ignore_kw): raise exception.ProcessExecutionError expected_exec = ['rm -rf %s' % (self._driver._get_local_share_path())] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ProcessExecutionError, self._driver.delete_share, self._context, self.share) def test_manage_access_bad_access_type(self): cbk = Mock() access = {'access_type': 'bad'} self.assertRaises(exception.InvalidShareAccess, self._driver._manage_access, self._context, self.share, access, cbk) def test_manage_access_noop(self): cbk = Mock(return_value=True) access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list = Mock() self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) expected_exec = [] ret = self._driver._manage_access(self._context, self.share, access, cbk) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, None) def test_manage_access_adding_entry(self): cbk = list.append access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list =\ Mock(return_value=['/example.com(0.0.0.0)']) self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) expected_exec = ['true'] ret = 
self._driver._manage_access(self._context, self.share, access, cbk) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, None) self.assertTrue(self._driver.gluster_address.make_gluster_args.called) self.assertEqual( self._driver.gluster_address.make_gluster_args.call_args[0][-1], '/example.com(0.0.0.0),/fakename(0.0.0.0)') def test_manage_access_adding_entry_cmd_fail(self): cbk = list.append access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list =\ Mock(return_value=['/example.com(0.0.0.0)']) self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) expected_exec = ['true'] def exec_runner(*ignore_args, **ignore_kw): raise exception.ProcessExecutionError fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ProcessExecutionError, self._driver._manage_access, self._context, self.share, access, cbk) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertTrue(self._driver.gluster_address.make_gluster_args.called) self.assertEqual( self._driver.gluster_address.make_gluster_args.call_args[0][-1], '/example.com(0.0.0.0),/fakename(0.0.0.0)') def test_allow_access_with_share_having_noaccess(self): access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list =\ Mock(return_value=['/example.com(0.0.0.0)']) self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) self._driver.allow_access(self._context, self.share, access) self.assertTrue(self._driver.gluster_address.make_gluster_args.called) self.assertEqual( self._driver.gluster_address.make_gluster_args.call_args[0][-1], '/example.com(0.0.0.0),/fakename(0.0.0.0)') def test_allow_access_with_share_having_access(self): access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list = \ Mock(return_value=['/fakename(0.0.0.0)']) self._driver.gluster_address = 
Mock(make_gluster_args= Mock(return_value=(('true',), {}))) self._driver.allow_access(self._context, self.share, access) self.assertFalse(self._driver.gluster_address.make_gluster_args.called) def test_deny_access_with_share_having_noaccess(self): access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list = Mock(return_value=[]) self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) self._driver.deny_access(self._context, self.share, access) self.assertFalse(self._driver.gluster_address.make_gluster_args.called) def test_deny_access_with_share_having_access(self): access = {'access_type': 'ip', 'access_to': '0.0.0.0'} self._driver._get_export_dir_list = \ Mock(return_value=['/fakename(0.0.0.0)', '/example.com(0.0.0.0)']) self._driver.gluster_address = Mock(make_gluster_args= Mock(return_value=(('true',), {}))) self._driver.deny_access(self._context, self.share, access) self.assertTrue(self._driver.gluster_address.make_gluster_args.called) self.assertEqual( self._driver.gluster_address.make_gluster_args.call_args[0][-1], '/example.com(0.0.0.0)') manila-2013.2.dev175.gbf1a399/manila/tests/test_share.py0000664000175000017500000003372312301410454022636 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Share Code. 
""" import mock from manila import context from manila import db from manila import exception from manila.openstack.common import importutils import manila.policy from manila.share import manager from manila import test from oslo.config import cfg CONF = cfg.CONF class FakeShareDriver(object): def __init__(self, db, **kwargs): self.db = db def create_snapshot(self, context, snapshot): pass def delete_snapshot(self, context, snapshot): pass def create_share(self, context, share): pass def create_share_from_snapshot(self, context, share, snapshot): pass def delete_share(self, context, share): pass def ensure_share(self, context, share): pass def allow_access(self, context, share, access): pass def deny_access(self, context, share, access): pass def check_for_setup_error(self): pass def get_share_stats(self, refresh=False): return None def do_setup(self, context): pass def setup_network(self, context, network, policy=None): pass def get_network_allocations_number(self): pass class ShareTestCase(test.TestCase): """Test Case for shares.""" def setUp(self): super(ShareTestCase, self).setUp() self.flags(connection_type='fake', share_driver='manila.tests.test_share.FakeShareDriver') self.share = importutils.import_object(CONF.share_manager) self.context = context.get_admin_context() @staticmethod def _create_share(status="creating", size=0, snapshot_id=None): """Create a share object.""" share = {} share['share_proto'] = "NFS" share['size'] = size share['snapshot_id'] = snapshot_id share['user_id'] = 'fake' share['project_id'] = 'fake' share['metadata'] = {'fake_key': 'fake_value'} share['availability_zone'] = CONF.storage_availability_zone share['status'] = status share['host'] = CONF.host return db.share_create(context.get_admin_context(), share) @staticmethod def _create_snapshot(status="creating", size=0, share_id=None): """Create a snapshot object.""" snapshot = {} snapshot['share_proto'] = "NFS" snapshot['size'] = size snapshot['share_id'] = share_id 
snapshot['user_id'] = 'fake' snapshot['project_id'] = 'fake' snapshot['status'] = status return db.share_snapshot_create(context.get_admin_context(), snapshot) @staticmethod def _create_access(state='new', share_id=None): """Create a access rule object.""" access = {} access['access_type'] = 'fake_type' access['access_to'] = 'fake_IP' access['share_id'] = share_id access['state'] = state return db.share_access_create(context.get_admin_context(), access) def test_init_host_ensuring_shares(self): """Test init_host for ensuring shares and access rules.""" share = self._create_share(status='available') share_id = share['id'] another_share = self._create_share(status='error') access = self._create_access(share_id=share_id, state='active') context.get_admin_context = mock.Mock(return_value=self.context) db.share_get_all_by_host = mock.Mock( return_value=[share, another_share]) driver = mock.Mock() driver.get_share_stats.return_value = {} self.share.driver = driver self.share.init_host() driver.ensure_share.assert_called_once_with(self.context, share) driver.allow_access.assert_called_once_with( self.context, share, mock.ANY) driver.get_share_stats.assert_called_once_with(refresh=True) def test_create_share_from_snapshot(self): """Test share can be created from snapshot.""" share = self._create_share() share_id = share['id'] snapshot = self._create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.share.create_share(self.context, share_id, snapshot_id=snapshot_id) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEquals(shr['status'], 'available') def test_create_delete_share_snapshot(self): """Test share's snapshot can be created and deleted.""" def _fake_create_snapshot(self, context, snapshot): snapshot['progress'] = '99%' return snapshot self.stubs.Set(FakeShareDriver, "create_snapshot", _fake_create_snapshot) share = self._create_share() share_id = share['id'] snapshot 
= self._create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.share.create_snapshot(self.context, share_id, snapshot_id) self.assertEqual(share_id, db.share_snapshot_get(context.get_admin_context(), snapshot_id).share_id) snap = db.share_snapshot_get(self.context, snapshot_id) self.assertEquals(snap['status'], 'available') self.share.delete_snapshot(self.context, snapshot_id) self.assertRaises(exception.NotFound, db.share_snapshot_get, self.context, snapshot_id) def test_create_delete_share_snapshot_error(self): """Test snapshot can be created and deleted with error.""" def _fake_create_delete_snapshot(self, context, snapshot): raise exception.NotFound() self.stubs.Set(FakeShareDriver, "create_snapshot", _fake_create_delete_snapshot) self.stubs.Set(FakeShareDriver, "delete_snapshot", _fake_create_delete_snapshot) share = self._create_share() share_id = share['id'] snapshot = self._create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.assertRaises(exception.NotFound, self.share.create_snapshot, self.context, share_id, snapshot_id) snap = db.share_snapshot_get(self.context, snapshot_id) self.assertEquals(snap['status'], 'error') self.assertRaises(exception.NotFound, self.share.delete_snapshot, self.context, snapshot_id) self.assertEquals('error_deleting', db.share_snapshot_get( self.context, snapshot_id).status) def test_delete_share_if_busy(self): """Test snapshot could not be deleted if busy.""" def _fake_delete_snapshot(self, context, snapshot): raise exception.ShareSnapshotIsBusy(snapshot_name='fakename') self.stubs.Set(FakeShareDriver, "delete_snapshot", _fake_delete_snapshot) snapshot = self._create_snapshot(share_id='fake_id') snapshot_id = snapshot['id'] self.share.delete_snapshot(self.context, snapshot_id) snap = db.share_snapshot_get(self.context, snapshot_id) self.assertEquals(snap['status'], 'available') def test_create_delete_share(self): """Test share can be created and deleted.""" share = self._create_share() share_id = 
share['id'] self._create_access(share_id=share_id) self.share.create_share(self.context, share_id) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEquals(shr['status'], 'available') self.share.delete_share(self.context, share_id) self.assertRaises(exception.NotFound, db.share_get, self.context, share_id) def test_create_delete_share_error(self): """Test share can be created and deleted with error.""" def _fake_create_share(self, context, share): raise exception.NotFound() def _fake_delete_share(self, context, share): raise exception.NotFound() self.stubs.Set(FakeShareDriver, "create_share", _fake_create_share) self.stubs.Set(FakeShareDriver, "delete_share", _fake_delete_share) share = self._create_share() share_id = share['id'] self.assertRaises(exception.NotFound, self.share.create_share, self.context, share_id) shr = db.share_get(self.context, share_id) self.assertEquals(shr['status'], 'error') self.assertRaises(exception.NotFound, self.share.delete_share, self.context, share_id) shr = db.share_get(self.context, share_id) self.assertEquals(shr['status'], 'error_deleting') def test_allow_deny_access(self): """Test access rules to share can be created and deleted.""" share = self._create_share() share_id = share['id'] access = self._create_access(share_id=share_id) access_id = access['id'] self.share.allow_access(self.context, access_id) self.assertEqual('active', db.share_access_get(self.context, access_id).state) self.share.deny_access(self.context, access_id) self.assertRaises(exception.NotFound, db.share_access_get, self.context, access_id) def test_allow_deny_access_error(self): """Test access rules to share can be created and deleted with error.""" def _fake_allow_access(self, context, share, access): raise exception.NotFound() def _fake_deny_access(self, context, share, access): raise exception.NotFound() self.stubs.Set(FakeShareDriver, "allow_access", 
_fake_allow_access) self.stubs.Set(FakeShareDriver, "deny_access", _fake_deny_access) share = self._create_share() share_id = share['id'] access = self._create_access(share_id=share_id) access_id = access['id'] self.assertRaises(exception.NotFound, self.share.allow_access, self.context, access_id) acs = db.share_access_get(self.context, access_id) self.assertEquals(acs['state'], 'error') self.assertRaises(exception.NotFound, self.share.deny_access, self.context, access_id) acs = db.share_access_get(self.context, access_id) self.assertEquals(acs['state'], 'error') def test_create_delete_share_with_metadata(self): """Test share can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} share = self._create_share() share_id = share['id'] self.share.create_share(self.context, share_id) result_meta = { share.share_metadata[0].key: share.share_metadata[0].value} self.assertEqual(result_meta, test_meta) self.share.delete_share(self.context, share_id) self.assertRaises(exception.NotFound, db.share_get, self.context, share_id) def test_create_share_with_invalid_metadata(self): """Test share create with too much metadata fails.""" share_api = manila.share.api.API() test_meta = {'fake_key': 'fake_value' * 1025} self.assertRaises(exception.InvalidShareMetadataSize, share_api.create, self.context, 'nfs', 1, 'name', 'description', metadata=test_meta) def test_setup_share_network(self): network_info = {'fake': 'fake'} self.share.driver.get_network_allocations_number = mock.Mock( return_value=555) self.share.network_api.allocate_network = mock.Mock( return_value={'network_info': 'network_info'}) self.share.driver.setup_network = mock.Mock() self.share._setup_share_network(self.context, network_info) self.share.network_api.allocate_network.assert_called_once_with( self.context, network_info, count=555) self.share.driver.setup_network.assert_called_once_with( {'network_info': 'network_info'}) def test_setup_share_network_error(self): network_info = {'fake': 
'fake', 'id': 'fakeid'} self.share.driver.get_network_allocations_number = mock.Mock( return_value=555) self.share.network_api.allocate_network = mock.Mock( return_value={'network_info': 'network_info'}) self.share.driver.setup_network = mock.Mock( side_effect=exception.Invalid) self.share.db.share_network_update = mock.Mock() self.assertRaises(exception.Invalid, self.share._setup_share_network, self.context, network_info) self.share.db.share_network_update.assert_called_once_with( self.context, 'fakeid', {'status': 'error'}) manila-2013.2.dev175.gbf1a399/manila/tests/test_share_rpcapi.py0000664000175000017500000001266012301410454024171 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 NetApp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for manila.volume.rpcapi. 
""" from manila import context from manila import db from manila.openstack.common import jsonutils from manila.openstack.common import rpc from manila.share import rpcapi as share_rpcapi from manila import test from oslo.config import cfg CONF = cfg.CONF class ShareRpcAPITestCase(test.TestCase): def setUp(self): self.context = context.get_admin_context() shr = {} shr['host'] = 'fake_host' shr['availability_zone'] = CONF.storage_availability_zone shr['status'] = "available" share = db.share_create(self.context, shr) acs = {} acs['access_type'] = "ip" acs['access_to'] = "123.123.123.123" acs['share_id'] = share['id'] access = db.share_access_create(self.context, acs) snap = {} snap['share_id'] = share['id'] snapshot = db.share_snapshot_create(self.context, snap) self.fake_share = jsonutils.to_primitive(share) self.fake_access = jsonutils.to_primitive(access) self.fake_snapshot = jsonutils.to_primitive(snapshot) super(ShareRpcAPITestCase, self).setUp() def test_serialized_share_has_id(self): self.assertTrue('id' in self.fake_share) def _test_share_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') if 'rpcapi_class' in kwargs: rpcapi_class = kwargs['rpcapi_class'] del kwargs['rpcapi_class'] else: rpcapi_class = share_rpcapi.ShareAPI rpcapi = rpcapi_class() expected_retval = 'foo' if method == 'call' else None expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION) expected_msg = rpcapi.make_msg(method, **kwargs) if 'share' in expected_msg['args']: share = expected_msg['args']['share'] del expected_msg['args']['share'] expected_msg['args']['share_id'] = share['id'] if 'access' in expected_msg['args']: access = expected_msg['args']['access'] del expected_msg['args']['access'] expected_msg['args']['access_id'] = access['id'] del expected_msg['args']['share_id'] if 'host' in expected_msg['args']: del expected_msg['args']['host'] if 'snapshot' in expected_msg['args']: snapshot = expected_msg['args']['snapshot'] 
del expected_msg['args']['snapshot'] expected_msg['args']['snapshot_id'] = snapshot['id'] expected_msg['version'] = expected_version if 'host' in kwargs: host = kwargs['host'] else: host = kwargs['share']['host'] expected_topic = '%s.%s' % (CONF.share_topic, host) self.fake_args = None self.fake_kwargs = None def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval self.stubs.Set(rpc, rpc_method, _fake_rpc_method) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, expected_retval) expected_args = [ctxt, expected_topic, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(arg, expected_arg) def test_create_share(self): self._test_share_api('create_share', rpc_method='cast', share=self.fake_share, host='fake_host1', snapshot_id='fake_snapshot_id', filter_properties=None, request_spec=None) def test_delete_share(self): self._test_share_api('delete_share', rpc_method='cast', share=self.fake_share) def test_allow_access(self): self._test_share_api('allow_access', rpc_method='cast', share=self.fake_share, access=self.fake_access) def test_deny_access(self): self._test_share_api('deny_access', rpc_method='cast', share=self.fake_share, access=self.fake_access) def test_create_snapshot(self): self._test_share_api('create_snapshot', rpc_method='cast', share=self.fake_share, snapshot=self.fake_snapshot) def test_delete_snapshot(self): self._test_share_api('delete_snapshot', rpc_method='cast', snapshot=self.fake_snapshot, host='fake_host') manila-2013.2.dev175.gbf1a399/manila/tests/test_share_netapp.py0000664000175000017500000005324312301410454024204 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the NetApp NAS driver module.""" from mox import IgnoreArg import random import suds from manila import context from manila import exception from manila.share.configuration import Configuration from manila.share.drivers.netapp import driver as netapp from manila import test class FakeObject(object): pass class FakeRequest(object): def __init__(self, name=None, args=None): self.Name = name self.Args = args class FakeStartResp(object): def __init__(self): self.Tag = random.randint(1, 100) self.Records = random.randint(1, 10) class FakeStatus(object): def __init__(self, status): self.Status = status class FakeAggregates(object): def __init__(self, max_aggr_id): class AggrSizeAvail(object): def __init__(self, filer_id, avail): self.AggregateSize = FakeObject() self.FilerId = filer_id self.AggregateName = 'filer%d:aggr0' % filer_id setattr(self.AggregateSize, 'SizeAvailable', avail) class AggregateInfo(object): def __init__(self): self.AggregateInfo = [AggrSizeAvail(1, 10), AggrSizeAvail(2, 20), AggrSizeAvail(3, 1), AggrSizeAvail(max_aggr_id, 50), AggrSizeAvail(5, 15)] self.Aggregates = AggregateInfo() class FakeSnapshots(object): def __init__(self, snapshot_name, is_busy='false'): class Result(object): def __init__(self): self.snapshots = [{}] self.snapshots[0]['snapshot-info'] = [ {'name': [snapshot_name], 'busy': [is_busy]}, {'name': ['fakesnapname1'], 'busy': [is_busy]}, {'name': ['fakesnapname2'], 'busy': ['true']}, ] self.Results = Result() class FakeNfsRules(object): def __init__(self): class Rules(object): def __init__(self): self.rules = [ 
{'exports-rule-info-2': [ {'security-rules': [ {'security-rule-info': [ {'root': [ {'exports-hostname-info': [ {'name': 'allowed_host'}, {'name': 'disallowed_host'}]} ]} ]} ]} ]} ] self.Results = Rules() class FakeHost(object): def __init__(self, id): self.HostId = id class FakeHostInfo(object): def __init__(self): self.Hosts = FakeObject() setattr(self.Hosts, 'HostInfo', [FakeHost(1), FakeHost(2)]) class FakeFilter(object): def __init__(self, id=0): self.ObjectNameOrId = id class FakeTimestamp(object): def __init__(self, monitor_name='file_system', last_stamp=1): self.MonitorName = monitor_name self.LastMonitoringTimestamp = last_stamp class NetAppShareDriverTestCase(test.TestCase): """Tests Netapp-specific share driver. """ def setUp(self): super(NetAppShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._db = self.mox.CreateMockAnything() self._driver = netapp.NetAppShareDriver( self._db, configuration=Configuration(None)) self._driver._client = self.mox.CreateMock(netapp.NetAppApiClient) cifs_helper = self.mox.CreateMock(netapp.NetAppCIFSHelper) nfs_helper = self.mox.CreateMock(netapp.NetAppNFSHelper) self._driver._helpers = {'CIFS': cifs_helper, 'NFS': nfs_helper} def test_setup_check(self): self._driver._client.do_setup() self.mox.ReplayAll() self._driver.do_setup(self._context) def test_load_balancer(self): drv = self._driver max_aggr_id = 123 drv._client.get_available_aggregates().AndReturn( FakeAggregates(max_aggr_id)) self.mox.ReplayAll() aggr = drv._find_best_aggregate() self.assertEquals(max_aggr_id, aggr.FilerId) def test_allocate_container(self): drv = self._driver client = drv._client share = {'id': 'fakeshareid', 'size': 1} max_aggr_id = 123 client.get_available_aggregates().AndReturn( FakeAggregates(max_aggr_id)) client.send_request_to(max_aggr_id, 'volume-create', IgnoreArg()) self.mox.ReplayAll() drv.allocate_container(self._context, share) self.assertEqual(max_aggr_id, drv._share_table[share['id']]) def 
test_allocate_container_from_snapshot(self): drv = self._driver client = drv._client share_id = 'fakeshareid' share = {'id': share_id, 'size': 1} snapshot = {'id': 'fakesnapshotid', 'size': 1, 'share_id': share_id} max_aggr_id = 123 drv._share_table[share_id] = max_aggr_id client.send_request_to(max_aggr_id, 'volume-clone-create', IgnoreArg()) self.mox.ReplayAll() drv.allocate_container_from_snapshot(self._context, share, snapshot) self.assertEqual(max_aggr_id, drv._share_table[share['id']]) def test_deallocate_container_target_exists(self): drv = self._driver client = drv._client share_id = 'share-vol_id' share = {'id': share_id, 'size': 1} max_aggr_id = 123 client.get_available_aggregates().AndReturn( FakeAggregates(max_aggr_id)) client.send_request_to(max_aggr_id, 'volume-create', IgnoreArg()) client.send_request_to(max_aggr_id, 'volume-offline', IgnoreArg()) client.send_request_to(max_aggr_id, 'volume-destroy', IgnoreArg()) self.mox.ReplayAll() drv.allocate_container(self._context, share) drv.deallocate_container(self._context, share) self.assertEquals(len(drv._share_table.keys()), 0) def test_share_create(self): drv = self._driver ctx = self._context share_proto = 'CIFS' share = {'id': '1234-abcd-5678', 'share_proto': share_proto, 'size': 1} drv._helpers[share_proto].create_share(IgnoreArg(), share) self.mox.ReplayAll() drv.create_share(ctx, share) def test_share_delete(self): drv = self._driver ctx = self._context share_proto = 'NFS' helper = drv._helpers[share_proto] ip = '172.10.0.1' export = '/export_path' share = {'id': 'abcd-1234', 'share_proto': share_proto, 'export_location': ':'.join([ip, export])} fake_access_rules = [1, 2, 3] helper.get_target(share).AndReturn(ip) helper.delete_share(share) self.mox.ReplayAll() drv.delete_share(ctx, share) def test_create_snapshot(self): drv = self._driver client = drv._client share_id = 'fakeshareid' share = {'id': share_id, 'size': 1} snapshot = {'id': 'fakesnapshotid', 'size': 1, 'share_id': share_id} max_aggr_id 
= 123 drv._share_table[share_id] = max_aggr_id client.send_request_to(max_aggr_id, 'snapshot-create', IgnoreArg()) self.mox.ReplayAll() drv.create_snapshot(self._context, snapshot) def test_delete_snapshot(self): drv = self._driver client = drv._client share_id = 'fakeshareid' share = {'id': share_id, 'size': 1} snapshot = {'id': 'fakesnapshotid', 'size': 1, 'share_id': share_id} max_aggr_id = 123 drv._share_table[share_id] = max_aggr_id client.send_request_to(max_aggr_id, 'snapshot-list-info', IgnoreArg(), do_response_check=False).\ AndReturn(FakeSnapshots(netapp._get_valid_snapshot_name( snapshot['id']))) client.send_request_to(max_aggr_id, 'snapshot-delete', IgnoreArg()) self.mox.ReplayAll() drv.delete_snapshot(self._context, snapshot) def test_delete_snapshot_if_busy(self): drv = self._driver client = drv._client share_id = 'fakeshareid' share = {'id': share_id, 'size': 1} snapshot = {'id': 'fakesnapshotid', 'size': 1, 'share_id': share_id} max_aggr_id = 123 drv._share_table[share_id] = max_aggr_id client.send_request_to(max_aggr_id, 'snapshot-list-info', IgnoreArg(), do_response_check=False).\ AndReturn(FakeSnapshots(netapp._get_valid_snapshot_name( snapshot['id']), is_busy='true')) self.mox.ReplayAll() self.assertRaises(exception.ShareSnapshotIsBusy, drv.delete_snapshot, self._context, snapshot) def test_allow_access(self): drv = self._driver share_proto = 'CIFS' ctx = self._context share = {'share_proto': share_proto} access = {} drv._helpers[share_proto].allow_access(ctx, share, access) self.mox.ReplayAll() drv.allow_access(ctx, share, access) def test_deny_access(self): drv = self._driver share_proto = 'CIFS' ctx = self._context share = {'share_proto': share_proto} access = {} drv._helpers[share_proto].deny_access(ctx, share, access) self.mox.ReplayAll() drv.deny_access(ctx, share, access) def test_no_aggregates_available(self): drv = self._driver ctx = self._context share = None drv._client.get_available_aggregates().AndReturn(None) self.mox.ReplayAll() 
self.assertRaises(exception.Error, drv.allocate_container, ctx, share) class NetAppNfsHelperTestCase(test.TestCase): """ Tests Netapp-specific NFS driver. """ def setUp(self): super(NetAppNfsHelperTestCase, self).setUp() fake_client = self.mox.CreateMock(netapp.NetAppApiClient) fake_conf = self.mox.CreateMock(Configuration) self._driver = netapp.NetAppNFSHelper(fake_client, fake_conf) def test_create_share(self): drv = self._driver client = drv._client target = 123 share = {'id': 'abc-1234-567'} client.send_request_to(target, 'nfs-exportfs-append-rules-2', IgnoreArg()) client.get_host_ip_by(target).AndReturn('host:export') self.mox.ReplayAll() export = drv.create_share(target, share) self.assertEquals(export.find('-'), -1) def test_delete_share(self): drv = self._driver client = drv._client share = {'export_location': 'host:export'} client.send_request_to(IgnoreArg(), 'nfs-exportfs-delete-rules', IgnoreArg()) self.mox.ReplayAll() drv.delete_share(share) def test_invalid_allow_access(self): drv = self._driver share = None access = {'access_type': 'passwd'} # passwd type is not supported self.assertRaises(exception.Error, drv.allow_access, context, share, access) def test_allow_access(self): drv = self._driver client = drv._client share = {'export_location': 'host:export'} access = {'access_to': ['127.0.0.1', '127.0.0.2'], 'access_type': 'ip'} client.send_request_to(IgnoreArg(), 'nfs-exportfs-list-rules-2', IgnoreArg()).AndReturn(FakeNfsRules()) client.send_request_to(IgnoreArg(), 'nfs-exportfs-append-rules-2', IgnoreArg()) self.mox.ReplayAll() drv.allow_access(context, share, access) def test_deny_access(self): drv = self._driver client = drv._client share = {'export_location': 'host:export'} access = {'access_to': ['127.0.0.1', '127.0.0.2']} client.send_request_to(IgnoreArg(), 'nfs-exportfs-list-rules-2', IgnoreArg()).AndReturn(FakeNfsRules()) client.send_request_to(IgnoreArg(), 'nfs-exportfs-append-rules-2', IgnoreArg()) self.mox.ReplayAll() 
drv.deny_access(context, share, access) def test_get_target(self): drv = self._driver ip = '172.18.0.1' export_path = '/home' share = {'export_location': ':'.join([ip, export_path])} self.assertEquals(drv.get_target(share), ip) class NetAppCifsHelperTestCase(test.TestCase): """ Tests Netapp-specific CIFS driver. """ def setUp(self): super(NetAppCifsHelperTestCase, self).setUp() fake_client = self.mox.CreateMock(netapp.NetAppApiClient) fake_conf = self.mox.CreateMock(Configuration) self._driver = netapp.NetAppCIFSHelper(fake_client, fake_conf) def tearDown(self): super(NetAppCifsHelperTestCase, self).tearDown() def test_create_share(self): drv = self._driver client = drv._client target = 123 share = {'id': 'abc-1234-567'} ip = '172.0.0.1' client.send_request_to(target, 'cifs-status').AndReturn( FakeStatus('stopped')) client.send_request_to(target, 'cifs-start', do_response_check=False) client.send_request_to(target, 'system-cli', IgnoreArg()) client.send_request_to(target, 'cifs-share-add', IgnoreArg()) client.send_request_to(target, 'cifs-share-ace-delete', IgnoreArg()) client.get_host_ip_by(target).AndReturn(ip) self.mox.ReplayAll() export = drv.create_share(target, share) self.assertEquals(export.find('-'), -1) self.assertTrue(export.startswith('//' + ip)) def test_delete_share(self): drv = self._driver client = drv._client ip = '172.10.0.1' export = 'home' share = {'export_location': '//%s/%s' % (ip, export)} client.send_request_to(IgnoreArg(), 'cifs-share-delete', IgnoreArg()) self.mox.ReplayAll() drv.delete_share(share) def test_allow_access_by_ip(self): drv = self._driver access = {'access_type': 'ip', 'access_to': '123.123.123.123'} share = None self.assertRaises(exception.Error, drv.allow_access, context, share, access) def test_allow_access_by_passwd_invalid_user(self): drv = self._driver client = drv._client access = {'access_type': 'passwd', 'access_to': 'user:pass'} ip = '172.0.0.1' export = 'export_path' share = {'export_location': '//%s/%s' % (ip, 
export)} status = FakeStatus('failed') client.send_request_to(ip, 'useradmin-user-list', IgnoreArg(), do_response_check=False).AndReturn(status) self.mox.ReplayAll() self.assertRaises(exception.Error, drv.allow_access, context, share, access) def test_allow_access_by_passwd_existing_user(self): drv = self._driver client = drv._client access = {'access_type': 'passwd', 'access_to': 'user:pass'} ip = '172.0.0.1' export = 'export_path' share = {'export_location': '//%s/%s' % (ip, export)} status = FakeStatus('passed') client.send_request_to(ip, 'useradmin-user-list', IgnoreArg(), do_response_check=False).AndReturn(status) client.send_request_to(ip, 'cifs-share-ace-set', IgnoreArg()) self.mox.ReplayAll() drv.allow_access(context, share, access) def test_deny_access(self): drv = self._driver client = drv._client access = {'access_type': 'passwd', 'access_to': 'user:pass'} ip = '172.0.0.1' export = 'export_path' share = {'export_location': '//%s/%s' % (ip, export)} client.send_request_to(ip, 'cifs-share-ace-delete', IgnoreArg()) self.mox.ReplayAll() drv.deny_access(context, share, access) def test_get_target(self): drv = self._driver ip = '172.10.0.1' export = 'export_path' share = {'export_location': '//%s/%s' % (ip, export)} self.assertEquals(drv.get_target(share), ip) class NetAppNASHelperTestCase(test.TestCase): def setUp(self): super(NetAppNASHelperTestCase, self).setUp() fake_client = self.mox.CreateMock(suds.client.Client) fake_conf = self.mox.CreateMock(Configuration) self._driver = netapp.NetAppNASHelperBase(fake_client, fake_conf) def tearDown(self): super(NetAppNASHelperTestCase, self).tearDown() def test_create_share(self): drv = self._driver target_id = None share = None self.assertRaises(NotImplementedError, drv.create_share, target_id, share) def test_delete_share(self): drv = self._driver share = None self.assertRaises(NotImplementedError, drv.delete_share, share) def test_allow_access(self): drv = self._driver share = None ctx = None access = None 
self.assertRaises(NotImplementedError, drv.allow_access, ctx, share, access) def test_deny_access(self): drv = self._driver share = None ctx = None access = None self.assertRaises(NotImplementedError, drv.deny_access, ctx, share, access) def test_get_target(self): drv = self._driver share = None self.assertRaises(NotImplementedError, drv.get_target, share) class NetAppApiClientTestCase(test.TestCase): """Tests for NetApp DFM API client. """ def setUp(self): super(NetAppApiClientTestCase, self).setUp() self.fake_conf = self.mox.CreateMock(Configuration) self._context = context.get_admin_context() self._driver = netapp.NetAppApiClient(self.fake_conf) self._driver._client = self.mox.CreateMock(suds.client.Client) self._driver._client.factory = self.mox.CreateMock(suds.client.Factory) # service object is generated dynamically from XML self._driver._client.service = self.mox.CreateMockAnything( suds.client.ServiceSelector) def test_get_host_by_ip(self): drv = self._driver client = drv._client service = client.service host_id = 123 # can't use 'filter' because it's predefined in Python fltr = client.factory.create('HostListInfoIterStart').AndReturn( FakeFilter()) resp = service.HostListInfoIterStart(HostListInfoIterStart=fltr) resp = resp.AndReturn(FakeStartResp()) service_list = service.HostListInfoIterNext(Tag=resp.Tag, Maximum=resp.Records) service_list.AndReturn(FakeHostInfo()) service.HostListInfoIterEnd(Tag=resp.Tag) self.mox.ReplayAll() drv.get_host_ip_by(host_id) def test_get_available_aggregates(self): drv = self._driver client = drv._client service = client.service resp = service.AggregateListInfoIterStart().AndReturn(FakeStartResp()) service.AggregateListInfoIterNext(Tag=resp.Tag, Maximum=resp.Records) service.AggregateListInfoIterEnd(resp.Tag) self.mox.ReplayAll() drv.get_available_aggregates() def test_send_successfull_request(self): drv = self._driver client = drv._client service = client.service factory = client.factory target = 1 args = '' responce_check 
= False request = factory.create('Request').AndReturn(FakeRequest()) service.ApiProxy(Target=target, Request=request) self.mox.ReplayAll() drv.send_request_to(target, request, args, responce_check) def test_send_failing_request(self): drv = self._driver client = drv._client service = client.service factory = client.factory target = 1 args = '' responce_check = True request = factory.create('Request').AndReturn(FakeRequest()) service.ApiProxy(Target=target, Request=request).AndRaise( exception.Error()) self.mox.ReplayAll() self.assertRaises(exception.Error, drv.send_request_to, target, request, args, responce_check) def test_successfull_setup(self): drv = self._driver for flag in drv.REQUIRED_FLAGS: setattr(netapp.CONF, flag, 'val') conf_obj = Configuration(netapp.CONF) drv.check_configuration(conf_obj) def test_failing_setup(self): drv = self._driver self.assertRaises(exception.Error, drv.check_configuration, Configuration(netapp.CONF)) manila-2013.2.dev175.gbf1a399/manila/tests/test_utils.py0000664000175000017500000006754512301410454022705 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import __builtin__ import datetime import hashlib import os import os.path import StringIO import tempfile import uuid import mock import mox from oslo.config import cfg import paramiko import manila from manila import exception from manila.openstack.common import strutils from manila.openstack.common import timeutils from manila import test from manila import utils CONF = cfg.CONF class ExecuteTestCase(test.TestCase): def test_retry_on_failure(self): fd, tmpfilename = tempfile.mkstemp() _, tmpfilename2 = tempfile.mkstemp() try: fp = os.fdopen(fd, 'w+') fp.write('''#!/bin/sh # If stdin fails to get passed during one of the runs, make a note. if ! grep -q foo then echo 'failure' > "$1" fi # If stdin has failed to get passed during this or a previous run, exit early. if grep failure "$1" then exit 1 fi runs="$(cat $1)" if [ -z "$runs" ] then runs=0 fi runs=$(($runs + 1)) echo $runs > "$1" exit 1 ''') fp.close() os.chmod(tmpfilename, 0o755) self.assertRaises(exception.ProcessExecutionError, utils.execute, tmpfilename, tmpfilename2, attempts=10, process_input='foo', delay_on_retry=False) fp = open(tmpfilename2, 'r+') runs = fp.read() fp.close() self.assertNotEquals(runs.strip(), 'failure', 'stdin did not ' 'always get passed ' 'correctly') runs = int(runs.strip()) self.assertEquals(runs, 10, 'Ran %d times instead of 10.' % (runs,)) finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) def test_unknown_kwargs_raises_error(self): self.assertRaises(exception.Error, utils.execute, '/usr/bin/env', 'true', this_is_not_a_valid_kwarg=True) def test_check_exit_code_boolean(self): utils.execute('/usr/bin/env', 'false', check_exit_code=False) self.assertRaises(exception.ProcessExecutionError, utils.execute, '/usr/bin/env', 'false', check_exit_code=True) def test_no_retry_on_success(self): fd, tmpfilename = tempfile.mkstemp() _, tmpfilename2 = tempfile.mkstemp() try: fp = os.fdopen(fd, 'w+') fp.write('''#!/bin/sh # If we've already run, bail out. 
grep -q foo "$1" && exit 1 # Mark that we've run before. echo foo > "$1" # Check that stdin gets passed correctly. grep foo ''') fp.close() os.chmod(tmpfilename, 0o755) utils.execute(tmpfilename, tmpfilename2, process_input='foo', attempts=2) finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) class GetFromPathTestCase(test.TestCase): def test_tolerates_nones(self): f = utils.get_from_path input = [] self.assertEquals([], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [None] self.assertEquals([], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': None}] self.assertEquals([], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': None}}] self.assertEquals([{'b': None}], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}] self.assertEquals([{'b': {'c': None}}], f(input, "a")) self.assertEquals([{'c': None}], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}, {'a': None}] self.assertEquals([{'b': {'c': None}}], f(input, "a")) self.assertEquals([{'c': None}], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) self.assertEquals([{'c': None}], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) def test_does_select(self): f = utils.get_from_path input = [{'a': 'a_1'}] self.assertEquals(['a_1'], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': 'b_1'}}] self.assertEquals([{'b': 'b_1'}], f(input, "a")) self.assertEquals(['b_1'], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}] self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) 
self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) self.assertEquals(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) self.assertEquals(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': {'b': None}}] self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) self.assertEquals(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': {'b': {'c': 'c_2'}}}] self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], f(input, "a")) self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) self.assertEquals([], f(input, "a/b/c/d")) self.assertEquals([], f(input, "c/a/b/d")) self.assertEquals([], f(input, "i/r/t")) def test_flattens_lists(self): f = utils.get_from_path input = [{'a': [1, 2, 3]}] self.assertEquals([1, 2, 3], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': [1, 2, 3]}}] self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) self.assertEquals([1, 2, 3], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = [{'a': [1, 2, {'b': 'b_1'}]}] self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) self.assertEquals(['b_1'], f(input, "a/b")) def test_bad_xpath(self): f = utils.get_from_path self.assertRaises(exception.Error, f, [], None) self.assertRaises(exception.Error, f, [], "") self.assertRaises(exception.Error, f, [], "/") self.assertRaises(exception.Error, f, [], "/a") 
self.assertRaises(exception.Error, f, [], "/a/") self.assertRaises(exception.Error, f, [], "//") self.assertRaises(exception.Error, f, [], "//a") self.assertRaises(exception.Error, f, [], "a//a") self.assertRaises(exception.Error, f, [], "a//a/") self.assertRaises(exception.Error, f, [], "a/a/") def test_real_failure1(self): # Real world failure case... # We weren't coping when the input was a Dictionary instead of a List # This led to test_accepts_dictionaries f = utils.get_from_path inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], 'address': '192.168.0.3'}, 'hostname': ''} private_ips = f(inst, 'fixed_ip/address') public_ips = f(inst, 'fixed_ip/floating_ips/address') self.assertEquals(['192.168.0.3'], private_ips) self.assertEquals(['1.2.3.4'], public_ips) def test_accepts_dictionaries(self): f = utils.get_from_path input = {'a': [1, 2, 3]} self.assertEquals([1, 2, 3], f(input, "a")) self.assertEquals([], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = {'a': {'b': [1, 2, 3]}} self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) self.assertEquals([1, 2, 3], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) self.assertEquals([], f(input, "a/b/c")) input = {'a': [1, 2, {'b': 'b_1'}]} self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) self.assertEquals(['b_1'], f(input, "a/b")) class GenericUtilsTestCase(test.TestCase): def test_hostname_unicode_sanitization(self): hostname = u"\u7684.test.example.com" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_periods(self): hostname = "....test.example.com..." 
self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_generate_glance_url(self): generated_url = utils.generate_glance_url() actual_url = "http://%s:%d" % (CONF.glance_host, CONF.glance_port) self.assertEqual(generated_url, actual_url) def test_read_cached_file(self): self.mox.StubOutWithMock(os.path, "getmtime") os.path.getmtime(mox.IgnoreArg()).AndReturn(1) self.mox.ReplayAll() cache_data = {"data": 1123, "mtime": 1} data = utils.read_cached_file("/this/is/a/fake", cache_data) self.assertEqual(cache_data["data"], data) def test_read_modified_cached_file(self): self.mox.StubOutWithMock(os.path, "getmtime") self.mox.StubOutWithMock(__builtin__, 'open') os.path.getmtime(mox.IgnoreArg()).AndReturn(2) fake_contents = "lorem ipsum" fake_file = self.mox.CreateMockAnything() fake_file.read().AndReturn(fake_contents) fake_context_manager = self.mox.CreateMockAnything() fake_context_manager.__enter__().AndReturn(fake_file) fake_context_manager.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager) self.mox.ReplayAll() cache_data = {"data": 1123, "mtime": 1} self.reload_called = False def test_reload(reloaded_data): self.assertEqual(reloaded_data, fake_contents) self.reload_called = True data = utils.read_cached_file("/this/is/a/fake", cache_data, reload_func=test_reload) self.assertEqual(data, fake_contents) self.assertTrue(self.reload_called) def test_generate_password(self): password = 
utils.generate_password() self.assertTrue([c for c in password if c in '0123456789']) self.assertTrue([c for c in password if c in 'abcdefghijklmnopqrstuvwxyz']) self.assertTrue([c for c in password if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) def test_read_file_as_root(self): def fake_execute(*args, **kwargs): if args[1] == 'bad': raise exception.ProcessExecutionError return 'fakecontents', None self.stubs.Set(utils, 'execute', fake_execute) contents = utils.read_file_as_root('good') self.assertEqual(contents, 'fakecontents') self.assertRaises(exception.FileNotFound, utils.read_file_as_root, 'bad') def test_strcmp_const_time(self): self.assertTrue(utils.strcmp_const_time('abc123', 'abc123')) self.assertFalse(utils.strcmp_const_time('a', 'aaaaa')) self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123')) def test_temporary_chown(self): def fake_execute(*args, **kwargs): if args[0] == 'chown': fake_execute.uid = args[1] self.stubs.Set(utils, 'execute', fake_execute) with tempfile.NamedTemporaryFile() as f: with utils.temporary_chown(f.name, owner_uid=2): self.assertEqual(fake_execute.uid, 2) self.assertEqual(fake_execute.uid, os.getuid()) def test_service_is_up(self): fts_func = datetime.datetime.fromtimestamp fake_now = 1000 down_time = 5 self.flags(service_down_time=down_time) self.mox.StubOutWithMock(timeutils, 'utcnow') # Up (equal) timeutils.utcnow().AndReturn(fts_func(fake_now)) service = {'updated_at': fts_func(fake_now - down_time), 'created_at': fts_func(fake_now - down_time)} self.mox.ReplayAll() result = utils.service_is_up(service) self.assertTrue(result) self.mox.ResetAll() # Up timeutils.utcnow().AndReturn(fts_func(fake_now)) service = {'updated_at': fts_func(fake_now - down_time + 1), 'created_at': fts_func(fake_now - down_time + 1)} self.mox.ReplayAll() result = utils.service_is_up(service) self.assertTrue(result) self.mox.ResetAll() # Down timeutils.utcnow().AndReturn(fts_func(fake_now)) service = {'updated_at': fts_func(fake_now - down_time - 1), 
'created_at': fts_func(fake_now - down_time - 1)} self.mox.ReplayAll() result = utils.service_is_up(service) self.assertFalse(result) def test_safe_parse_xml(self): normal_body = ('' 'heythere') def killer_body(): return ((""" ]> %(d)s """) % { 'a': 'A' * 10, 'b': '&a;' * 10, 'c': '&b;' * 10, 'd': '&c;' * 9999, }).strip() dom = utils.safe_minidom_parse_string(normal_body) # Some versions of minidom inject extra newlines so we ignore them result = str(dom.toxml()).replace('\n', '') self.assertEqual(normal_body, result) self.assertRaises(ValueError, utils.safe_minidom_parse_string, killer_body()) def test_xhtml_escape(self): self.assertEqual('"foo"', utils.xhtml_escape('"foo"')) self.assertEqual(''foo'', utils.xhtml_escape("'foo'")) def test_hash_file(self): data = 'Mary had a little lamb, its fleece as white as snow' flo = StringIO.StringIO(data) h1 = utils.hash_file(flo) h2 = hashlib.sha1(data).hexdigest() self.assertEquals(h1, h2) class MonkeyPatchTestCase(test.TestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() self.example_package = 'manila.tests.monkey_patch_example.' self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() manila.tests.monkey_patch_example.CALLED_FUNCTION = [] from manila.tests.monkey_patch_example import example_a from manila.tests.monkey_patch_example import example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() exampleA.example_method() ret_a = exampleA.example_method_add(3, 5) self.assertEqual(ret_a, 8) self.assertEqual('Example function', example_b.example_function_b()) exampleB = example_b.ExampleClassB() exampleB.example_method() ret_b = exampleB.example_method_add(3, 5) self.assertEqual(ret_b, 8) package_a = self.example_package + 'example_a.' 
self.assertTrue(package_a + 'example_function_a' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method_add' in manila.tests.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' self.assertFalse(package_b + 'example_function_b' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method_add' in manila.tests.monkey_patch_example.CALLED_FUNCTION) class AuditPeriodTest(test.TestCase): def setUp(self): super(AuditPeriodTest, self).setUp() #a fairly random time to test with self.test_time = datetime.datetime(second=23, minute=12, hour=8, day=5, month=3, year=2012) self.patcher = mock.patch.object(timeutils, 'utcnow') self.mock_utcnow = self.patcher.start() self.mock_utcnow.return_value = self.test_time def tearDown(self): self.patcher.stop() super(AuditPeriodTest, self).tearDown() def test_hour(self): begin, end = utils.last_completed_audit_period(unit='hour') self.assertEquals(begin, datetime.datetime(hour=7, day=5, month=3, year=2012)) self.assertEquals(end, datetime.datetime(hour=8, day=5, month=3, year=2012)) def test_hour_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='hour@10') self.assertEquals(begin, datetime.datetime(minute=10, hour=7, day=5, month=3, year=2012)) self.assertEquals(end, datetime.datetime(minute=10, hour=8, day=5, month=3, year=2012)) def test_hour_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='hour@30') self.assertEquals(begin, datetime.datetime(minute=30, hour=6, day=5, month=3, year=2012)) self.assertEquals(end, datetime.datetime(minute=30, hour=7, day=5, month=3, year=2012)) def test_day(self): begin, 
end = utils.last_completed_audit_period(unit='day') self.assertEquals(begin, datetime.datetime(day=4, month=3, year=2012)) self.assertEquals(end, datetime.datetime(day=5, month=3, year=2012)) def test_day_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='day@6') self.assertEquals(begin, datetime.datetime(hour=6, day=4, month=3, year=2012)) self.assertEquals(end, datetime.datetime(hour=6, day=5, month=3, year=2012)) def test_day_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='day@10') self.assertEquals(begin, datetime.datetime(hour=10, day=3, month=3, year=2012)) self.assertEquals(end, datetime.datetime(hour=10, day=4, month=3, year=2012)) def test_month(self): begin, end = utils.last_completed_audit_period(unit='month') self.assertEquals(begin, datetime.datetime(day=1, month=2, year=2012)) self.assertEquals(end, datetime.datetime(day=1, month=3, year=2012)) def test_month_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='month@2') self.assertEquals(begin, datetime.datetime(day=2, month=2, year=2012)) self.assertEquals(end, datetime.datetime(day=2, month=3, year=2012)) def test_month_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='month@15') self.assertEquals(begin, datetime.datetime(day=15, month=1, year=2012)) self.assertEquals(end, datetime.datetime(day=15, month=2, year=2012)) def test_year(self): begin, end = utils.last_completed_audit_period(unit='year') self.assertEquals(begin, datetime.datetime(day=1, month=1, year=2011)) self.assertEquals(end, datetime.datetime(day=1, month=1, year=2012)) def test_year_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='year@2') self.assertEquals(begin, datetime.datetime(day=1, month=2, year=2011)) self.assertEquals(end, datetime.datetime(day=1, month=2, year=2012)) def test_year_with_offset_after_current(self): begin, end = 
utils.last_completed_audit_period(unit='year@6') self.assertEquals(begin, datetime.datetime(day=1, month=6, year=2010)) self.assertEquals(end, datetime.datetime(day=1, month=6, year=2011)) class FakeSSHClient(object): def __init__(self): self.id = uuid.uuid4() self.transport = FakeTransport() def set_missing_host_key_policy(self, policy): pass def connect(self, ip, port=22, username=None, password=None, pkey=None, timeout=10): pass def get_transport(self): return self.transport def close(self): pass def __call__(self, *args, **kwargs): pass class FakeSock(object): def settimeout(self, timeout): pass class FakeTransport(object): def __init__(self): self.active = True self.sock = FakeSock() def set_keepalive(self, timeout): pass def is_active(self): return self.active class SSHPoolTestCase(test.TestCase): """Unit test for SSH Connection Pool.""" def setup(self): self.mox.StubOutWithMock(paramiko, "SSHClient") paramiko.SSHClient().AndReturn(FakeSSHClient()) self.mox.ReplayAll() def test_single_ssh_connect(self): self.setup() sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id self.assertEqual(first_id, second_id) def test_closed_reopend_ssh_connections(self): self.setup() sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=2) with sshpool.item() as ssh: first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id # Close the connection and test for a new connection ssh.get_transport().active = False self.assertEqual(first_id, second_id) # The mox items are not getting setup in a new pool connection, # so had to reset and set again. 
self.mox.UnsetStubs() self.setup() with sshpool.item() as ssh: third_id = ssh.id self.assertNotEqual(first_id, third_id) manila-2013.2.dev175.gbf1a399/manila/tests/compute/0000775000175000017500000000000012301410516021566 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/compute/test_nova.py0000664000175000017500000002000412301410454024137 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from manila.compute import nova from manila import context from manila import exception from manila import test from manila.volume import cinder from novaclient import exceptions as nova_exception from novaclient.v1_1 import servers as nova_servers class Volume(object): def __init__(self, volume_id): self.id = volume_id self.display_name = volume_id class FakeNovaClient(object): class Servers(object): def get(self, instance_id): return {'id': instance_id} def list(self, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None class Volumes(object): def get(self, volume_id): return Volume(volume_id) def list(self, detailed, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None def __init__(self): self.servers = self.Servers() self.volumes = self.Volumes() self.keypairs = self.servers class 
NovaApiTestCase(test.TestCase): def setUp(self): super(NovaApiTestCase, self).setUp() self.api = nova.API() self.novaclient = FakeNovaClient() self.ctx = context.get_admin_context() self.stubs.Set(nova, 'novaclient', mock.Mock(return_value=self.novaclient)) self.stubs.Set(nova, '_untranslate_server_summary_view', lambda server: server) def test_server_create(self): result = self.api.server_create(self.ctx, 'server_name', 'fake_image', 'fake_flavor', None, None, None) self.assertEqual(result['id'], 'created_id') def test_server_delete(self): self.stubs.Set(self.novaclient.servers, 'delete', mock.Mock()) self.api.server_delete(self.ctx, 'id1') self.novaclient.servers.delete.assert_called_once_with('id1') def test_server_get(self): instance_id = 'instance_id1' result = self.api.server_get(self.ctx, instance_id) self.assertEqual(result['id'], instance_id) def test_server_get_failed(self): nova.novaclient.side_effect = nova_exception.NotFound(404) instance_id = 'instance_id' self.assertRaises(exception.InstanceNotFound, self.api.server_get, self.ctx, instance_id) def test_server_list(self): self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.server_list(self.ctx)) def test_server_pause(self): self.stubs.Set(self.novaclient.servers, 'pause', mock.Mock()) self.api.server_pause(self.ctx, 'id1') self.novaclient.servers.pause.assert_called_once_with('id1') def test_server_unpause(self): self.stubs.Set(self.novaclient.servers, 'unpause', mock.Mock()) self.api.server_unpause(self.ctx, 'id1') self.novaclient.servers.unpause.assert_called_once_with('id1') def test_server_suspend(self): self.stubs.Set(self.novaclient.servers, 'suspend', mock.Mock()) self.api.server_suspend(self.ctx, 'id1') self.novaclient.servers.suspend.assert_called_once_with('id1') def test_server_resume(self): self.stubs.Set(self.novaclient.servers, 'resume', mock.Mock()) self.api.server_resume(self.ctx, 'id1') self.novaclient.servers.resume.assert_called_once_with('id1') def 
test_server_reboot_hard(self): self.stubs.Set(self.novaclient.servers, 'reboot', mock.Mock()) self.api.server_reboot(self.ctx, 'id1') self.novaclient.servers.reboot.assert_called_once_with('id1', nova_servers.REBOOT_HARD) def test_server_reboot_soft(self): self.stubs.Set(self.novaclient.servers, 'reboot', mock.Mock()) self.api.server_reboot(self.ctx, 'id1', True) self.novaclient.servers.reboot.assert_called_once_with('id1', nova_servers.REBOOT_SOFT) def test_server_rebuild(self): self.stubs.Set(self.novaclient.servers, 'rebuild', mock.Mock()) self.api.server_rebuild(self.ctx, 'id1', 'fake_image') self.novaclient.servers.rebuild.assert_called_once_with('id1', 'fake_image', None) def test_instance_volume_attach(self): self.stubs.Set(self.novaclient.volumes, 'create_server_volume', mock.Mock()) self.api.instance_volume_attach(self.ctx, 'instance_id', 'vol_id', 'device') self.novaclient.volumes.create_server_volume.\ assert_called_once_with('instance_id', 'vol_id', 'device') def test_instance_volume_detach(self): self.stubs.Set(self.novaclient.volumes, 'delete_server_volume', mock.Mock()) self.api.instance_volume_detach(self.ctx, 'instance_id', 'att_id') self.novaclient.volumes.delete_server_volume.\ assert_called_once_with('instance_id', 'att_id') def test_instance_volumes_list(self): self.stubs.Set(self.novaclient.volumes, 'get_server_volumes', mock.Mock(return_value=[Volume('id1'), Volume('id2')])) self.cinderclient = self.novaclient self.stubs.Set(cinder, 'cinderclient', mock.Mock(return_value=self.novaclient)) result = self.api.instance_volumes_list(self.ctx, 'instance_id') self.assertEqual(len(result), 2) self.assertEqual(result[0].id, 'id1') self.assertEqual(result[1].id, 'id2') def test_server_update(self): self.stubs.Set(self.novaclient.servers, 'update', mock.Mock()) self.api.server_update(self.ctx, 'id1', 'new_name') self.novaclient.servers.update.assert_called_once_with('id1', name='new_name') def test_update_server_volume(self): 
self.stubs.Set(self.novaclient.volumes, 'update_server_volume', mock.Mock()) self.api.update_server_volume(self.ctx, 'instance_id', 'att_id', 'new_vol_id') self.novaclient.volumes.update_server_volume.\ assert_called_once_with('instance_id', 'att_id', 'new_vol_id') def test_keypair_create(self): self.stubs.Set(self.novaclient.keypairs, 'create', mock.Mock()) self.api.keypair_create(self.ctx, 'keypair_name') self.novaclient.keypairs.create.assert_called_once_with('keypair_name') def test_keypair_import(self): self.stubs.Set(self.novaclient.keypairs, 'create', mock.Mock()) self.api.keypair_import(self.ctx, 'keypair_name', 'fake_pub_key') self.novaclient.keypairs.create.\ assert_called_once_with('keypair_name', 'fake_pub_key') def test_keypair_delete(self): self.stubs.Set(self.novaclient.keypairs, 'delete', mock.Mock()) self.api.keypair_delete(self.ctx, 'fake_keypair_id') self.novaclient.keypairs.delete.\ assert_called_once_with('fake_keypair_id') def test_keypair_list(self): self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.keypair_list(self.ctx)) manila-2013.2.dev175.gbf1a399/manila/tests/compute/__init__.py0000664000175000017500000000000012301410454023666 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/test_quota.py0000664000175000017500000020701412301410454022661 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from oslo.config import cfg from manila import context from manila import db from manila.db.sqlalchemy import api as sqa_api from manila.db.sqlalchemy import models as sqa_models from manila import exception from manila.openstack.common import rpc from manila.openstack.common import timeutils from manila import quota from manila import share from manila import test import manila.tests.image.fake CONF = cfg.CONF class QuotaIntegrationTestCase(test.TestCase): def setUp(self): super(QuotaIntegrationTestCase, self).setUp() self.flags(quota_shares=2, quota_gigabytes=20) self.user_id = 'admin' self.project_id = 'admin' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) orig_rpc_call = rpc.call def rpc_call_wrapper(context, topic, msg, timeout=None): return orig_rpc_call(context, topic, msg) self.stubs.Set(rpc, 'call', rpc_call_wrapper) def tearDown(self): super(QuotaIntegrationTestCase, self).tearDown() manila.tests.image.fake.FakeImageService_reset() def _create_share(self, size=10): """Create a test share.""" share = {} share['user_id'] = self.user_id share['project_id'] = self.project_id share['size'] = size share['status'] = 'available' return db.share_create(self.context, share) def _create_snapshot(self, share): snapshot = {} snapshot['user_id'] = self.user_id snapshot['project_id'] = self.project_id snapshot['share_id'] = share['id'] snapshot['share_size'] = share['size'] snapshot['status'] = 'available' return db.share_snapshot_create(self.context, snapshot) @test.skip_test("SQLAlchemy sqlite insert bug") def test_too_many_shares(self): share_ids = [] for i in range(CONF.quota_shares): share_ref = self._create_share() share_ids.append(share_ref['id']) self.assertRaises(exception.QuotaError, share.API().create, self.context, 'nfs', 10, '', '', None) for share_id in share_ids: 
db.share_delete(self.context, share_id) @test.skip_test("SQLAlchemy sqlite insert bug") def test_too_many_gigabytes(self): share_ids = [] share_ref = self._create_share(size=20) share_ids.append(share_ref['id']) self.assertRaises(exception.QuotaError, share.API().create, self.context, 'cifs', 10, '', '', None) for share_id in share_ids: db.share_delete(self.context, share_id) class FakeContext(object): def __init__(self, project_id, quota_class): self.is_admin = False self.user_id = 'fake_user' self.project_id = project_id self.quota_class = quota_class self.read_deleted = 'no' def elevated(self): elevated = self.__class__(self.project_id, self.quota_class) elevated.is_admin = True return elevated class FakeDriver(object): def __init__(self, by_project=None, by_class=None, reservations=None): self.called = [] self.by_project = by_project or {} self.by_class = by_class or {} self.reservations = reservations or [] def get_by_project(self, context, project_id, resource): self.called.append(('get_by_project', context, project_id, resource)) try: return self.by_project[project_id][resource] except KeyError: raise exception.ProjectQuotaNotFound(project_id=project_id) def get_by_class(self, context, quota_class, resource): self.called.append(('get_by_class', context, quota_class, resource)) try: return self.by_class[quota_class][resource] except KeyError: raise exception.QuotaClassNotFound(class_name=quota_class) def get_defaults(self, context, resources): self.called.append(('get_defaults', context, resources)) return resources def get_class_quotas(self, context, resources, quota_class, defaults=True): self.called.append(('get_class_quotas', context, resources, quota_class, defaults)) return resources def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): self.called.append(('get_project_quotas', context, resources, project_id, quota_class, defaults, usages, remains)) return resources def 
limit_check(self, context, resources, values, project_id=None, user_id=None): self.called.append(('limit_check', context, resources, values, project_id, user_id)) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): self.called.append(('reserve', context, resources, deltas, expire, project_id, user_id)) return self.reservations def commit(self, context, reservations, project_id=None, user_id=None): self.called.append(('commit', context, reservations, project_id, user_id)) def rollback(self, context, reservations, project_id=None, user_id=None): self.called.append(('rollback', context, reservations, project_id, user_id)) def destroy_all_by_project_and_user(self, context, project_id, user_id): self.called.append(('destroy_all_by_project_and_user', context, project_id, user_id)) def destroy_all_by_project(self, context, project_id): self.called.append(('destroy_all_by_project', context, project_id)) def expire(self, context): self.called.append(('expire', context)) class BaseResourceTestCase(test.TestCase): def test_no_flag(self): resource = quota.BaseResource('test_resource') self.assertEqual(resource.name, 'test_resource') self.assertEqual(resource.flag, None) self.assertEqual(resource.default, -1) def test_with_flag(self): # We know this flag exists, so use it... 
self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') self.assertEqual(resource.name, 'test_resource') self.assertEqual(resource.flag, 'quota_shares') self.assertEqual(resource.default, 10) def test_with_flag_no_quota(self): self.flags(quota_shares=-1) resource = quota.BaseResource('test_resource', 'quota_shares') self.assertEqual(resource.name, 'test_resource') self.assertEqual(resource.flag, 'quota_shares') self.assertEqual(resource.default, -1) def test_quota_no_project_no_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver() context = FakeContext(None, None) quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 10) def test_quota_with_project_no_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=15), )) context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 15) def test_quota_no_project_with_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=20), )) context = FakeContext(None, 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 20) def test_quota_with_project_with_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), ), by_class=dict(test_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(quota_value, 15) def test_quota_override_project_with_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = 
FakeDriver(by_project=dict( test_project=dict(test_resource=15), override_project=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, project_id='override_project') self.assertEqual(quota_value, 20) def test_quota_with_project_override_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=15), override_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, quota_class='override_class') self.assertEqual(quota_value, 20) class QuotaEngineTestCase(test.TestCase): def test_init(self): quota_obj = quota.QuotaEngine() self.assertEqual(quota_obj._resources, {}) self.assertTrue(isinstance(quota_obj._driver, quota.DbQuotaDriver)) def test_init_override_string(self): quota_obj = quota.QuotaEngine( quota_driver_class='manila.tests.test_quota.FakeDriver') self.assertEqual(quota_obj._resources, {}) self.assertTrue(isinstance(quota_obj._driver, FakeDriver)) def test_init_override_obj(self): quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) self.assertEqual(quota_obj._resources, {}) self.assertEqual(quota_obj._driver, FakeDriver) def test_register_resource(self): quota_obj = quota.QuotaEngine() resource = quota.AbsoluteResource('test_resource') quota_obj.register_resource(resource) self.assertEqual(quota_obj._resources, dict(test_resource=resource)) def test_register_resources(self): quota_obj = quota.QuotaEngine() resources = [ quota.AbsoluteResource('test_resource1'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource3'), ] quota_obj.register_resources(resources) self.assertEqual(quota_obj._resources, dict(test_resource1=resources[0], test_resource2=resources[1], test_resource3=resources[2], )) def test_sync_predeclared(self): quota_obj = quota.QuotaEngine() def spam(*args, 
**kwargs): pass resource = quota.ReservableResource('test_resource', spam) quota_obj.register_resource(resource) self.assertEqual(resource.sync, spam) def test_sync_multi(self): quota_obj = quota.QuotaEngine() def spam(*args, **kwargs): pass resources = [ quota.ReservableResource('test_resource1', spam), quota.ReservableResource('test_resource2', spam), quota.ReservableResource('test_resource3', spam), quota.ReservableResource('test_resource4', spam), ] quota_obj.register_resources(resources[:2]) self.assertEqual(resources[0].sync, spam) self.assertEqual(resources[1].sync, spam) self.assertEqual(resources[2].sync, spam) self.assertEqual(resources[3].sync, spam) def test_get_by_project(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project(context, 'test_project', 'test_resource') self.assertEqual(driver.called, [('get_by_project', context, 'test_project', 'test_resource'), ]) self.assertEqual(result, 42) def test_get_by_class(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_class(context, 'test_class', 'test_resource') self.assertEqual(driver.called, [('get_by_class', context, 'test_class', 'test_resource'), ]) self.assertEqual(result, 42) def _make_quota_obj(self, driver): quota_obj = quota.QuotaEngine(quota_driver_class=driver) resources = [ quota.AbsoluteResource('test_resource4'), quota.AbsoluteResource('test_resource3'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource1'), ] quota_obj.register_resources(resources) return quota_obj def test_get_defaults(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result = quota_obj.get_defaults(context) 
self.assertEqual(driver.called, [('get_defaults', context, quota_obj._resources), ]) self.assertEqual(result, quota_obj._resources) def test_get_class_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_class_quotas(context, 'test_class') result2 = quota_obj.get_class_quotas(context, 'test_class', False) self.assertEqual(driver.called, [ ('get_class_quotas', context, quota_obj._resources, 'test_class', True), ('get_class_quotas', context, quota_obj._resources, 'test_class', False), ]) self.assertEqual(result1, quota_obj._resources) self.assertEqual(result2, quota_obj._resources) def test_get_project_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual(driver.called, [ ('get_project_quotas', context, quota_obj._resources, 'test_project', None, True, True, False), ('get_project_quotas', context, quota_obj._resources, 'test_project', 'test_class', False, False, False), ]) self.assertEqual(result1, quota_obj._resources) self.assertEqual(result2, quota_obj._resources) def test_count_no_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource5', True, foo='bar') def test_count_wrong_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource1', True, foo='bar') def test_count(self): def fake_count(context, *args, **kwargs): self.assertEqual(args, (True,)) self.assertEqual(kwargs, dict(foo='bar')) return 5 context = FakeContext(None, None) driver = 
FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.register_resource(quota.CountableResource('test_resource5', fake_count)) result = quota_obj.count(context, 'test_resource5', True, foo='bar') self.assertEqual(result, 5) def test_limit_check(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.limit_check(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) self.assertEqual(driver.called, [ ('limit_check', context, quota_obj._resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1,), None, None), ]) def test_reserve(self): context = FakeContext(None, None) driver = FakeDriver(reservations=['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) quota_obj = self._make_quota_obj(driver) result1 = quota_obj.reserve(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) result2 = quota_obj.reserve(context, expire=3600, test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) result3 = quota_obj.reserve(context, project_id='fake_project', test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) self.assertEqual(driver.called, [ ('reserve', context, quota_obj._resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1, ), None, None, None), ('reserve', context, quota_obj._resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), 3600, None, None), ('reserve', context, quota_obj._resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), None, 'fake_project', None), ]) self.assertEqual(result1, ['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) self.assertEqual(result2, ['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) self.assertEqual(result3, ['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) def test_commit(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = 
self._make_quota_obj(driver) quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual(driver.called, [('commit', context, ['resv-01', 'resv-02', 'resv-03'], None, None), ]) def test_rollback(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual(driver.called, [('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None, None), ]) def test_destroy_all_by_project_and_user(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project_and_user(context, 'test_project', 'fake_user') self.assertEqual(driver.called, [ ('destroy_all_by_project_and_user', context, 'test_project', 'fake_user'), ]) def test_destroy_all_by_project(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project(context, 'test_project') self.assertEqual(driver.called, [('destroy_all_by_project', context, 'test_project'), ]) def test_expire(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.expire(context) self.assertEqual(driver.called, [('expire', context), ]) def test_resources(self): quota_obj = self._make_quota_obj(None) self.assertEqual(quota_obj.resources, ['test_resource1', 'test_resource2', 'test_resource3', 'test_resource4']) class DbQuotaDriverTestCase(test.TestCase): def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.flags(quota_shares=10, quota_snapshots=10, quota_gigabytes=1000, reservation_expire=86400, until_refresh=0, max_age=0, ) self.driver = quota.DbQuotaDriver() self.calls = [] self.patcher = mock.patch.object(timeutils, 'utcnow') self.mock_utcnow = self.patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def tearDown(self): self.patcher.stop() super(DbQuotaDriverTestCase, 
self).tearDown() def test_get_defaults(self): # Use our pre-defined resources result = self.driver.get_defaults(None, quota.QUOTAS._resources) self.assertEqual( result, dict( shares=10, gigabytes=1000, snapshots=10)) def _stub_quota_class_get_all_by_name(self): # Stub out quota_class_get_all_by_name def fake_qcgabn(context, quota_class): self.calls.append('quota_class_get_all_by_name') self.assertEqual(quota_class, 'test_class') return dict(gigabytes=500, shares=10, ) self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn) def test_get_class_quotas(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class') self.assertEqual(self.calls, ['quota_class_get_all_by_name']) self.assertEqual(result, dict(shares=10, gigabytes=500, snapshots=10)) def test_get_class_quotas_no_defaults(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class', False) self.assertEqual(self.calls, ['quota_class_get_all_by_name']) self.assertEqual(result, dict(shares=10, gigabytes=500)) def _stub_get_by_project_and_user(self): def fake_qgabpu(context, project_id, user_id): self.calls.append('quota_get_all_by_project_and_user') self.assertEqual(project_id, 'test_project') self.assertEqual(user_id, 'fake_user') return dict(shares=10, gigabytes=50, reserved=0) def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual(project_id, 'test_project') return dict(shares=10, gigabytes=50, reserved=0) def fake_qugabpu(context, project_id, user_id): self.calls.append('quota_usage_get_all_by_project_and_user') self.assertEqual(project_id, 'test_project') self.assertEqual(user_id, 'fake_user') return dict(shares=dict(in_use=2, reserved=0), gigabytes=dict(in_use=10, reserved=0), ) self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpu) self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp) 
self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user', fake_qugabpu) self._stub_quota_class_get_all_by_name() def test_get_user_quotas(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(shares=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=0, reserved=0, ), )) def _stub_get_by_project(self): def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual(project_id, 'test_project') return dict(shares=10, gigabytes=50, reserved=0) def fake_qugabp(context, project_id): self.calls.append('quota_usage_get_all_by_project') self.assertEqual(project_id, 'test_project') return dict(shares=dict(in_use=2, reserved=0), gigabytes=dict(in_use=10, reserved=0), ) self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp) self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp) self._stub_quota_class_get_all_by_name() def test_get_project_quotas(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(self.calls, ['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(shares=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=0, reserved=0, ), )) def test_get_user_quotas_alt_context_no_class(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('other_project', None), quota.QUOTAS._resources, 'test_project', 'fake_user') 
self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', ]) self.assertEqual(result, dict(shares=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=0, reserved=0, ), )) def test_get_project_quotas_alt_context_no_class(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', None), quota.QUOTAS._resources, 'test_project') self.assertEqual(self.calls, ['quota_get_all_by_project', 'quota_usage_get_all_by_project', ]) self.assertEqual(result, dict(shares=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=0, reserved=0, ), )) def test_get_user_quotas_alt_context_with_class(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', quota_class='test_class') self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(shares=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=0, reserved=0, ), )) def test_get_project_quotas_alt_context_with_class(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS._resources, 'test_project', quota_class='test_class') self.assertEqual(self.calls, ['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(shares=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=0, reserved=0, ), )) def test_get_user_quotas_no_defaults(self): 
self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', defaults=False) self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(gigabytes=dict(limit=50, in_use=10, reserved=0, ), shares=dict(limit=10, in_use=2, reserved=0, ), )) def test_get_project_quotas_no_defaults(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', defaults=False) self.assertEqual(self.calls, ['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(gigabytes=dict(limit=50, in_use=10, reserved=0, ), shares=dict(limit=10, in_use=2, reserved=0, ), )) def test_get_user_quotas_no_usages(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False) self.assertEqual(self.calls, [ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(shares=dict(limit=10, ), gigabytes=dict(limit=50, ), snapshots=dict(limit=10))) def test_get_project_quotas_no_usages(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', usages=False) self.assertEqual(self.calls, ['quota_get_all_by_project', 'quota_class_get_all_by_name', ]) self.assertEqual(result, dict(shares=dict(limit=10, ), gigabytes=dict(limit=50, ), snapshots=dict(limit=10))) def _stub_get_settable_quotas(self): def fake_get_project_quotas(context, resources, project_id, quota_class=None, defaults=True, usages=True, 
remains=False): self.calls.append('get_project_quotas') result = {} for k, v in resources.items(): remains = v.default in_use = 0 result[k] = {'limit': v.default, 'in_use': in_use, 'reserved': 0, 'remains': remains} return result def fake_get_user_quotas(context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): self.calls.append('get_user_quotas') result = {} for k, v in resources.items(): in_use = 0 result[k] = {'limit': v.default, 'in_use': in_use, 'reserved': 0} return result def fake_qgabpau(context, project_id, user_id): self.calls.append('quota_get_all_by_project_and_user') return {'shares': 2} self.stubs.Set(self.driver, 'get_project_quotas', fake_get_project_quotas) self.stubs.Set(self.driver, 'get_user_quotas', fake_get_user_quotas) self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau) def test_get_settable_quotas_with_user(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', user_id='test_user') self.assertEqual(self.calls, [ 'get_project_quotas', 'get_user_quotas', 'quota_get_all_by_project_and_user', ]) self.assertEqual(result, dict(shares=dict(maximum=12, minimum=0, ), gigabytes=dict(maximum=1000, minimum=0, ), snapshots=dict(maximum=10, minimum=0, ), )) def test_get_settable_quotas_without_user(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(self.calls, [ 'get_project_quotas', ]) self.assertEqual(result, dict(shares=dict(maximum=-1, minimum=0, ), gigabytes=dict(maximum=-1, minimum=0, ), snapshots=dict(maximum=-1, minimum=0, ), )) def _stub_get_project_quotas(self): def fake_get_project_quotas(context, resources, project_id, quota_class=None, defaults=True, usages=True): self.calls.append('get_project_quotas') return dict((k, dict(limit=v.default)) for k, v in 
resources.items()) self.stubs.Set(self.driver, 'get_project_quotas', fake_get_project_quotas) def test_get_quotas_has_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['unknown'], True) self.assertEqual(self.calls, []) def test_get_quotas_no_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['unknown'], False) self.assertEqual(self.calls, []) def test_get_quotas_has_sync_no_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['metadata_items'], True) self.assertEqual(self.calls, []) def test_get_quotas_no_sync_has_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['shares'], False) self.assertEqual(self.calls, []) def test_get_quotas_has_sync(self): self._stub_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, ['shares', 'gigabytes'], True) self.assertEqual(self.calls, ['get_project_quotas']) self.assertEqual(result, dict(shares=10, gigabytes=1000, )) def _stub_quota_reserve(self): def fake_quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): self.calls.append(('quota_reserve', expire, until_refresh, max_age)) return ['resv-1', 'resv-2', 'resv-3'] self.stubs.Set(db, 'quota_reserve', fake_quota_reserve) def test_reserve_bad_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.assertRaises(exception.InvalidReservationExpiration, self.driver.reserve, FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire='invalid') 
self.assertEqual(self.calls, []) def test_reserve_default_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2)) expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) self.assertEqual(self.calls, ['get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_int_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=3600) expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.assertEqual(self.calls, ['get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_timedelta_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire_delta = datetime.timedelta(seconds=60) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire_delta) expire = timeutils.utcnow() + expire_delta self.assertEqual(self.calls, ['get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_datetime_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire) self.assertEqual(self.calls, ['get_project_quotas', ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_until_refresh(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(until_refresh=500) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = 
self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire) self.assertEqual(self.calls, ['get_project_quotas', ('quota_reserve', expire, 500, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_max_age(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(max_age=86400) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire) self.assertEqual(self.calls, ['get_project_quotas', ('quota_reserve', expire, 0, 86400), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def _stub_quota_delete_all_by_project(self): def fake_quota_delete_all_by_project(context, project_id): self.calls.append(('quota_destroy_all_by_project', project_id)) return None self.stubs.Set(sqa_api, 'quota_destroy_all_by_project', fake_quota_delete_all_by_project) def test_delete_by_project(self): self._stub_quota_delete_all_by_project() self.driver.destroy_all_by_project(FakeContext('test_project', 'test_class'), 'test_project') self.assertEqual(self.calls, [('quota_destroy_all_by_project', ('test_project')), ]) class FakeSession(object): def begin(self): return self def add(self, instance): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): return False class FakeUsage(sqa_models.QuotaUsage): def save(self, *args, **kwargs): pass class QuotaReserveSqlAlchemyTestCase(test.TestCase): # manila.db.sqlalchemy.api.quota_reserve is so complex it needs its # own test case, and since it's a quota manipulator, this is the # best place to put it... 
def setUp(self): super(QuotaReserveSqlAlchemyTestCase, self).setUp() self.sync_called = set() def make_sync(res_name): def sync(context, project_id, user_id, session): self.sync_called.add(res_name) if res_name in self.usages: if self.usages[res_name].in_use < 0: return {res_name: 2} else: return {res_name: self.usages[res_name].in_use - 1} return {res_name: 0} return sync self.resources = {} for res_name in ('shares', 'gigabytes'): method_name = '_sync_%s' % res_name sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name) res = quota.ReservableResource(res_name, '_sync_%s' % res_name) self.resources[res_name] = res self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.usages = {} self.usages_created = {} self.reservations_created = {} def fake_get_session(): return FakeSession() def fake_get_project_quota_usages(context, session, project_id): return self.usages.copy() def fake_get_user_quota_usages(context, session, project_id, user_id): return self.usages.copy() def fake_quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh, session=None, save=True): quota_usage_ref = self._make_quota_usage( project_id, user_id, resource, in_use, reserved, until_refresh, timeutils.utcnow(), timeutils.utcnow()) self.usages_created[resource] = quota_usage_ref return quota_usage_ref def fake_reservation_create(context, uuid, usage_id, project_id, user_id, resource, delta, expire, session=None): reservation_ref = self._make_reservation( uuid, usage_id, project_id, user_id, resource, delta, expire, timeutils.utcnow(), timeutils.utcnow()) self.reservations_created[resource] = reservation_ref return reservation_ref self.stubs.Set(sqa_api, 'get_session', fake_get_session) self.stubs.Set(sqa_api, '_get_project_quota_usages', fake_get_project_quota_usages) self.stubs.Set(sqa_api, '_get_user_quota_usages', fake_get_user_quota_usages) self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create) self.stubs.Set(sqa_api, 
'_reservation_create', fake_reservation_create) self.patcher = mock.patch.object(timeutils, 'utcnow') self.mock_utcnow = self.patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def tearDown(self): self.patcher.stop() super(QuotaReserveSqlAlchemyTestCase, self).tearDown() def _make_quota_usage(self, project_id, user_id, resource, in_use, reserved, until_refresh, created_at, updated_at): quota_usage_ref = FakeUsage() quota_usage_ref.id = len(self.usages) + len(self.usages_created) quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.created_at = created_at quota_usage_ref.updated_at = updated_at quota_usage_ref.deleted_at = None quota_usage_ref.deleted = False return quota_usage_ref def init_usage(self, project_id, user_id, resource, in_use, reserved, until_refresh=None, created_at=None, updated_at=None): if created_at is None: created_at = timeutils.utcnow() if updated_at is None: updated_at = timeutils.utcnow() quota_usage_ref = self._make_quota_usage(project_id, user_id, resource, in_use, reserved, until_refresh, created_at, updated_at) self.usages[resource] = quota_usage_ref def compare_usage(self, usage_dict, expected): for usage in expected: resource = usage['resource'] for key, value in usage.items(): actual = getattr(usage_dict[resource], key) self.assertEqual(actual, value, "%s != %s on usage for resource %s" % (actual, value, resource)) def _make_reservation(self, uuid, usage_id, project_id, user_id, resource, delta, expire, created_at, updated_at): reservation_ref = sqa_models.Reservation() reservation_ref.id = len(self.reservations_created) reservation_ref.uuid = uuid reservation_ref.usage_id = usage_id reservation_ref.project_id = project_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.created_at = created_at 
reservation_ref.updated_at = updated_at reservation_ref.deleted_at = None reservation_ref.deleted = False return reservation_ref def compare_reservation(self, reservations, expected): reservations = set(reservations) for resv in expected: resource = resv['resource'] resv_obj = self.reservations_created[resource] self.assertIn(resv_obj.uuid, reservations) reservations.discard(resv_obj.uuid) for key, value in resv.items(): actual = getattr(resv_obj, key) self.assertEqual(actual, value, "%s != %s on reservation for resource %s" % (actual, value, resource)) self.assertEqual(len(reservations), 0) def test_quota_reserve_create_usages(self): context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set(['shares', 'gigabytes'])) self.compare_usage(self.usages_created, [dict(resource='shares', project_id='test_project', in_use=0, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=0, reserved=2 * 1024, until_refresh=None), ]) self.compare_reservation( result, [dict(resource='shares', usage_id=self.usages_created['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages_created['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_negative_in_use(self): self.init_usage('test_project', 'test_user', 'shares', -1, 0, until_refresh=1) self.init_usage('test_project', 'test_user', 'gigabytes', -1, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 5, 0) self.assertEqual(self.sync_called, set(['shares', 'gigabytes'])) self.compare_usage(self.usages, [dict(resource='shares', 
project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_until_refresh(self): self.init_usage('test_project', 'test_user', 'shares', 3, 0, until_refresh=1) self.init_usage('test_project', 'test_user', 'gigabytes', 3, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 5, 0) self.assertEqual(self.sync_called, set(['shares', 'gigabytes'])) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_max_age(self): max_age = 3600 record_created = (timeutils.utcnow() - datetime.timedelta(seconds=max_age)) self.init_usage('test_project', 'test_user', 'shares', 3, 0, created_at=record_created, updated_at=record_created) self.init_usage('test_project', 'test_user', 'gigabytes', 3, 0, created_at=record_created, updated_at=record_created) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, 
self.expire, 0, max_age) self.assertEqual(self.sync_called, set(['shares', 'gigabytes'])) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=2, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_no_refresh(self): self.init_usage('test_project', 'test_user', 'shares', 3, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 3, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=3, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=3, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_unders(self): self.init_usage('test_project', 'test_user', 'shares', 1, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 1 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) 
self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=1, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=1 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual(self.usages_created, {}) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=-2 * 1024), ]) def test_quota_reserve_overs(self): self.init_usage('test_project', 'test_user', 'shares', 4, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 10 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) self.assertRaises(exception.OverQuota, sqa_api.quota_reserve, context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=4, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=10 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual(self.usages_created, {}) self.assertEqual(self.reservations_created, {}) def test_quota_reserve_reduction(self): self.init_usage('test_project', 'test_user', 'shares', 10, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 20 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=10, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=20 * 1024, reserved=0, until_refresh=None), ]) 
self.assertEqual(self.usages_created, {}) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], project_id='test_project', delta=-2 * 1024), ]) manila-2013.2.dev175.gbf1a399/manila/tests/__init__.py0000664000175000017500000000526212301410454022231 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila.tests` -- Manila Unittests ===================================================== .. automodule:: manila.tests :platform: Unix .. moduleauthor:: Jesse Andrews .. moduleauthor:: Devin Carlen .. moduleauthor:: Vishvananda Ishaya .. moduleauthor:: Joshua McKenty .. moduleauthor:: Manish Singh .. 
moduleauthor:: Andy Smith """ import eventlet eventlet.monkey_patch() from oslo.config import cfg # See http://code.google.com/p/python-nose/issues/detail?id=373 # The code below enables nosetests to work with i18n _() blocks import __builtin__ setattr(__builtin__, '_', lambda x: x) import os import shutil from manila.db.sqlalchemy.session import get_engine CONF = cfg.CONF _DB = None def reset_db(): if CONF.sql_connection == "sqlite://": engine = get_engine() engine.dispose() conn = engine.connect() conn.connection.executescript(_DB) else: shutil.copyfile(os.path.join(CONF.state_path, CONF.sqlite_clean_db), os.path.join(CONF.state_path, CONF.sqlite_db)) def setup(): import mox # Fail fast if you don't have mox. Workaround for bug 810424 from manila.db import migration from manila.tests import conf_fixture conf_fixture.set_defaults(CONF) if CONF.sql_connection == "sqlite://": if migration.db_version() > 1: return else: testdb = os.path.join(CONF.state_path, CONF.sqlite_db) if os.path.exists(testdb): return migration.db_sync() if CONF.sql_connection == "sqlite://": global _DB engine = get_engine() conn = engine.connect() _DB = "".join(line for line in conn.connection.iterdump()) else: cleandb = os.path.join(CONF.state_path, CONF.sqlite_clean_db) shutil.copyfile(testdb, cleandb) manila-2013.2.dev175.gbf1a399/manila/tests/db/0000775000175000017500000000000012301410516020477 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/db/__init__.py0000664000175000017500000000133512301410454022613 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`db` -- Stubs for DB API ============================= """ manila-2013.2.dev175.gbf1a399/manila/tests/db/fakes.py0000664000175000017500000000262412301410454022147 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Stubouts, mocks and fixtures for the test suite.""" from manila import db class FakeModel(object): """Stubs out for model.""" def __init__(self, values): self.values = values def __getattr__(self, name): return self.values[name] def __getitem__(self, key): if key in self.values: return self.values[key] else: raise NotImplementedError() def __repr__(self): return '' % self.values def stub_out(stubs, funcs): """Set the stubs in mapping in the db api.""" for func in funcs: func_name = '_'.join(func.__name__.split('_')[1:]) stubs.Set(db, func_name, func) manila-2013.2.dev175.gbf1a399/manila/tests/test_wsgi.py0000664000175000017500000002063012301410454022476 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for `manila.wsgi`.""" import os.path import ssl import tempfile import unittest import urllib2 from oslo.config import cfg import webob import webob.dec from manila.api.middleware import fault from manila import exception from manila import test from manila import utils import manila.wsgi CONF = cfg.CONF TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'var')) class TestLoaderNothingExists(test.TestCase): """Loader tests where os.path.exists always returns False.""" def setUp(self): super(TestLoaderNothingExists, self).setUp() self.stubs.Set(os.path, 'exists', lambda _: False) def test_config_not_found(self): self.assertRaises( manila.exception.ConfigNotFound, manila.wsgi.Loader, ) class TestLoaderNormalFilesystem(unittest.TestCase): """Loader tests with normal filesystem (unmodified os.path module).""" _paste_config = """ [app:test_app] use = egg:Paste#static document_root = /tmp """ def setUp(self): self.config = tempfile.NamedTemporaryFile(mode="w+t") self.config.write(self._paste_config.lstrip()) self.config.seek(0) self.config.flush() self.loader = manila.wsgi.Loader(self.config.name) def test_config_found(self): self.assertEquals(self.config.name, self.loader.config_path) def test_app_not_found(self): self.assertRaises( manila.exception.PasteAppNotFound, self.loader.load_app, "non-existent app", ) def test_app_found(self): url_parser = self.loader.load_app("test_app") self.assertEquals("/tmp", url_parser.directory) def tearDown(self): self.config.close() class TestWSGIServer(unittest.TestCase): """WSGI server tests.""" def _ipv6_configured(): try: out, err = utils.execute('cat', '/proc/net/if_inet6') except exception.ProcessExecutionError: return False if not out: return False return True def test_no_app(self): server = manila.wsgi.Server("test_app", None) self.assertEquals("test_app", server.name) def test_start_random_port(self): server = manila.wsgi.Server("test_random_port", None, host="127.0.0.1") self.assertEqual(0, 
server.port) server.start() self.assertNotEqual(0, server.port) server.stop() server.wait() @test.skip_if(not _ipv6_configured(), "Test requires an IPV6 configured interface") def test_start_random_port_with_ipv6(self): server = manila.wsgi.Server("test_random_port", None, host="::1") server.start() self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() def test_app(self): greetings = 'Hello, World!!!' def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = manila.wsgi.Server("test_app", hello_world) server.start() response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port) self.assertEquals(greetings, response.read()) server.stop() def test_app_using_ssl(self): CONF.set_default("ssl_cert_file", os.path.join(TEST_VAR_DIR, 'certificate.crt')) CONF.set_default("ssl_key_file", os.path.join(TEST_VAR_DIR, 'privatekey.key')) greetings = 'Hello, World!!!' @webob.dec.wsgify def hello_world(req): return greetings server = manila.wsgi.Server("test_app", hello_world) server.start() response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port) self.assertEquals(greetings, response.read()) server.stop() @test.skip_if(not _ipv6_configured(), "Test requires an IPV6 configured interface") def test_app_using_ipv6_and_ssl(self): CONF.set_default("ssl_cert_file", os.path.join(TEST_VAR_DIR, 'certificate.crt')) CONF.set_default("ssl_key_file", os.path.join(TEST_VAR_DIR, 'privatekey.key')) greetings = 'Hello, World!!!' 
@webob.dec.wsgify def hello_world(req): return greetings server = manila.wsgi.Server("test_app", hello_world, host="::1", port=0) server.start() response = urllib2.urlopen('https://[::1]:%d/' % server.port) self.assertEquals(greetings, response.read()) server.stop() class ExceptionTest(test.TestCase): def _wsgi_app(self, inner_app): return fault.FaultWrapper(inner_app) def _do_test_exception_safety_reflected_in_faults(self, expose): class ExceptionWithSafety(exception.ManilaException): safe = expose @webob.dec.wsgify def fail(req): raise ExceptionWithSafety('some explanation') api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertTrue('{"computeFault' in resp.body, resp.body) expected = ('ExceptionWithSafety: some explanation' if expose else 'The server has either erred or is incapable ' 'of performing the requested operation.') self.assertTrue(expected in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) def test_safe_exceptions_are_described_in_faults(self): self._do_test_exception_safety_reflected_in_faults(True) def test_unsafe_exceptions_are_not_described_in_faults(self): self._do_test_exception_safety_reflected_in_faults(False) def _do_test_exception_mapping(self, exception_type, msg): @webob.dec.wsgify def fail(req): raise exception_type(msg) api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertTrue(msg in resp.body, resp.body) self.assertEqual(resp.status_int, exception_type.code, resp.body) if hasattr(exception_type, 'headers'): for (key, value) in exception_type.headers.iteritems(): self.assertTrue(key in resp.headers) self.assertEquals(resp.headers[key], value) def test_quota_error_mapping(self): self._do_test_exception_mapping(exception.QuotaError, 'too many used') def test_non_manila_notfound_exception_mapping(self): class ExceptionWithCode(Exception): code = 404 self._do_test_exception_mapping(ExceptionWithCode, 'NotFound') def 
test_non_manila_exception_mapping(self): class ExceptionWithCode(Exception): code = 417 self._do_test_exception_mapping(ExceptionWithCode, 'Expectation failed') def test_exception_with_none_code_throws_500(self): class ExceptionWithNoneCode(Exception): code = None msg = 'Internal Server Error' @webob.dec.wsgify def fail(req): raise ExceptionWithNoneCode() api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertEqual(500, resp.status_int) manila-2013.2.dev175.gbf1a399/manila/tests/monkey_patch_example/0000775000175000017500000000000012301410516024306 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/monkey_patch_example/example_b.py0000664000175000017500000000166712301410454026627 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module B for testing utils.monkey_patch().""" def example_function_b(): return 'Example function' class ExampleClassB(): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 manila-2013.2.dev175.gbf1a399/manila/tests/monkey_patch_example/example_a.py0000664000175000017500000000166612301410454026625 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module A for testing utils.monkey_patch().""" def example_function_a(): return 'Example function' class ExampleClassA(): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 manila-2013.2.dev175.gbf1a399/manila/tests/monkey_patch_example/__init__.py0000664000175000017500000000217712301410454026427 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] def example_decorator(name, function): """decorator for notify which is used from utils.monkey_patch(). 
:param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) return function(*args, **kwarg) return wrapped_func manila-2013.2.dev175.gbf1a399/manila/tests/test_share_api.py0000664000175000017500000006312712301410454023470 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Share API module.""" import datetime import random import uuid import mock import mox import suds from manila import context from manila import db as db_driver from manila import exception from manila.openstack.common import timeutils from manila import quota from manila.scheduler import rpcapi as scheduler_rpcapi from manila import share from manila.share import api as share_api from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests.db import fakes as db_fakes def fake_share(id, **kwargs): share = { 'id': id, 'size': 1, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'snapshot_id': None, 'share_network_id': None, 'availability_zone': 'fakeaz', 'status': 'fakestatus', 'display_name': 'fakename', 'metadata': None, 'display_description': 'fakedesc', 'share_proto': 'nfs', 'export_location': 'fake_location', 'host': 'fakehost', 'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1) } share.update(kwargs) return share def fake_snapshot(id, **kwargs): snapshot = { 'id': id, 'share_size': 1, 'size': 1, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'share_id': None, 'availability_zone': 'fakeaz', 'status': 'fakestatus', 'display_name': 'fakename', 'display_description': 'fakedesc', 'share_proto': 'nfs', 'export_location': 'fake_location', 'progress': 'fakeprogress99%', 'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1) } snapshot.update(kwargs) return snapshot def fake_access(id, **kwargs): access = { 'id': id, 'share_id': 'fakeshareid', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'state': 'fakeactive', 'STATE_NEW': 'fakenew', 'STATE_ACTIVE': 'fakeactive', 'STATE_DELETING': 'fakedeleting', 'STATE_DELETED': 'fakedeleted', 'STATE_ERROR': 'fakeerror', } access.update(kwargs) return 
db_fakes.FakeModel(access) class ShareAPITestCase(test.TestCase): def setUp(self): super(ShareAPITestCase, self).setUp() self.context = context.get_admin_context() self.scheduler_rpcapi = self.mox.CreateMock( scheduler_rpcapi.SchedulerAPI) self.share_rpcapi = self.mox.CreateMock(share_rpcapi.ShareAPI) self.api = share.API() self.stubs.Set(self.api, 'scheduler_rpcapi', self.scheduler_rpcapi) self.stubs.Set(self.api, 'share_rpcapi', self.share_rpcapi) self.stubs.Set(quota.QUOTAS, 'reserve', lambda *args, **kwargs: None) self.patcher = mock.patch.object(timeutils, 'utcnow') self.mock_utcnow = self.patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def tearDown(self): self.patcher.stop() super(ShareAPITestCase, self).tearDown() def test_create(self): date = datetime.datetime(1, 1, 1, 1, 1, 1) self.mock_utcnow.return_value = date share = fake_share('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status='creating') options = share.copy() for name in ('id', 'export_location', 'host', 'launched_at', 'terminated_at'): options.pop(name, None) request_spec = {'share_properties': options, 'share_proto': share['share_proto'], 'share_id': share['id'], 'snapshot_id': share['snapshot_id'], } self.mox.StubOutWithMock(db_driver, 'share_create') db_driver.share_create(self.context, options).AndReturn(share) self.scheduler_rpcapi.create_share(self.context, mox.IgnoreArg(), share['id'], share['snapshot_id'], request_spec=request_spec, filter_properties={}) self.mox.ReplayAll() self.api.create(self.context, 'nfs', '1', 'fakename', 'fakedesc', availability_zone='fakeaz') def test_create_snapshot(self): date = datetime.datetime(1, 1, 1, 1, 1, 1) self.mock_utcnow.return_value = date share = fake_share('fakeid', status='available') snapshot = fake_snapshot('fakesnapshotid', share_id=share['id'], status='creating') fake_name = 'fakename' fake_desc = 'fakedesc' options = {'share_id': share['id'], 'user_id': self.context.user_id, 
'project_id': self.context.project_id, 'status': "creating", 'progress': '0%', 'share_size': share['size'], 'size': 1, 'display_name': fake_name, 'display_description': fake_desc, 'share_proto': share['share_proto'], 'export_location': share['export_location']} self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'create_snapshot', share) self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') quota.QUOTAS.reserve(self.context, snapshots=1, gigabytes=1).\ AndReturn('reservation') self.mox.StubOutWithMock(db_driver, 'share_snapshot_create') db_driver.share_snapshot_create(self.context, options).AndReturn(snapshot) self.mox.StubOutWithMock(quota.QUOTAS, 'commit') quota.QUOTAS.commit(self.context, 'reservation') self.share_rpcapi.create_snapshot(self.context, share, snapshot) self.mox.ReplayAll() self.api.create_snapshot(self.context, share, fake_name, fake_desc) def test_delete_snapshot(self): date = datetime.datetime(1, 1, 1, 1, 1, 1) self.mock_utcnow.return_value = date share = fake_share('fakeid') snapshot = fake_snapshot('fakesnapshotid', share_id=share['id'], status='available') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy( self.context, 'share', 'delete_snapshot', snapshot) self.mox.StubOutWithMock(db_driver, 'share_snapshot_update') db_driver.share_snapshot_update(self.context, snapshot['id'], {'status': 'deleting'}) self.mox.StubOutWithMock(db_driver, 'share_get') db_driver.share_get(self.context, snapshot['share_id']).AndReturn(share) self.share_rpcapi.delete_snapshot(self.context, snapshot, share['host']) self.mox.ReplayAll() self.api.delete_snapshot(self.context, snapshot) def test_delete_snapshot_wrong_status(self): snapshot = fake_snapshot('fakesnapshotid', share_id='fakeshareid', status='creating') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy( self.context, 'share', 'delete_snapshot', snapshot) self.mox.ReplayAll() 
self.assertRaises(exception.InvalidShareSnapshot, self.api.delete_snapshot, self.context, snapshot) def test_create_snapshot_if_share_not_available(self): share = fake_share('fakeid', status='error') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'create_snapshot', share) self.mox.ReplayAll() self.assertRaises(exception.InvalidShare, self.api.create_snapshot, self.context, share, 'fakename', 'fakedesc') def test_create_from_snapshot_available(self): date = datetime.datetime(1, 1, 1, 1, 1, 1) self.mock_utcnow.return_value = date snapshot = fake_snapshot('fakesnapshotid', share_id='fakeshare_id', status='available') share = fake_share('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, snapshot_id=snapshot['id'], status='creating') options = share.copy() for name in ('id', 'export_location', 'host', 'launched_at', 'terminated_at'): options.pop(name, None) request_spec = {'share_properties': options, 'share_proto': share['share_proto'], 'share_id': share['id'], 'snapshot_id': share['snapshot_id'], } self.mox.StubOutWithMock(db_driver, 'share_create') db_driver.share_create(self.context, options).AndReturn(share) self.scheduler_rpcapi.create_share(self.context, mox.IgnoreArg(), share['id'], share['snapshot_id'], request_spec=request_spec, filter_properties={}) self.mox.ReplayAll() self.api.create(self.context, 'nfs', '1', 'fakename', 'fakedesc', snapshot=snapshot, availability_zone='fakeaz') def test_get_snapshot(self): fake_get_snap = {'fake_key': 'fake_val'} self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'get_snapshot') self.mox.StubOutWithMock(db_driver, 'share_snapshot_get') db_driver.share_snapshot_get(self.context, 'fakeid').AndReturn(fake_get_snap) self.mox.ReplayAll() rule = self.api.get_snapshot(self.context, 'fakeid') self.assertEqual(rule, fake_get_snap) def test_create_from_snapshot_not_available(self): 
snapshot = fake_snapshot('fakesnapshotid', share_id='fakeshare_id', status='error') self.mox.ReplayAll() self.assertRaises(exception.InvalidShareSnapshot, self.api.create, self.context, 'nfs', '1', 'fakename', 'fakedesc', snapshot=snapshot, availability_zone='fakeaz') def test_create_from_snapshot_larger_size(self): snapshot = fake_snapshot(1, size=100, status='available') self.mox.ReplayAll() self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 1, 'fakename', 'fakedesc', availability_zone='fakeaz', snapshot=snapshot) def test_create_wrong_size_0(self): self.mox.ReplayAll() self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 0, 'fakename', 'fakedesc', availability_zone='fakeaz') def test_create_wrong_size_some(self): self.mox.ReplayAll() self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 'some', 'fakename', 'fakedesc', availability_zone='fakeaz') def test_delete_available(self): date = datetime.datetime(2, 2, 2, 2, 2, 2) self.mock_utcnow.return_value = date share = fake_share('fakeid', status='available') options = {'status': 'deleting', 'terminated_at': date} deleting_share = share.copy() deleting_share.update(options) self.mox.StubOutWithMock(db_driver, 'share_update') db_driver.share_update(self.context, share['id'], options).\ AndReturn(deleting_share) self.share_rpcapi.delete_share(self.context, deleting_share) self.mox.ReplayAll() self.api.delete(self.context, share) self.mox.UnsetStubs() self.mox.VerifyAll() def test_delete_error(self): date = datetime.datetime(2, 2, 2, 2, 2, 2) self.mock_utcnow.return_value = date share = fake_share('fakeid', status='error') options = {'status': 'deleting', 'terminated_at': date} deleting_share = share.copy() deleting_share.update(options) self.mox.StubOutWithMock(db_driver, 'share_update') db_driver.share_update(self.context, share['id'], options).\ AndReturn(deleting_share) self.share_rpcapi.delete_share(self.context, deleting_share) 
self.mox.ReplayAll() self.api.delete(self.context, share) self.mox.UnsetStubs() self.mox.VerifyAll() def test_delete_wrong_status(self): share = fake_share('fakeid') self.mox.ReplayAll() self.assertRaises(exception.InvalidShare, self.api.delete, self.context, share) def test_delete_no_host(self): share = fake_share('fakeid') share['host'] = None self.mox.StubOutWithMock(db_driver, 'share_delete') db_driver.share_delete(mox.IsA(context.RequestContext), 'fakeid') self.mox.ReplayAll() self.api.delete(self.context, share) def test_get(self): self.mox.StubOutWithMock(db_driver, 'share_get') db_driver.share_get(self.context, 'fakeid').AndReturn('fakeshare') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'get', 'fakeshare') self.mox.ReplayAll() result = self.api.get(self.context, 'fakeid') self.assertEqual(result, 'fakeshare') def test_get_all_admin_not_all_tenants(self): ctx = context.RequestContext('fakeuid', 'fakepid', id_admin=True) self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(ctx, 'share', 'get_all') self.mox.StubOutWithMock(db_driver, 'share_get_all_by_project') db_driver.share_get_all_by_project(ctx, 'fakepid') self.mox.ReplayAll() self.api.get_all(ctx) def test_get_all_admin_all_tenants(self): self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'get_all') self.mox.StubOutWithMock(db_driver, 'share_get_all') db_driver.share_get_all(self.context) self.mox.ReplayAll() self.api.get_all(self.context, search_opts={'all_tenants': 1}) def test_get_all_not_admin(self): ctx = context.RequestContext('fakeuid', 'fakepid', id_admin=False) self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(ctx, 'share', 'get_all') self.mox.StubOutWithMock(db_driver, 'share_get_all_by_project') db_driver.share_get_all_by_project(ctx, 'fakepid') self.mox.ReplayAll() self.api.get_all(ctx) def 
test_get_all_not_admin_search_opts(self): search_opts = {'size': 'fakesize'} fake_objs = [{'name': 'fakename1'}, search_opts] ctx = context.RequestContext('fakeuid', 'fakepid', id_admin=False) self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(ctx, 'share', 'get_all') self.mox.StubOutWithMock(db_driver, 'share_get_all_by_project') db_driver.share_get_all_by_project(ctx, 'fakepid').AndReturn(fake_objs) self.mox.ReplayAll() result = self.api.get_all(ctx, search_opts) self.assertEqual([search_opts], result) def test_get_all_snapshots_admin_not_all_tenants(self): ctx = context.RequestContext('fakeuid', 'fakepid', id_admin=True) self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(ctx, 'share', 'get_all_snapshots') self.mox.StubOutWithMock(db_driver, 'share_snapshot_get_all_by_project') db_driver.share_snapshot_get_all_by_project(ctx, 'fakepid') self.mox.ReplayAll() self.api.get_all_snapshots(ctx) def test_get_all_snapshots_admin_all_tenants(self): self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'get_all_snapshots') self.mox.StubOutWithMock(db_driver, 'share_snapshot_get_all') db_driver.share_snapshot_get_all(self.context) self.mox.ReplayAll() self.api.get_all_snapshots(self.context, search_opts={'all_tenants': 1}) def test_get_all_snapshots_not_admin(self): ctx = context.RequestContext('fakeuid', 'fakepid', id_admin=False) self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(ctx, 'share', 'get_all_snapshots') self.mox.StubOutWithMock(db_driver, 'share_snapshot_get_all_by_project') db_driver.share_snapshot_get_all_by_project(ctx, 'fakepid') self.mox.ReplayAll() self.api.get_all_snapshots(ctx) def test_get_all_snapshots_not_admin_search_opts(self): search_opts = {'size': 'fakesize'} fake_objs = [{'name': 'fakename1'}, search_opts] ctx = context.RequestContext('fakeuid', 'fakepid', id_admin=False) 
self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(ctx, 'share', 'get_all_snapshots') self.mox.StubOutWithMock(db_driver, 'share_snapshot_get_all_by_project') db_driver.share_snapshot_get_all_by_project(ctx, 'fakepid').\ AndReturn(fake_objs) self.mox.ReplayAll() result = self.api.get_all_snapshots(ctx, search_opts) self.assertEqual([search_opts], result) def test_allow_access(self): share = fake_share('fakeid', status='available') values = {'share_id': share['id'], 'access_type': 'fakeacctype', 'access_to': 'fakeaccto'} self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'allow_access') self.mox.StubOutWithMock(db_driver, 'share_access_create') db_driver.share_access_create(self.context, values).\ AndReturn('fakeacc') self.share_rpcapi.allow_access(self.context, share, 'fakeacc') self.mox.ReplayAll() access = self.api.allow_access(self.context, share, 'fakeacctype', 'fakeaccto') self.assertEqual(access, 'fakeacc') def test_allow_access_status_not_available(self): share = fake_share('fakeid', status='error') self.mox.ReplayAll() self.assertRaises(exception.InvalidShare, self.api.allow_access, self.context, share, 'fakeacctype', 'fakeaccto') def test_allow_access_no_host(self): share = fake_share('fakeid', host=None) self.mox.ReplayAll() self.assertRaises(exception.InvalidShare, self.api.allow_access, self.context, share, 'fakeacctype', 'fakeaccto') def test_deny_access_error(self): share = fake_share('fakeid', status='available') access = fake_access('fakaccid', state='fakeerror') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'deny_access') self.mox.StubOutWithMock(db_driver, 'share_access_delete') db_driver.share_access_delete(self.context, access['id']) self.mox.ReplayAll() self.api.deny_access(self.context, share, access) def test_deny_access_active(self): share = fake_share('fakeid', 
status='available') access = fake_access('fakaccid', state='fakeactive') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'deny_access') self.mox.StubOutWithMock(db_driver, 'share_access_update') db_driver.share_access_update(self.context, access['id'], {'state': 'fakedeleting'}) self.share_rpcapi.deny_access(self.context, share, access) self.mox.ReplayAll() self.api.deny_access(self.context, share, access) def test_deny_access_not_active_not_error(self): share = fake_share('fakeid', status='available') access = fake_access('fakaccid', state='fakenew') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'deny_access') self.mox.ReplayAll() self.assertRaises(exception.InvalidShareAccess, self.api.deny_access, self.context, share, access) def test_deny_access_status_not_available(self): share = fake_share('fakeid', status='error') self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'deny_access') self.mox.ReplayAll() self.assertRaises(exception.InvalidShare, self.api.deny_access, self.context, share, 'fakeacc') def test_deny_access_no_host(self): share = fake_share('fakeid', host=None) self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'deny_access') self.mox.ReplayAll() self.assertRaises(exception.InvalidShare, self.api.deny_access, self.context, share, 'fakeacc') def test_access_get(self): self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'access_get') self.mox.StubOutWithMock(db_driver, 'share_access_get') db_driver.share_access_get(self.context, 'fakeid').AndReturn('fake') self.mox.ReplayAll() rule = self.api.access_get(self.context, 'fakeid') self.assertEqual(rule, 'fake') def test_access_get_all(self): share = fake_share('fakeid') 
self.mox.StubOutWithMock(share_api.policy, 'check_policy') share_api.policy.check_policy(self.context, 'share', 'access_get_all') self.mox.StubOutWithMock(db_driver, 'share_access_get_all_for_share') db_driver.share_access_get_all_for_share(self.context, 'fakeid').\ AndReturn([fake_access('fakeacc0id', state='fakenew'), fake_access('fakeacc1id', state='fakeerror')]) self.mox.ReplayAll() rules = self.api.access_get_all(self.context, share) self.assertEqual(rules, [{'id': 'fakeacc0id', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'state': 'fakenew'}, {'id': 'fakeacc1id', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'state': 'fakeerror'}]) def test_share_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} share_id = str(uuid.uuid4()) db_driver.share_create(self.context, {'id': share_id, 'metadata': metadata}) self.assertEqual(metadata, db_driver.share_metadata_get(self.context, share_id)) def test_share_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} share_id = str(uuid.uuid4()) db_driver.share_create(self.context, {'id': share_id, 'metadata': metadata1}) db_driver.share_metadata_update(self.context, share_id, metadata2, False) self.assertEqual(should_be, db_driver.share_metadata_get(self.context, share_id)) def test_share_metadata_update_delete(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '4'} should_be = metadata2 share_id = str(uuid.uuid4()) db_driver.share_create(self.context, {'id': share_id, 'metadata': metadata1}) db_driver.share_metadata_update(self.context, share_id, metadata2, True) self.assertEqual(should_be, db_driver.share_metadata_get(self.context, share_id)) manila-2013.2.dev175.gbf1a399/manila/tests/fake_network.py0000664000175000017500000001347512301410454023156 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you 
may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # vim: tabstop=4 shiftwidth=4 softtabstop=4 from oslo.config import cfg from manila.openstack.common import log as logging from manila.openstack.common import uuidutils CONF = cfg.CONF LOG = logging.getLogger(__name__) class FakeNetwork(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_net_id') self.name = kwargs.pop('name', 'net_name') self.subnets = kwargs.pop('subnets', []) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeSubnet(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_subnet_id') self.network_id = kwargs.pop('network_id', 'fake_net_id') self.cidr = kwargs.pop('cidr', 'fake_cidr') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakePort(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_subnet_id') self.network_id = kwargs.pop('network_id', 'fake_net_id') self.fixed_ips = kwargs.pop('fixed_ips', []) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeRouter(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_router_id') self.name = kwargs.pop('name', 'fake_router_name') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) class API(object): """Fake Network API""" 
admin_tenant_id = 'fake admin tenant id' network = { "status": "ACTIVE", "subnets": ["fake_subnet_id"], "name": "fake_network", "tenant_id": "fake_tenant_id", "shared": False, "id": "fake_id", "router:external": False, } port = { "status": "ACTIVE", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "fake_network_id", "tenant_id": "fake_tenant_id", "extra_dhcp_opts": [], "device_owner": "fake", "binding:capabilities": {"port_filter": True}, "mac_address": "00:00:00:00:00:00", "fixed_ips": [ {"subnet_id": "56537094-98d7-430a-b513-81c4dc6d9903", "ip_address": "10.12.12.10"} ], "id": "fake_port_id", "security_groups": ["fake_sec_group_id"], "device_id": "fake_device_id" } def get_all_tenant_networks(self, tenant_id): net1 = self.network.copy() net1['tenant_id'] = tenant_id net1['id'] = uuidutils.generate_uuid() net2 = self.network.copy() net2['tenant_id'] = tenant_id net2['id'] = uuidutils.generate_uuid() return [net1, net2] def create_port(self, tenant_id, network_id, subnet_id=None, fixed_ip=None, device_owner=None, device_id=None): port = self.port.copy() port['network_id'] = network_id port['admin_state_up'] = True port['tenant_id'] = tenant_id if fixed_ip: fixed_ip_dict = {'ip_address': fixed_ip} if subnet_id: fixed_ip_dict.update({'subnet_id': subnet_id}) port['fixed_ips'] = [fixed_ip_dict] if device_owner: port['device_owner'] = device_owner if device_id: port['device_id'] = device_id return port def list_ports(self, **search_opts): """List ports for the client based on search options.""" ports = [] for i in range(2): ports.append(self.port.copy()) for port in ports: port['id'] = uuidutils.generate_uuid() for key, val in search_opts.items(): port[key] = val if 'id' in search_opts: return ports return ports def show_port(self, port_id): """Return the port for the client given the port id.""" port = self.port.copy() port['id'] = port_id return port def delete_port(self, port_id): pass def get_subnet(self, subnet_id): pass def subnet_create(self, 
*args, **kwargs): pass def router_add_interface(self, *args, **kwargs): pass def show_router(self, *args, **kwargs): pass def update_port_fixed_ips(self, *args, **kwargs): pass def get_all_networks(self): """Get all networks for client.""" net1 = self.network.copy() net2 = self.network.copy() net1['id'] = uuidutils.generate_uuid() net2['id'] = uuidutils.generate_uuid() return [net1, net2] def get_network(self, network_uuid): """Get specific network for client.""" network = self.network.copy() network['id'] = network_uuid return network def network_create(self, tenant_id, name): network = self.network.copy() network['tenant_id'] = tenant_id network['name'] = name return network manila-2013.2.dev175.gbf1a399/manila/tests/network/0000775000175000017500000000000012301410516021603 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/network/neutron/0000775000175000017500000000000012301410516023275 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/network/neutron/test_neutron_api.py0000664000175000017500000004766312301410454027252 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # vim: tabstop=4 shiftwidth=4 softtabstop=4 import mock from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client as clientv20 from oslo.config import cfg import unittest from manila import context from manila.db import base from manila import exception from manila.network import neutron from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila.tests.db import fakes CONF = cfg.CONF class FakeNeutronClient(object): def create_port(self, body): return body def delete_port(self, port_id): pass def show_port(self, port_id): pass def list_ports(self, **search_opts): pass def list_networks(self): pass def show_network(self, network_uuid): pass def show_subnet(self, subnet_uuid): pass def create_router(self, body): return body def list_routers(self): pass def create_network(self, body): return body def create_subnet(self, body): return body def update_port(self, port_id, body): return body def add_interface_router(self, router_id, subnet_id, port_id): pass def update_router(self, router_id, body): return body def show_router(self, router_id): pass def list_extensions(self): pass class NeutronApiTest(unittest.TestCase): def setUp(self): super(NeutronApiTest, self).setUp() self._create_neutron_api() @mock.patch.object(base, 'Base', fakes.FakeModel) @mock.patch.object(context, 'get_admin_context', mock.Mock(return_value='context')) @mock.patch.object(neutron, 'get_client', mock.Mock(return_value=FakeNeutronClient())) def _create_neutron_api(self): self.neutron_api = neutron_api.API() @mock.patch.object(base, 'Base', fakes.FakeModel) @mock.patch.object(context, 'get_admin_context', mock.Mock(return_value='context')) @mock.patch.object(neutron, 'get_client', mock.Mock()) def test_create_api_object(self): with mock.patch.object(base.Base, '__init__', mock.Mock()): neutron_api_obj = neutron_api.API() base.Base.__init__.assert_called_once() 
neutron.get_client.assert_called_once_with('context') def test_create_port_with_all_args(self): port_args = {'tenant_id': 'test tenant', 'network_id': 'test net', 'host_id': 'test host', 'subnet_id': 'test subnet', 'fixed_ip': 'test ip', 'device_owner': 'test owner', 'device_id': 'test device', 'mac_address': 'test mac', 'security_group_ids': 'test group', 'dhcp_opts': 'test dhcp'} with mock.patch.object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)): port = self.neutron_api.create_port(**port_args) self.assertEqual(port['tenant_id'], port_args['tenant_id']) self.assertEqual(port['network_id'], port_args['network_id']) self.assertEqual(port['binding:host_id'], port_args['host_id']) self.assertEqual(port['fixed_ips'][0]['subnet_id'], port_args['subnet_id']) self.assertEqual(port['fixed_ips'][0]['ip_address'], port_args['fixed_ip']) self.assertEqual(port['device_owner'], port_args['device_owner']) self.assertEqual(port['device_id'], port_args['device_id']) self.assertEqual(port['mac_address'], port_args['mac_address']) self.assertEqual(port['security_groups'], port_args['security_group_ids']) self.assertEqual(port['extra_dhcp_opts'], port_args['dhcp_opts']) def test_create_port_with_required_args(self): port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} with mock.patch.object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)): port = self.neutron_api.create_port(**port_args) self.assertEqual(port['tenant_id'], port_args['tenant_id']) self.assertEqual(port['network_id'], port_args['network_id']) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_port_exception(self): port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} client_create_port_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)): with 
mock.patch.object(self.neutron_api.client, 'create_port', client_create_port_mock): self.assertRaises(exception.NetworkException, self.neutron_api.create_port, **port_args) neutron_api.LOG.exception.assert_called_once() @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_port_exception_status_409(self): port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} client_create_port_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException(status_code=409)) with mock.patch.object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)): with mock.patch.object(self.neutron_api.client, 'create_port', client_create_port_mock): self.assertRaises(exception.PortLimitExceeded, self.neutron_api.create_port, **port_args) neutron_api.LOG.exception.assert_called_once() def test_delete_port(self): port_id = 'test port id' with mock.patch.object(self.neutron_api.client, 'delete_port', mock.Mock()) as client_delete_port_mock: self.neutron_api.delete_port(port_id) client_delete_port_mock.assert_called_once_with(port_id) def test_list_ports(self): search_opts = {'test_option': 'test_value'} fake_ports = [{'fake port': 'fake port info'}] client_list_ports_mock = mock.Mock(return_value={'ports': fake_ports}) with mock.patch.object(self.neutron_api.client, 'list_ports', client_list_ports_mock): ports = self.neutron_api.list_ports(**search_opts) client_list_ports_mock.assert_called_once_with(**search_opts) self.assertEqual(ports, fake_ports) def test_show_port(self): port_id = 'test port id' fake_port = {'fake port': 'fake port info'} client_show_port_mock = mock.Mock(return_value={'port': fake_port}) with mock.patch.object(self.neutron_api.client, 'show_port', client_show_port_mock): port = self.neutron_api.show_port(port_id) client_show_port_mock.assert_called_once_with(port_id) self.assertEqual(port, fake_port) def test_get_network(self): network_id = 'test network id' fake_network = {'fake network': 'fake network 
info'} client_show_network_mock = mock.Mock( return_value={'network': fake_network}) with mock.patch.object(self.neutron_api.client, 'show_network', client_show_network_mock): network = self.neutron_api.get_network(network_id) client_show_network_mock.assert_called_once_with(network_id) self.assertEqual(network, fake_network) def test_get_subnet(self): subnet_id = 'fake subnet id' with mock.patch.object(self.neutron_api.client, 'show_subnet', mock.Mock(return_value={'subnet': {}})): subnet = self.neutron_api.get_subnet(subnet_id) self.neutron_api.client.show_subnet.assert_called_once_with( subnet_id) self.assertEqual(subnet, {}) def test_get_all_network(self): fake_networks = [{'fake network': 'fake network info'}] client_list_networks_mock = mock.Mock( return_value={'networks': fake_networks}) with mock.patch.object(self.neutron_api.client, 'list_networks', client_list_networks_mock): networks = self.neutron_api.get_all_networks() client_list_networks_mock.assert_any_call() self.assertEqual(networks, fake_networks) def test_list_extensions(self): extensions = [{'name': neutron_constants.PORTBINDING_EXT}, {'name': neutron_constants.PROVIDER_NW_EXT}] with mock.patch.object( self.neutron_api.client, 'list_extensions', mock.Mock(return_value={'extensions': extensions})): result = self.neutron_api.list_extensions() self.neutron_api.client.list_extensions.assert_any_call() self.assertTrue(neutron_constants.PORTBINDING_EXT in result) self.assertTrue(neutron_constants.PROVIDER_NW_EXT in result) self.assertEqual(result[neutron_constants.PORTBINDING_EXT], extensions[0]) self.assertEqual(result[neutron_constants.PROVIDER_NW_EXT], extensions[1]) def test_create_network(self): net_args = {'tenant_id': 'test tenant', 'name': 'test name'} network = self.neutron_api.network_create(**net_args) self.assertEqual(network['tenant_id'], net_args['tenant_id']) self.assertEqual(network['name'], net_args['name']) def test_create_subnet(self): subnet_args = {'tenant_id': 'test tenant', 
'name': 'test name', 'net_id': 'test net id', 'cidr': '10.0.0.0/24'} subnet = self.neutron_api.subnet_create(**subnet_args) self.assertEqual(subnet['tenant_id'], subnet_args['tenant_id']) self.assertEqual(subnet['name'], subnet_args['name']) def test_create_router(self): router_args = {'tenant_id': 'test tenant', 'name': 'test name'} router = self.neutron_api.router_create(**router_args) self.assertEqual(router['tenant_id'], router_args['tenant_id']) self.assertEqual(router['name'], router_args['name']) def test_list_routers(self): fake_routers = [{'fake router': 'fake router info'}] client_list_routers_mock = mock.Mock( return_value={'routers': fake_routers}) with mock.patch.object(self.neutron_api.client, 'list_routers', client_list_routers_mock): networks = self.neutron_api.router_list() client_list_routers_mock.assert_any_call() self.assertEqual(networks, fake_routers) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_network_exception(self): net_args = {'tenant_id': 'test tenant', 'name': 'test name'} client_create_network_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api.client, 'create_network', client_create_network_mock): self.assertRaises(exception.NetworkException, self.neutron_api.network_create, **net_args) neutron_api.LOG.exception.assert_called_once() @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_subnet_exception(self): subnet_args = {'tenant_id': 'test tenant', 'name': 'test name', 'net_id': 'test net id', 'cidr': '10.0.0.0/24'} client_create_subnet_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api.client, 'create_subnet', client_create_subnet_mock): self.assertRaises(exception.NetworkException, self.neutron_api.subnet_create, **subnet_args) neutron_api.LOG.exception.assert_called_once() @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def 
test_create_router_exception(self): router_args = {'tenant_id': 'test tenant', 'name': 'test name'} client_create_router_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api.client, 'create_router', client_create_router_mock): self.assertRaises(exception.NetworkException, self.neutron_api.router_create, **router_args) neutron_api.LOG.exception.assert_called_once() def test_update_port_fixed_ips(self): port_id = 'test_port' fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]} port = self.neutron_api.update_port_fixed_ips(port_id, fixed_ips) self.assertEqual(port, fixed_ips) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_update_port_fixed_ips_exception(self): port_id = 'test_port' fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]} client_update_port_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api.client, 'update_port', client_update_port_mock): self.assertRaises(exception.NetworkException, self.neutron_api.update_port_fixed_ips, port_id, fixed_ips) neutron_api.LOG.exception.assert_called_once() def test_router_update_routes(self): router_id = 'test_router' routes = {'routes': [{'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8'}]} router = self.neutron_api.router_update_routes(router_id, routes) self.assertEqual(router, routes) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_router_update_routes_exception(self): router_id = 'test_router' routes = {'routes': [{'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8'}]} client_update_router_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api.client, 'update_router', client_update_router_mock): self.assertRaises(exception.NetworkException, self.neutron_api.router_update_routes, router_id, routes) neutron_api.LOG.exception.assert_called_once() def test_show_router(self): router_id = 'test 
router id' fake_router = {'fake router': 'fake router info'} client_show_router_mock = mock.Mock(return_value={'router': fake_router}) with mock.patch.object(self.neutron_api.client, 'show_router', client_show_router_mock): port = self.neutron_api.show_router(router_id) client_show_router_mock.assert_called_once_with(router_id) self.assertEqual(port, fake_router) def test_router_add_interface(self): router_id = 'test port id' subnet_id = 'test subnet id' port_id = 'test port id' with mock.patch.object(self.neutron_api.client, 'add_interface_router', mock.Mock()) as client_add_interface_router_mock: self.neutron_api.router_add_interface(router_id, subnet_id, port_id) client_add_interface_router_mock.assert_called_once_with( port_id, {'subnet_id': subnet_id, 'port_id': port_id}) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_router_add_interface_exception(self): router_id = 'test port id' subnet_id = 'test subnet id' port_id = 'test port id' client_add_interface_router_mock = mock.Mock(side_effect= neutron_client_exc.NeutronClientException) with mock.patch.object(self.neutron_api.client, 'add_interface_router', client_add_interface_router_mock): self.assertRaises(exception.NetworkException, self.neutron_api.router_add_interface, router_id, subnet_id, port_id) neutron_api.LOG.exception.assert_called_once() class TestNeutronClient(unittest.TestCase): @mock.patch.object(clientv20.Client, '__init__', mock.Mock(return_value=None)) def test_get_client_with_token(self): client_args = {'endpoint_url': CONF.neutron_url, 'timeout': CONF.neutron_url_timeout, 'insecure': CONF.neutron_api_insecure, 'ca_cert': CONF.neutron_ca_certificates_file, 'token': 'test_token', 'auth_strategy': None} my_context = context.RequestContext('test_user', 'test_tenant', auth_token='test_token', is_admin=False) neutron.get_client(my_context) clientv20.Client.__init__.assert_called_once_with(**client_args) @mock.patch.object(clientv20.Client, '__init__', 
mock.Mock(return_value=None)) def test_get_client_no_token(self): my_context = context.RequestContext('test_user', 'test_tenant', is_admin=False) self.assertRaises(neutron_client_exc.Unauthorized, neutron.get_client, my_context) @mock.patch.object(clientv20.Client, '__init__', mock.Mock(return_value=None)) def test_get_client_admin_context(self): client_args = {'endpoint_url': CONF.neutron_url, 'timeout': CONF.neutron_url_timeout, 'insecure': CONF.neutron_api_insecure, 'ca_cert': CONF.neutron_ca_certificates_file, 'username': CONF.neutron_admin_username, 'tenant_name': CONF.neutron_admin_tenant_name, 'password': CONF.neutron_admin_password, 'auth_url': CONF.neutron_admin_auth_url, 'auth_strategy': CONF.neutron_auth_strategy} my_context = context.RequestContext('test_user', 'test_tenant', is_admin=True) neutron.get_client(my_context) clientv20.Client.__init__.assert_called_once_with(**client_args) manila-2013.2.dev175.gbf1a399/manila/tests/network/neutron/__init__.py0000664000175000017500000000000012301410454025375 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/network/neutron/test_neutron_plugin.py0000664000175000017500000003172212301410454027764 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import unittest from manila.common import constants from manila import context from manila.db import api as db_api from manila import exception from manila.network.neutron import constants as neutron_constants from manila.network.neutron import neutron_network_plugin as plugin fake_neutron_port = { "status": "test_port_status", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "test_net_id", "tenant_id": "fake_tenant_id", "extra_dhcp_opts": [], "device_owner": "test", "binding:capabilities": {"port_filter": True}, "mac_address": "test_mac", "fixed_ips": [ {"subnet_id": "test_subnet_id", "ip_address": "test_ip"} ], "id": "test_port_id", "security_groups": ["fake_sec_group_id"], "device_id": "fake_device_id" } fake_share_network = {'id': 'fake nw info id', 'neutron_subnet_id': 'fake subnet id', 'neutron_net_id': 'fake net id', 'project_id': 'fake project id', 'status': 'test_subnet_status', 'name': 'fake name', 'description': 'fake description', 'network_allocations': [], 'security_services': [], 'shares': []} fake_network_allocation = \ {'id': fake_neutron_port['id'], 'share_network_id': fake_share_network['id'], 'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'], 'mac_address': fake_neutron_port['mac_address'], 'status': constants.STATUS_ACTIVE} class NeutronNetworkPluginTest(unittest.TestCase): def __init__(self, *args, **kwargs): super(NeutronNetworkPluginTest, self).__init__(*args, **kwargs) self.plugin = plugin.NeutronNetworkPlugin() self.plugin.db = db_api self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)) def test_allocate_network_one_allocation(self): has_provider_nw_ext = mock.patch.object(self.plugin, '_has_provider_network_extension').start() 
has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_network, allocation_info={'count': 1}) has_provider_nw_ext.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network) self.plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], network_id=fake_share_network['neutron_net_id'], subnet_id=fake_share_network['neutron_subnet_id'], device_owner='manila:share') db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) db_api.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], {'status': constants.STATUS_ACTIVE}) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)) def test_allocate_network_two_allocation(self): has_provider_nw_ext = mock.patch.object(self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_network, count=2) neutron_api_calls = [ mock.call(fake_share_network['project_id'], 
network_id=fake_share_network['neutron_net_id'], subnet_id=fake_share_network['neutron_subnet_id'], device_owner='manila:share'), mock.call(fake_share_network['project_id'], network_id=fake_share_network['neutron_net_id'], subnet_id=fake_share_network['neutron_subnet_id'], device_owner='manila:share'), ] db_api_calls = [ mock.call(self.fake_context, fake_network_allocation), mock.call(self.fake_context, fake_network_allocation) ] self.plugin.neutron_api.create_port.assert_has_calls( neutron_api_calls) db_api.network_allocation_create.assert_has_calls(db_api_calls) db_api.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], {'status': constants.STATUS_ACTIVE}) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_allocate_network_create_port_exception(self): has_provider_nw_ext = mock.patch.object(self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() create_port = mock.patch.object(self.plugin.neutron_api, 'create_port').start() create_port.side_effect = exception.NetworkException self.assertRaises(exception.NetworkException, self.plugin.allocate_network, self.fake_context, fake_share_network) db_api.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], {'status': constants.STATUS_ERROR}) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() create_port.stop() @mock.patch.object(db_api, 'network_allocation_delete', mock.Mock()) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_deallocate_network_nominal(self): share_nw = {'id': fake_share_network['id']} share_nw['network_allocations'] = [fake_network_allocation] with 
mock.patch.object(self.plugin.neutron_api, 'delete_port', mock.Mock()): self.plugin.deallocate_network(self.fake_context, share_nw) self.plugin.neutron_api.delete_port.assert_called_once_with( fake_network_allocation['id']) db_api.network_allocation_delete.assert_called_once_with( self.fake_context, fake_network_allocation['id']) @mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'network_allocation_update', mock.Mock()) def test_deallocate_network_neutron_api_exception(self): share_nw = {'id': fake_share_network['id']} share_nw['network_allocations'] = [fake_network_allocation] delete_port = mock.patch.object(self.plugin.neutron_api, 'delete_port').start() delete_port.side_effect = exception.NetworkException self.assertRaises(exception.NetworkException, self.plugin.deallocate_network, self.fake_context, share_nw) db_api.network_allocation_update.assert_called_once_with( self.fake_context, fake_network_allocation['id'], {'status': constants.STATUS_ERROR}) db_api.share_network_update.assert_called_once_with( self.fake_context, share_nw['id'], {'status': constants.STATUS_ERROR}) delete_port.stop() @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_save_neutron_network_data(self): neutron_nw_info = {'provider:network_type': 'vlan', 'provider:segmentation_id': 1000} share_nw_update_dict = {'network_type': 'vlan', 'segmentation_id': 1000} with mock.patch.object(self.plugin.neutron_api, 'get_network', mock.Mock(return_value=neutron_nw_info)): self.plugin._save_neutron_network_data(self.fake_context, fake_share_network) self.plugin.neutron_api.get_network.assert_called_once_with( fake_share_network['neutron_net_id']) self.plugin.db.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], share_nw_update_dict) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_save_neutron_subnet_data(self): neutron_subnet_info = {'cidr': 
'10.0.0.0/24', 'ip_version': 4} with mock.patch.object(self.plugin.neutron_api, 'get_subnet', mock.Mock(return_value=neutron_subnet_info)): self.plugin._save_neutron_subnet_data(self.fake_context, fake_share_network) self.plugin.neutron_api.get_subnet.assert_called_once_with( fake_share_network['neutron_subnet_id']) self.plugin.db.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], neutron_subnet_info) def test_has_network_provider_extension_true(self): extensions = {neutron_constants.PROVIDER_NW_EXT: {}} with mock.patch.object(self.plugin.neutron_api, 'list_extensions', mock.Mock(return_value=extensions)): result = self.plugin._has_provider_network_extension() self.plugin.neutron_api.list_extensions.assert_any_call() self.assertTrue(result) def test_has_network_provider_extension_false(self): with mock.patch.object(self.plugin.neutron_api, 'list_extensions', mock.Mock(return_value={})): result = self.plugin._has_provider_network_extension() self.plugin.neutron_api.list_extensions.assert_any_call() self.assertFalse(result) manila-2013.2.dev175.gbf1a399/manila/tests/network/test_share_network_db.py0000664000175000017500000005140212301410454026537 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.common import constants from manila import context from manila.db import api as db_api from manila.db.sqlalchemy import api as sqlalchemy_api from manila.db.sqlalchemy import models from manila import exception from manila import test class ShareNetworkDBTest(test.TestCase): def __init__(self, *args, **kwargs): super(ShareNetworkDBTest, self).__init__(*args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def _check_fields(self, expected, actual): for key in expected: self.assertEqual(actual[key], expected[key]) def setUp(self): super(ShareNetworkDBTest, self).setUp() self.share_nw_dict = {'id': 'fake network id', 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'project_id': self.fake_context.project_id, 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'whatever', 'description': 'fake description', 'status': constants.STATUS_INACTIVE} self.allocation_dict = {'id': 'fake port id', 'share_network_id': self.share_nw_dict['id'], 'ip_address': 'fake ip address', 'mac_address': 'fake mac address', 'status': constants.STATUS_ACTIVE} def test_create_one_network(self): result = db_api.share_network_create(self.fake_context, self.share_nw_dict) self._check_fields(expected=self.share_nw_dict, actual=result) self.assertEqual(len(result['shares']), 0) self.assertEqual(len(result['security_services']), 0) self.assertEqual(len(result['network_allocations']), 0) def test_create_two_networks(self): share_nw_dict2 = self.share_nw_dict.copy() share_nw_dict2['id'] = None share_nw_dict2['project_id'] = 'fake project 2' result1 = db_api.share_network_create(self.fake_context, self.share_nw_dict) result2 = db_api.share_network_create(self.fake_context, share_nw_dict2) self._check_fields(expected=self.share_nw_dict, actual=result1) self._check_fields(expected=share_nw_dict2, actual=result2) def 
test_create_same_project_netid_and_subnetid(self): share_nw_dict2 = self.share_nw_dict.copy() share_nw_dict2['id'] = None db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.DBError, db_api.share_network_create, self.fake_context, share_nw_dict2) def test_create_with_duplicated_id(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.DBError, db_api.share_network_create, self.fake_context, self.share_nw_dict) def test_get(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self._check_fields(expected=self.share_nw_dict, actual=result) self.assertEqual(len(result['shares']), 0) self.assertEqual(len(result['security_services']), 0) self.assertEqual(len(result['network_allocations']), 0) def test_get_with_one_share(self): share_dict1 = {'id': 'fake share id1', 'share_network_id': self.share_nw_dict['id']} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_create(self.fake_context, share_dict1) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['shares']), 1) self._check_fields(expected=share_dict1, actual=result['shares'][0]) def test_get_with_two_shares(self): share_dict1 = {'id': 'fake share id1', 'share_network_id': self.share_nw_dict['id']} share_dict2 = {'id': 'fake share id2', 'share_network_id': self.share_nw_dict['id']} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_create(self.fake_context, share_dict1) db_api.share_create(self.fake_context, share_dict2) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['shares']), 2) def test_get_with_one_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} 
db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['security_services']), 1) self._check_fields(expected=security_dict1, actual=result['security_services'][0]) def test_get_with_two_security_services(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} security_dict2 = {'id': 'fake security service id2', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.security_service_create(self.fake_context, security_dict2) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict2['id']) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['security_services']), 2) def test_get_with_one_allocation(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.network_allocation_create(self.fake_context, self.allocation_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['network_allocations']), 1) self._check_fields(expected=self.allocation_dict, actual=result['network_allocations'][0]) def test_get_with_two_allocations(self): allocation_dict2 = dict(self.allocation_dict) allocation_dict2['id'] = 'fake port id2' db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.network_allocation_create(self.fake_context, self.allocation_dict) 
db_api.network_allocation_create(self.fake_context, allocation_dict2) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['network_allocations']), 2) def test_get_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, self.fake_context, 'fake id') def test_delete(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_delete(self.fake_context, self.share_nw_dict['id']) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, self.fake_context, self.share_nw_dict['id']) def test_delete_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_delete, self.fake_context, 'fake id') def test_update(self): new_status = constants.STATUS_ERROR db_api.share_network_create(self.fake_context, self.share_nw_dict) result_update = db_api.share_network_update(self.fake_context, self.share_nw_dict['id'], {'status': new_status}) result_get = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(result_update['status'], new_status) self._check_fields(expected=dict(result_update.iteritems()), actual=dict(result_get.iteritems())) def test_update_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_update, self.fake_context, 'fake id', {}) def test_get_all_one_record(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) result = db_api.share_network_get_all(self.fake_context) self.assertEqual(len(result), 1) self._check_fields(expected=self.share_nw_dict, actual=result[0]) def test_get_all_two_records(self): share_nw_dict2 = dict(self.share_nw_dict) share_nw_dict2['id'] = 'fake subnet id2' share_nw_dict2['neutron_subnet_id'] = 'fake subnet id2' db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_create(self.fake_context, share_nw_dict2) result = 
db_api.share_network_get_all(self.fake_context) self.assertEqual(len(result), 2) def test_get_all_by_project(self): share_nw_dict2 = dict(self.share_nw_dict) share_nw_dict2['id'] = 'fake share nw id2' share_nw_dict2['project_id'] = 'fake project 2' share_nw_dict2['neutron_subnet_id'] = 'fake subnet id2' db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_create(self.fake_context, share_nw_dict2) result = db_api.share_network_get_all_by_project( self.fake_context, share_nw_dict2['project_id']) self.assertEqual(len(result), 1) self._check_fields(expected=share_nw_dict2, actual=result[0]) def test_add_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) result = sqlalchemy_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation).\ filter_by(security_service_id=security_dict1['id']).\ filter_by(share_network_id=self.share_nw_dict['id']).first() self.assertTrue(result is not None) def test_add_security_service_not_found_01(self): security_service_id = 'unknown security service' db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.SecurityServiceNotFound, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_service_id) def test_add_security_service_not_found_02(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} share_nw_id = 'unknown share network' db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_add_security_service, self.fake_context, 
share_nw_id, security_dict1['id']) def test_add_security_service_association_error_already_associated(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) self.assertRaises( exception.ShareNetworkSecurityServiceAssociationError, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) def test_add_security_service_association_error_status_active(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_update(self.fake_context, self.share_nw_dict['id'], {'status': constants.STATUS_ACTIVE}) db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises( exception.ShareNetworkSecurityServiceAssociationError, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) assoc_ref = sqlalchemy_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation).\ filter_by(security_service_id=security_dict1['id']).\ filter_by(share_network_id=self.share_nw_dict['id']).first() self.assertTrue(assoc_ref is None) def test_remove_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) db_api.share_network_remove_security_service(self.fake_context, 
self.share_nw_dict['id'], security_dict1['id']) result = sqlalchemy_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation).\ filter_by(security_service_id=security_dict1['id']).\ filter_by(share_network_id=self.share_nw_dict['id']).first() self.assertTrue(result is None) share_nw_ref = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(share_nw_ref['security_services']), 0) def test_remove_security_service_not_found_01(self): security_service_id = 'unknown security service' db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.SecurityServiceNotFound, db_api.share_network_remove_security_service, self.fake_context, self.share_nw_dict['id'], security_service_id) def test_remove_security_service_not_found_02(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} share_nw_id = 'unknown share network' db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_remove_security_service, self.fake_context, share_nw_id, security_dict1['id']) def test_remove_security_service_dissociation_error(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises( exception.ShareNetworkSecurityServiceDissociationError, db_api.share_network_remove_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) def test_security_services_relation(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) 
result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['security_services']), 0) def test_shares_relation(self): share_dict = {'id': 'fake share id1'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_create(self.fake_context, share_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['shares']), 0) def test_network_allocations_relation(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.network_allocation_create(self.fake_context, self.allocation_dict) db_api.network_allocation_delete(self.fake_context, self.allocation_dict['id']) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(result['network_allocations']), 0) manila-2013.2.dev175.gbf1a399/manila/tests/network/__init__.py0000664000175000017500000000000012301410454023703 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/network/test_security_service_db.py0000664000175000017500000001534412301410454027260 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.common import constants from manila import context from manila.db import api as db_api from manila import exception from manila import test security_service_dict = {'id': 'fake id', 'project_id': 'fake project', 'type': 'ldap', 'dns_ip': 'fake dns', 'server': 'fake ldap server', 'domain': 'fake ldap domain', 'sid': 'fake sid', "password": "fake password", 'name': 'whatever', 'description': 'nevermind', 'status': constants.STATUS_NEW} class SecurityServiceDBTest(test.TestCase): def __init__(self, *args, **kwargs): super(SecurityServiceDBTest, self).__init__(*args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def _check_expected_fields(self, result, expected): for key in expected: self.assertEqual(result[key], expected[key]) def test_create(self): result = db_api.security_service_create(self.fake_context, security_service_dict) self._check_expected_fields(result, security_service_dict) def test_create_with_duplicated_id(self): db_api.security_service_create(self.fake_context, security_service_dict) self.assertRaises(exception.DBError, db_api.security_service_create, self.fake_context, security_service_dict) def test_get(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_get(self.fake_context, security_service_dict['id']) self._check_expected_fields(result, security_service_dict) def test_get_not_found(self): self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_get, self.fake_context, 'wrong id') def test_delete(self): db_api.security_service_create(self.fake_context, security_service_dict) db_api.security_service_delete(self.fake_context, security_service_dict['id']) self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_get, self.fake_context, security_service_dict['id']) def test_update(self): update_dict = {'dns_ip': 'new dns', 'server': 'new ldap server', 'domain': 'new ldap 
domain', 'sid': 'new sid', 'password': 'new password', 'name': 'new whatever', 'description': 'new nevermind', 'status': constants.STATUS_ERROR} db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_update(self.fake_context, security_service_dict['id'], update_dict) self._check_expected_fields(result, update_dict) def test_update_no_updates(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_update(self.fake_context, security_service_dict['id'], {}) self._check_expected_fields(result, security_service_dict) def test_update_not_found(self): self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_update, self.fake_context, 'wrong id', {}) def test_get_all_no_records(self): result = db_api.security_service_get_all(self.fake_context) self.assertEqual(len(result), 0) def test_get_all_one_record(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_get_all(self.fake_context) self.assertEqual(len(result), 1) self._check_expected_fields(result[0], security_service_dict) def test_get_all_two_records(self): dict1 = security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) result = db_api.security_service_get_all(self.fake_context) self.assertEqual(len(result), 2) def test_get_all_by_project(self): dict1 = security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' dict2['project_id'] = 'fake project 2' db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) result1 = db_api.security_service_get_all_by_project( self.fake_context, dict1['project_id']) self.assertEqual(len(result1), 1) self._check_expected_fields(result1[0], dict1) result2 = 
db_api.security_service_get_all_by_project( self.fake_context, dict2['project_id']) self.assertEqual(len(result2), 1) self._check_expected_fields(result2[0], dict2) manila-2013.2.dev175.gbf1a399/manila/tests/network/linux/0000775000175000017500000000000012301410516022742 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/network/linux/test_ovs_lib.py0000664000175000017500000000475012301410454026017 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from manila.network.linux import ovs_lib from manila import test class OVS_Lib_Test(test.TestCase): """A test suite to excercise the OVS libraries.""" def setUp(self): super(OVS_Lib_Test, self).setUp() self.BR_NAME = "br-int" self.TO = "--timeout=2" self.br = ovs_lib.OVSBridge(self.BR_NAME) self.execute_p = mock.patch('manila.utils.execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(OVS_Lib_Test, self).tearDown() def test_reset_bridge(self): self.br.reset_bridge() self.execute.assert_has_calls([mock.call("ovs-vsctl", self.TO, "--", "--if-exists", "del-br", self.BR_NAME, run_as_root=True), mock.call("ovs-vsctl", self.TO, "add-br", self.BR_NAME, run_as_root=True)]) def test_delete_port(self): pname = "tap5" self.br.delete_port(pname) self.execute.assert_called_once_with("ovs-vsctl", self.TO, "--", "--if-exists", "del-port", self.BR_NAME, pname, run_as_root=True) def test_port_id_regex(self): result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",' ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",' ' iface-status=active}\nname :' ' "dhc5c1321a7-c7"\nofport : 2\n') match = self.br.re_id.search(result) vif_mac = match.group('vif_mac') vif_id = match.group('vif_id') port_name = match.group('port_name') ofport = int(match.group('ofport')) self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2') self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f') self.assertEqual(port_name, 'dhc5c1321a7-c7') self.assertEqual(ofport, 2) manila-2013.2.dev175.gbf1a399/manila/tests/network/linux/__init__.py0000664000175000017500000000000012301410454025042 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/network/linux/test_interface.py0000664000175000017500000001731012301410454026316 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from manila.network.linux import interface from manila.network.linux import ip_lib from manila import test from manila.tests import conf_fixture from manila import utils class BaseChild(interface.LinuxInterfaceDriver): def plug(*args): pass def unplug(*args): pass FakeSubnet = { 'cidr': '192.168.1.1/24', } FakeAllocation = { 'subnet': FakeSubnet, 'ip_address': '192.168.1.2', 'ip_version': 4, } FakePort = { 'id': 'abcdef01-1234-5678-90ab-ba0987654321', 'fixed_ips': [FakeAllocation], 'device_id': 'cccccccc-cccc-cccc-cccc-cccccccccccc', } class TestBase(test.TestCase): def setUp(self): super(TestBase, self).setUp() self.conf = conf_fixture.CONF self.conf.register_opts(interface.OPTS) self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') self.ip_dev = self.ip_dev_p.start() self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') self.ip = self.ip_p.start() self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') self.device_exists = self.device_exists_p.start() def tearDown(self): self.ip_dev_p.stop() self.ip_p.stop() self.device_exists_p.stop() super(TestBase, self).tearDown() class TestABCDriver(TestBase): def test_get_device_name(self): bc = BaseChild() device_name = bc.get_device_name(FakePort) self.assertEqual('tapabcdef01-12', device_name) def test_l3_init(self): addresses = [dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild() ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns) 
self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(scope='global', filters=['permanent']), mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'), mock.call().addr.delete(4, '172.16.77.240/24')]) class TestOVSInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.OVSInterfaceDriver() device_name = br.get_device_name(FakePort) self.assertEqual('tapabcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def test_plug_alt_bridge(self): self._test_plug(bridge='br-foo') def _test_plug(self, additional_expectation=[], bridge=None, namespace=None): if not bridge: bridge = 'br-int' def device_exists(dev, namespace=None): return dev == bridge vsctl_cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port', bridge, 'tap0', '--', 'set', 'Interface', 'tap0', 'type=internal', '--', 'set', 'Interface', 'tap0', 'external-ids:iface-id=port-1234', '--', 'set', 'Interface', 'tap0', 'external-ids:iface-status=active', '--', 'set', 'Interface', 'tap0', 'external-ids:attached-mac=aa:bb:cc:dd:ee:ff'] with mock.patch.object(utils, 'execute') as execute: ovs = interface.OVSInterfaceDriver() self.device_exists.side_effect = device_exists ovs.plug('port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff', bridge=bridge, namespace=namespace) execute.assert_called_once_with(*vsctl_cmd, run_as_root=True) expected = [mock.call(), mock.call().device('tap0'), mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] expected.extend(additional_expectation) if namespace: expected.extend( [mock.call().ensure_namespace(namespace), mock.call().ensure_namespace().add_device_to_namespace( mock.ANY)]) expected.extend([mock.call().device().link.set_up()]) self.ip.assert_has_calls(expected) def test_unplug(self, bridge=None): if not bridge: bridge = 'br-int' with mock.patch('manila.network.linux.ovs_lib.OVSBridge') as ovs_br: ovs = interface.OVSInterfaceDriver() 
ovs.unplug('tap0') ovs_br.assert_has_calls([mock.call(bridge), mock.call().delete_port('tap0')]) class TestBridgeInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.BridgeInterfaceDriver() device_name = br.get_device_name(FakePort) self.assertEqual('ns-abcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def _test_plug(self, namespace=None, mtu=None): def device_exists(device, root_helper=None, namespace=None): return device.startswith('brq') root_veth = mock.Mock() ns_veth = mock.Mock() self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth)) self.device_exists.side_effect = device_exists br = interface.BridgeInterfaceDriver() mac_address = 'aa:bb:cc:dd:ee:ff' br.plug('port-1234', 'ns-0', mac_address, namespace=namespace) ip_calls = [mock.call(), mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)] ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)]) self.ip.assert_has_calls(ip_calls) root_veth.assert_has_calls([mock.call.link.set_up()]) ns_veth.assert_has_calls([mock.call.link.set_up()]) def test_plug_dev_exists(self): self.device_exists.return_value = True with mock.patch('manila.network.linux.interface.LOG.warn') as log: br = interface.BridgeInterfaceDriver() br.plug('port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff') self.ip_dev.assert_has_calls([]) self.assertEqual(log.call_count, 1) def test_unplug_no_device(self): self.device_exists.return_value = False self.ip_dev().link.delete.side_effect = RuntimeError with mock.patch('manila.network.linux.interface.LOG') as log: br = interface.BridgeInterfaceDriver() br.unplug('tap0') [mock.call(), mock.call('tap0'), mock.call().link.delete()] self.assertEqual(log.error.call_count, 1) def test_unplug(self): self.device_exists.return_value = True with mock.patch('manila.network.linux.interface.LOG.debug') as log: br = interface.BridgeInterfaceDriver() br.unplug('tap0') 
log.assert_called_once() self.ip_dev.assert_has_calls([mock.call('tap0', None), mock.call().link.delete()]) manila-2013.2.dev175.gbf1a399/manila/tests/network/linux/test_ip_lib.py0000664000175000017500000006600512301410454025621 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from manila.network.linux import ip_lib from manila import test NETNS_SAMPLE = [ '12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'] LINK_SAMPLE = [ '1: lo: mtu 16436 qdisc noqueue state UNKNOWN \\' 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00', '2: eth0: mtu 1500 qdisc mq state UP ' 'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' '\ alias openvswitch', '3: br-int: mtu 1500 qdisc noop state DOWN ' '\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff', '4: gw-ddc717df-49: mtu 1500 qdisc noop ' 'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff', '5: eth0.50@eth0: mtu 1500 qdisc ' ' noqueue master brq0b24798c-07 state UP mode DEFAULT' '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff'] ADDR_SAMPLE = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 
2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) ADDR_SAMPLE2 = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) GATEWAY_SAMPLE1 = (""" default via 10.35.19.254 metric 100 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE2 = (""" default via 10.35.19.254 metric 100 """) GATEWAY_SAMPLE3 = (""" 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE4 = (""" default via 10.35.19.254 """) DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") class TestSubProcessBase(test.TestCase): def setUp(self): super(TestSubProcessBase, self).setUp() self.execute_p = mock.patch('manila.utils.execute') self.execute 
= self.execute_p.start() def tearDown(self): self.execute_p.stop() super(TestSubProcessBase, self).tearDown() def test_execute_wrapper(self): ip_lib.SubProcessBase._execute('o', 'link', ('list',)) self.execute.assert_called_once_with('ip', '-o', 'link', 'list', run_as_root=False) def test_execute_wrapper_int_options(self): ip_lib.SubProcessBase._execute([4], 'link', ('list',)) self.execute.assert_called_once_with('ip', '-4', 'link', 'list', run_as_root=False) def test_execute_wrapper_no_options(self): ip_lib.SubProcessBase._execute([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'link', 'list', run_as_root=False) def test_run_no_namespace(self): base = ip_lib.SubProcessBase() base._run([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'link', 'list', run_as_root=False) def test_run_namespace(self): base = ip_lib.SubProcessBase('ns') base._run([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True) def test_as_root_namespace(self): base = ip_lib.SubProcessBase('ns') base._as_root([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True) class TestIpWrapper(test.TestCase): def setUp(self): super(TestIpWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(TestIpWrapper, self).tearDown() def test_get_devices(self): self.execute.return_value = '\n'.join(LINK_SAMPLE) retval = ip_lib.IPWrapper().get_devices() self.assertEqual(retval, [ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49'), ip_lib.IPDevice('eth0.50')]) self.execute.assert_called_once_with('o', 'link', ('list',), None) def test_get_devices_malformed_line(self): self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish']) retval = 
ip_lib.IPWrapper().get_devices() self.assertEqual(retval, [ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49'), ip_lib.IPDevice('eth0.50')]) self.execute.assert_called_once_with('o', 'link', ('list',), None) def test_get_namespaces(self): self.execute.return_value = '\n'.join(NETNS_SAMPLE) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(retval, ['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) self.execute.assert_called_once_with('', 'netns', ('list',)) def test_add_tuntap(self): ip_lib.IPWrapper().add_tuntap('tap0') self.execute.assert_called_once_with('', 'tuntap', ('add', 'tap0', 'mode', 'tap'), None, as_root=True) def test_add_veth(self): ip_lib.IPWrapper().add_veth('tap0', 'tap1') self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1'), None, as_root=True) def test_add_veth_with_namespaces(self): ns2 = 'ns2' with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en: ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2) en.assert_has_calls([mock.call(ns2)]) self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1', 'netns', ns2), None, as_root=True) def test_get_device(self): dev = ip_lib.IPWrapper('ns').device('eth0') self.assertEqual(dev.namespace, 'ns') self.assertEqual(dev.name, 'eth0') def test_ensure_namespace(self): with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: ip = ip_lib.IPWrapper() with mock.patch.object(ip.netns, 'exists') as ns_exists: ns_exists.return_value = False ip.ensure_namespace('ns') self.execute.assert_has_calls( [mock.call([], 'netns', ('add', 'ns'), None, as_root=True)]) ip_dev.assert_has_calls([mock.call('lo', 'ns'), mock.call().link.set_up()]) def test_ensure_namespace_existing(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: ip_ns_cmd.exists.return_value = True 
ns = ip_lib.IPWrapper().ensure_namespace('ns') self.assertFalse(self.execute.called) self.assertEqual(ns.namespace, 'ns') def test_namespace_is_empty_no_devices(self): ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [] self.assertTrue(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_namespace_is_empty(self): ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [mock.Mock()] self.assertFalse(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_garbage_collect_namespace_does_not_exist(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = False ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: self.assertFalse(ip.garbage_collect_namespace()) ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.return_value.mock_calls) self.assertEqual(mock_is_empty.mock_calls, []) def test_garbage_collect_namespace_existing_empty_ns(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = True self.assertTrue(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call().exists('ns'), mock.call().delete('ns')] ip_ns_cmd_cls.assert_has_calls(expected) def test_garbage_collect_namespace_existing_not_empty(self): lo_device = mock.Mock() lo_device.name = 'lo' tap_device = mock.Mock() tap_device.name = 'tap1' with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 
'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = False self.assertFalse(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call(ip), mock.call().exists('ns')] self.assertEqual(ip_ns_cmd_cls.mock_calls, expected) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.mock_calls) def test_add_device_to_namespace(self): dev = mock.Mock() ip_lib.IPWrapper('ns').add_device_to_namespace(dev) dev.assert_has_calls([mock.call.link.set_netns('ns')]) def test_add_device_to_namespace_is_none(self): dev = mock.Mock() ip_lib.IPWrapper().add_device_to_namespace(dev) self.assertEqual(dev.mock_calls, []) class TestIPDevice(test.TestCase): def test_eq_same_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap0') self.assertEqual(dev1, dev2) def test_eq_diff_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap1') self.assertNotEqual(dev1, dev2) def test_eq_same_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns1') self.assertEqual(dev1, dev2) def test_eq_diff_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns2') self.assertNotEqual(dev1, dev2) def test_eq_other_is_none(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') self.assertNotEqual(dev1, None) def test_str(self): self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0') class TestIPCommandBase(test.TestCase): def setUp(self): super(TestIPCommandBase, self).setUp() self.ip = mock.Mock() self.ip.namespace = 'namespace' self.ip_cmd = ip_lib.IpCommandBase(self.ip) self.ip_cmd.COMMAND = 'foo' def test_run(self): self.ip_cmd._run('link', 'show') self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) def test_run_with_options(self): self.ip_cmd._run('link', options='o') self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))]) def test_as_root(self): self.ip_cmd._as_root('link') self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link', ), 
False)]) def test_as_root_with_options(self): self.ip_cmd._as_root('link', options='o') self.ip.assert_has_calls( [mock.call._as_root('o', 'foo', ('link', ), False)]) class TestIPDeviceCommandBase(test.TestCase): def setUp(self): super(TestIPDeviceCommandBase, self).setUp() self.ip_dev = mock.Mock() self.ip_dev.name = 'eth0' self.ip_dev._execute = mock.Mock(return_value='executed') self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) self.ip_cmd.COMMAND = 'foo' def test_name_property(self): self.assertEqual(self.ip_cmd.name, 'eth0') class TestIPCmdBase(test.TestCase): def setUp(self): super(TestIPCmdBase, self).setUp() self.parent = mock.Mock() self.parent.name = 'eth0' def _assert_call(self, options, args): self.parent.assert_has_calls([ mock.call._run(options, self.command, args)]) def _assert_sudo(self, options, args, force_root_namespace=False): self.parent.assert_has_calls( [mock.call._as_root(options, self.command, args, force_root_namespace)]) class TestIpLinkCommand(TestIPCmdBase): def setUp(self): super(TestIpLinkCommand, self).setUp() self.parent._run.return_value = LINK_SAMPLE[1] self.command = 'link' self.link_cmd = ip_lib.IpLinkCommand(self.parent) def test_set_address(self): self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) def test_set_mtu(self): self.link_cmd.set_mtu(1500) self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) def test_set_up(self): self.link_cmd.set_up() self._assert_sudo([], ('set', 'eth0', 'up')) def test_set_down(self): self.link_cmd.set_down() self._assert_sudo([], ('set', 'eth0', 'down')) def test_set_netns(self): self.link_cmd.set_netns('foo') self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) self.assertEqual(self.parent.namespace, 'foo') def test_set_name(self): self.link_cmd.set_name('tap1') self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) self.assertEqual(self.parent.name, 'tap1') def test_set_alias(self): self.link_cmd.set_alias('openvswitch') 
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) def test_delete(self): self.link_cmd.delete() self._assert_sudo([], ('delete', 'eth0')) def test_address_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd') def test_mtu_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.mtu, 1500) def test_qdisc_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.qdisc, 'mq') def test_qlen_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.qlen, 1000) def test_alias_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.alias, 'openvswitch') def test_state_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.state, 'UP') def test_settings_property(self): expected = {'mtu': 1500, 'qlen': 1000, 'state': 'UP', 'qdisc': 'mq', 'brd': 'ff:ff:ff:ff:ff:ff', 'link/ether': 'cc:dd:ee:ff:ab:cd', 'alias': 'openvswitch'} self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.attributes, expected) self._assert_call('o', ('show', 'eth0')) class TestIpAddrCommand(TestIPCmdBase): def setUp(self): super(TestIpAddrCommand, self).setUp() self.parent.name = 'tap0' self.command = 'addr' self.addr_cmd = ip_lib.IpAddrCommand(self.parent) def test_add_address(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'global', 'dev', 'tap0')) def test_add_address_scoped(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255', scope='link') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'link', 'dev', 'tap0')) def test_del_address(self): 
self.addr_cmd.delete(4, '192.168.45.100/24') self._assert_sudo([4], ('del', '192.168.45.100/24', 'dev', 'tap0')) def test_flush(self): self.addr_cmd.flush() self._assert_sudo([], ('flush', 'tap0')) def test_list(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64', broadcast='::'), dict(ip_version=6, scope='link', dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64', broadcast='::')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case) self.assertEqual(self.addr_cmd.list(), expected) self._assert_call([], ('show', 'tap0')) def test_list_filtered(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: output = '\n'.join(test_case.split('\n')[0:4]) self.parent._run.return_value = output self.assertEqual(self.addr_cmd.list('global', filters=['permanent']), expected) self._assert_call([], ('show', 'tap0', 'permanent', 'scope', 'global')) class TestIpRouteCommand(TestIPCmdBase): def setUp(self): super(TestIpRouteCommand, self).setUp() self.parent.name = 'eth0' self.command = 'route' self.route_cmd = ip_lib.IpRouteCommand(self.parent) def test_add_gateway(self): gateway = '192.168.45.100' metric = 100 self.route_cmd.add_gateway(gateway, metric) self._assert_sudo([], ('replace', 'default', 'via', gateway, 'metric', metric, 'dev', self.parent.name)) def test_del_gateway(self): 
gateway = '192.168.45.100' self.route_cmd.delete_gateway(gateway) self._assert_sudo([], ('del', 'default', 'via', gateway, 'dev', self.parent.name)) def test_get_gateway(self): test_cases = [{'sample': GATEWAY_SAMPLE1, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE2, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE3, 'expected': None}, {'sample': GATEWAY_SAMPLE4, 'expected': {'gateway': '10.35.19.254'}}] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case['sample']) self.assertEqual(self.route_cmd.get_gateway(), test_case['expected']) def test_pullup_route(self): # interface is not the first in the list - requires # deleting and creating existing entries output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel', 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) def test_pullup_route_first(self): # interface is first in the list - no changes output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') # Check two calls - device get and subnet get self.assertEqual(len(self.parent._run.mock_calls), 2) class TestIpNetnsCommand(TestIPCmdBase): def setUp(self): super(TestIpNetnsCommand, self).setUp() self.command = 'netns' self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) def test_add_namespace(self): ns = self.netns_cmd.add('ns') self._assert_sudo([], ('add', 'ns'), force_root_namespace=True) self.assertEqual(ns.namespace, 'ns') def test_delete_namespace(self): with mock.patch('manila.utils.execute'): 
self.netns_cmd.delete('ns') self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True) def test_namespace_exists(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertTrue( self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_namespace_doest_not_exist(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertFalse( self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_execute(self): self.parent.namespace = 'ns' with mock.patch('manila.utils.execute') as execute: self.netns_cmd.execute(['ip', 'link', 'list']) execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True, check_exit_code=True) def test_execute_env_var_prepend(self): self.parent.namespace = 'ns' with mock.patch('manila.utils.execute') as execute: env = dict(FOO=1, BAR=2) self.netns_cmd.execute(['ip', 'link', 'list'], env) execute.assert_called_once_with( 'ip', 'netns', 'exec', 'ns', 'env', 'FOO=1', 'BAR=2', 'ip', 'link', 'list', run_as_root=True, check_exit_code=True) class TestDeviceExists(test.TestCase): def test_device_exists(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = LINK_SAMPLE[1] self.assertTrue(ip_lib.device_exists('eth0')) _execute.assert_called_once_with('o', 'link', ('show', 'eth0')) def test_device_does_not_exist(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = '' _execute.side_effect = RuntimeError('Device does not exist.') self.assertFalse(ip_lib.device_exists('eth0')) manila-2013.2.dev175.gbf1a399/manila/tests/fake_volume.py0000664000175000017500000000414012301410454022761 0ustar chuckchuck00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 
(the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # vim: tabstop=4 shiftwidth=4 softtabstop=4 from oslo.config import cfg from manila.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class FakeVolume(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_vol_id') self.status = kwargs.pop('status', 'available') self.device = kwargs.pop('device', '') self.display_name = kwargs.pop('display_name', 'fake_vol_name') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeVolumeSnapshot(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_volsnap_id') self.status = kwargs.pop('status', 'available') self.display_name = kwargs.pop('display_name', 'fake_volsnap_name') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class API(object): """Fake Volume API""" def get(self, volume_id): pass def create_snapshot_force(self, *args, **kwargs): pass def get_snapshot(self, *args, **kwargs): pass def delete_snapshot(self, *args, **kwargs): pass def create(self, *args, **kwargs): pass def get_all(self, search_opts): pass def delete(self, volume_id): pass def get_all_snapshots(self, search_opts): pass manila-2013.2.dev175.gbf1a399/manila/tests/test_misc.py0000664000175000017500000000431012301410454022455 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC # # Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import os from manila import exception from manila import test class ExceptionTestCase(test.TestCase): @staticmethod def _raise_exc(exc): raise exc() def test_exceptions_raise(self): # NOTE(dprince): disable format errors since we are not passing kwargs self.flags(fatal_exception_format_errors=False) for name in dir(exception): exc = getattr(exception, name) if isinstance(exc, type): self.assertRaises(exc, self._raise_exc, exc) class ProjectTestCase(test.TestCase): def test_all_migrations_have_downgrade(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') py_glob = os.path.join(topdir, "manila", "db", "sqlalchemy", "migrate_repo", "versions", "*.py") missing_downgrade = [] for path in glob.iglob(py_glob): has_upgrade = False has_downgrade = False with open(path, "r") as f: for line in f: if 'def upgrade(' in line: has_upgrade = True if 'def downgrade(' in line: has_downgrade = True if has_upgrade and not has_downgrade: fname = os.path.basename(path) missing_downgrade.append(fname) helpful_msg = (_("The following migrations are missing a downgrade:" "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) self.assert_(not missing_downgrade, helpful_msg) manila-2013.2.dev175.gbf1a399/manila/tests/fake_utils.py0000664000175000017500000000671312301410454022622 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Citrix Systems, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This modules stubs out functions in manila.utils.""" import re from eventlet import greenthread from manila import exception from manila.openstack.common import log as logging from manila import utils LOG = logging.getLogger(__name__) _fake_execute_repliers = [] _fake_execute_log = [] def fake_execute_get_log(): return _fake_execute_log def fake_execute_clear_log(): global _fake_execute_log _fake_execute_log = [] def fake_execute_set_repliers(repliers): """Allows the client to configure replies to commands.""" global _fake_execute_repliers _fake_execute_repliers = repliers def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): """A reply handler for commands that haven't been added to the reply list. Returns empty strings for stdout and stderr. """ return '', '' def fake_execute(*cmd_parts, **kwargs): """This function stubs out execute. It optionally executes a preconfigued function to return expected data. 
""" global _fake_execute_repliers process_input = kwargs.get('process_input', None) check_exit_code = kwargs.get('check_exit_code', 0) delay_on_retry = kwargs.get('delay_on_retry', True) attempts = kwargs.get('attempts', 1) run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str) _fake_execute_log.append(cmd_str) reply_handler = fake_execute_default_reply_handler for fake_replier in _fake_execute_repliers: if re.match(fake_replier[0], cmd_str): reply_handler = fake_replier[1] LOG.debug(_('Faked command matched %s') % fake_replier[0]) break if isinstance(reply_handler, basestring): # If the reply handler is a string, return it as stdout reply = reply_handler, '' else: try: # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) except exception.ProcessExecutionError as e: LOG.debug(_('Faked command raised an exception %s'), e) raise stdout = reply[0] stderr = reply[1] LOG.debug(_("Reply to faked command is stdout='%(stdout)s' " "stderr='%(stderr)s'") % locals()) # Replicate the sleep call in the real function greenthread.sleep(0) return reply def stub_out_utils_execute(stubs): fake_execute_set_repliers([]) fake_execute_clear_log() stubs.Set(utils, 'execute', fake_execute) manila-2013.2.dev175.gbf1a399/manila/tests/xenapi/0000775000175000017500000000000012301410516021376 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/xenapi/__init__.py0000664000175000017500000000000012301410454023476 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/image/0000775000175000017500000000000012301410516021174 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/image/fake.py0000664000175000017500000002112112301410454022452 0ustar chuckchuck00000000000000# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake image service.""" import copy import datetime import uuid from manila import exception import manila.image.glance from manila.openstack.common import log as logging from oslo.config import cfg LOG = logging.getLogger(__name__) CONF = cfg.CONF class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
timestamp = datetime.datetime(2011, 0o1, 0o1, 0o1, 0o2, 0o3) image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64'}} image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}} image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': None, 'disk_format': None, 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}} image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}} image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': { 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'ramdisk_id': None}} image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'name': 'fakeimage6', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 
'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'False'}} image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b', 'name': 'fakeimage7', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'True'}} self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) self.create(None, image5) self.create(None, image6) self.create(None, image7) self._imagedata = {} super(_FakeImageService, self).__init__() #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) def download(self, context, image_id, data): self.show(context, image_id) data.write(self._imagedata.get(image_id, '')) def show(self, context, image_id): """Get data about specified image. Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warn('Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.ImageNotFound(image_id=image_id) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ image_id = str(metadata.get('id', uuid.uuid4())) metadata['id'] = image_id if image_id in self.images: raise exception.Duplicate() self.images[image_id] = copy.deepcopy(metadata) if data: self._imagedata[image_id] = data.read() return self.images[image_id] def update(self, context, image_id, metadata, data=None, purge_props=False): """Replace the contents of the given image with the new data. 
:raises: ImageNotFound if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) if purge_props: self.images[image_id] = copy.deepcopy(metadata) else: image = self.images[image_id] try: image['properties'].update(metadata.pop('properties')) except Exception: pass image.update(metadata) return self.images[image_id] def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def get_location(self, context, image_id): if image_id in self.images: return 'fake_location' return None _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService def FakeImageService_reset(): global _fakeImageService _fakeImageService = _FakeImageService() def stub_out_image_service(stubs): def fake_get_remote_image_service(context, image_href): return (FakeImageService(), image_href) stubs.Set(manila.image.glance, 'get_remote_image_service', lambda x, y: (FakeImageService(), y)) stubs.Set(manila.image.glance, 'get_default_image_service', lambda: FakeImageService()) manila-2013.2.dev175.gbf1a399/manila/tests/image/test_glance.py0000664000175000017500000005356412301410454024054 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import random import time import glanceclient.exc from glanceclient.v2.client import Client as glanceclient_v2 from manila import context from manila import exception from manila.image import glance from manila import test from manila.tests.glance import stubs as glance_stubs from oslo.config import cfg CONF = cfg.CONF class NullWriter(object): """Used to test ImageService.get which takes a writer object.""" def write(self, *arg, **kwargs): pass class TestGlanceSerializer(test.TestCase): def test_serialize(self): metadata = {'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': { 'prop1': 'propvalue1', 'mappings': [ {'virtual': 'aaa', 'device': 'bbb'}, {'virtual': 'xxx', 'device': 'yyy'}], 'block_device_mapping': [ {'virtual_device': 'fake', 'device_name': '/dev/fake'}, {'virtual_device': 'ephemeral0', 'device_name': '/dev/fake0'}]}} converted_expected = { 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': { 'prop1': 'propvalue1', 'mappings': '[{"device": "bbb", "virtual": "aaa"}, ' '{"device": "yyy", "virtual": "xxx"}]', 'block_device_mapping': '[{"virtual_device": "fake", "device_name": "/dev/fake"}, ' '{"virtual_device": "ephemeral0", ' '"device_name": "/dev/fake0"}]'}} converted = glance._convert_to_string(metadata) self.assertEqual(converted, converted_expected) self.assertEqual(glance._convert_from_string(converted), metadata) class TestGlanceImageService(test.TestCase): """ Tests the Glance image service. At a high level, the translations involved are: 1. Glance -> ImageService - This is needed so we can support multple ImageServices (Glance, Local, etc) 2. 
ImageService -> API - This is needed so we can support multple APIs (OpenStack, EC2) """ NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" class tzinfo(datetime.tzinfo): @staticmethod def utcoffset(*args, **kwargs): return datetime.timedelta() NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) def setUp(self): super(TestGlanceImageService, self).setUp() #fakes.stub_out_compute_api_snapshot(self.stubs) client = glance_stubs.StubGlanceClient() self.service = self._create_image_service(client) self.context = context.RequestContext('fake', 'fake', auth_token=True) self.stubs.Set(glance.time, 'sleep', lambda s: None) def _create_image_service(self, client): def _fake_create_glance_client(context, host, port, use_ssl, version): return client self.stubs.Set(glance, '_create_glance_client', _fake_create_glance_client) client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292) return glance.GlanceImageService(client=client_wrapper) @staticmethod def _make_fixture(**kwargs): fixture = {'name': None, 'properties': {}, 'status': None, 'is_public': None} fixture.update(kwargs) return fixture def _make_datetime_fixture(self): return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, updated_at=self.NOW_GLANCE_FORMAT, deleted_at=self.NOW_GLANCE_FORMAT) def test_create_with_instance_id(self): """Ensure instance_id is persisted as an image-property.""" fixture = {'name': 'test image', 'is_public': False, 'properties': {'instance_id': '42', 'user_id': 'fake'}} image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'test image', 'is_public': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': 
{'instance_id': '42', 'user_id': 'fake'}, 'owner': None, } self.assertDictMatch(image_meta, expected) image_metas = self.service.detail(self.context) self.assertDictMatch(image_metas[0], expected) def test_create_without_instance_id(self): """ Ensure we can create an image without having to specify an instance_id. Public images are an example of an image not tied to an instance. """ fixture = {'name': 'test image', 'is_public': False} image_id = self.service.create(self.context, fixture)['id'] expected = { 'id': image_id, 'name': 'test image', 'is_public': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, } actual = self.service.show(self.context, image_id) self.assertDictMatch(actual, expected) def test_create(self): fixture = self._make_fixture(name='test image') num_images = len(self.service.detail(self.context)) image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) self.assertEquals(num_images + 1, len(self.service.detail(self.context))) def test_create_and_show_non_existing_image(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) self.assertRaises(exception.ImageNotFound, self.service.show, self.context, 'bad image id') def test_detail_private_image(self): fixture = self._make_fixture(name='test image') fixture['is_public'] = False properties = {'owner_id': 'proj1'} fixture['properties'] = properties self.service.create(self.context, fixture)['id'] proj = self.context.project_id self.context.project_id = 'proj1' image_metas = self.service.detail(self.context) self.context.project_id = proj self.assertEqual(1, len(image_metas)) self.assertEqual(image_metas[0]['name'], 'test image') 
self.assertEqual(image_metas[0]['is_public'], False) def test_detail_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[1]) self.assertEquals(len(image_metas), 8) i = 2 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'is_public': None, 'name': 'TestImage %d' % (i), 'properties': {}, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'owner': None, } self.assertDictMatch(meta, expected) i = i + 1 def test_detail_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, limit=5) self.assertEquals(len(image_metas), 5) def test_detail_default_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context) for i, meta in enumerate(image_metas): self.assertEqual(meta['name'], 'TestImage %d' % (i)) def test_detail_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[3], limit=5) self.assertEquals(len(image_metas), 5) i = 4 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'is_public': None, 'name': 'TestImage %d' % (i), 'properties': {}, 'size': None, 'min_disk': None, 'min_ram': None, 
'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'owner': None, } self.assertDictMatch(meta, expected) i = i + 1 def test_detail_invalid_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) self.assertRaises(exception.Invalid, self.service.detail, self.context, marker='invalidmarker') def test_update(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) print image image_id = image['id'] fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) new_image_data = self.service.show(self.context, image_id) self.assertEquals('new image name', new_image_data['name']) def test_delete(self): fixture1 = self._make_fixture(name='test image 1') fixture2 = self._make_fixture(name='test image 2') fixtures = [fixture1, fixture2] num_images = len(self.service.detail(self.context)) self.assertEquals(0, num_images) ids = [] for fixture in fixtures: new_id = self.service.create(self.context, fixture)['id'] ids.append(new_id) num_images = len(self.service.detail(self.context)) self.assertEquals(2, num_images) self.service.delete(self.context, ids[0]) num_images = len(self.service.detail(self.context)) self.assertEquals(1, num_images) def test_show_passes_through_to_client(self): fixture = self._make_fixture(name='image1', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'image1', 'is_public': True, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 
'properties': {}, 'owner': None, } self.assertEqual(image_meta, expected) def test_show_raises_when_no_authtoken_in_the_context(self): fixture = self._make_fixture(name='image1', is_public=False, properties={'one': 'two'}) image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_detail_passes_through_to_client(self): fixture = self._make_fixture(name='image10', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_metas = self.service.detail(self.context) expected = [ { 'id': image_id, 'name': 'image10', 'is_public': True, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, }, ] self.assertEqual(image_metas, expected) def test_show_makes_datetimes(self): fixture = self._make_datetime_fixture() image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) def test_detail_makes_datetimes(self): fixture = self._make_datetime_fixture() self.service.create(self.context, fixture) image_meta = self.service.detail(self.context)[0] self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) def test_download_with_retries(self): tries = [0] class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get(self, image_id): if tries[0] == 0: tries[0] = 1 raise glanceclient.exc.ServiceUnavailable('') else: return {} client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter 
writer = NullWriter() # When retries are disabled, we should get an exception self.flags(glance_num_retries=0) self.assertRaises(exception.GlanceConnectionFailed, service.download, self.context, image_id, writer) # Now lets enable retries. No exception should happen now. tries = [0] self.flags(glance_num_retries=1) service.download(self.context, image_id, writer) def test_client_forbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a Forbidden exception.""" def get(self, image_id): raise glanceclient.exc.Forbidden(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) def test_client_httpforbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a HTTPForbidden exception.""" def get(self, image_id): raise glanceclient.exc.HTTPForbidden(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) def test_client_notfound_converts_to_imagenotfound(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a NotFound exception.""" def get(self, image_id): raise glanceclient.exc.NotFound(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotFound, service.download, self.context, image_id, writer) def test_client_httpnotfound_converts_to_imagenotfound(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a HTTPNotFound exception.""" def get(self, image_id): raise glanceclient.exc.HTTPNotFound(image_id) client 
= MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotFound, service.download, self.context, image_id, writer) def test_glance_client_image_id(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] (service, same_id) = glance.get_remote_image_service(self.context, image_id) self.assertEquals(same_id, image_id) def test_glance_client_image_ref(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_url = 'http://something-less-likely/%s' % image_id (service, same_id) = glance.get_remote_image_service(self.context, image_url) self.assertEquals(same_id, image_id) self.assertEquals(service._client.host, 'something-less-likely') class TestGlanceClientVersion(test.TestCase): """Tests the version of the glance client generated""" def setUp(self): super(TestGlanceClientVersion, self).setUp() def fake_get_image_model(self): return self.stubs.Set(glanceclient_v2, '_get_image_model', fake_get_image_model) self.stubs.Set(glanceclient_v2, '_get_member_model', fake_get_image_model) def test_glance_version_by_flag(self): """Test glance version set by flag is honoured""" client_wrapper_v1 = glance.GlanceClientWrapper('fake', 'fake_host', 9292) self.assertEquals(client_wrapper_v1.client.__module__, 'glanceclient.v1.client') self.flags(glance_api_version=2) client_wrapper_v2 = glance.GlanceClientWrapper('fake', 'fake_host', 9292) self.assertEquals(client_wrapper_v2.client.__module__, 'glanceclient.v2.client') CONF.reset() def test_glance_version_by_arg(self): """Test glance version set by arg to GlanceClientWrapper""" client_wrapper_v1 = glance.GlanceClientWrapper('fake', 'fake_host', 9292, version=1) self.assertEquals(client_wrapper_v1.client.__module__, 'glanceclient.v1.client') client_wrapper_v2 = glance.GlanceClientWrapper('fake', 'fake_host', 9292, 
version=2) self.assertEquals(client_wrapper_v2.client.__module__, 'glanceclient.v2.client') def _create_failing_glance_client(info): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get(self, image_id): info['num_calls'] += 1 if info['num_calls'] == 1: raise glanceclient.exc.ServiceUnavailable('') return {} return MyGlanceStubClient() manila-2013.2.dev175.gbf1a399/manila/tests/image/__init__.py0000664000175000017500000000141312301410454023305 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import * manila-2013.2.dev175.gbf1a399/manila/tests/test_share_lvm.py0000664000175000017500000006602112301410454023511 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the NFS driver module.""" import mock import os from manila import context from manila.db.sqlalchemy import models from manila import exception from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.share.configuration import Configuration from manila.share.drivers import lvm from manila import test from manila.tests.db import fakes as db_fakes from manila.tests import fake_utils from oslo.config import cfg CONF = cfg.CONF def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_snapshot(**kwargs): snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } snapshot.update(kwargs) return db_fakes.FakeModel(snapshot) def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) class LVMShareDriverTestCase(test.TestCase): """Tests LVMShareDriver.""" def setUp(self): super(LVMShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self.stubs) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() CONF.set_default('share_volume_group', 'fakevg') CONF.set_default('share_export_ip', '10.0.0.1') self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() self.fake_conf = Configuration(None) self._db = mock.Mock() self._os = lvm.os = mock.Mock() self._os.path.join = os.path.join self._driver = lvm.LVMShareDriver(self._db, execute=self._execute, configuration=self.fake_conf) self._driver._helpers = { 'CIFS': self._helper_cifs, 
'NFS': self._helper_nfs, } self.share = fake_share() self.access = fake_access() self.snapshot = fake_snapshot() def tearDown(self): super(LVMShareDriverTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_do_setup(self): CONF.set_default('share_lvm_helpers', ['NFS=fakenfs']) lvm.importutils = mock.Mock() lvm.importutils.import_class.return_value = self._helper_nfs self._driver.do_setup(self._context) lvm.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) def test_check_for_setup_error(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake1\n fakevg\n fake2\n', '' expected_exec = [ 'vgs --noheadings -o name', ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) ret = self._driver.check_for_setup_error() self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_check_for_setup_error_no_vg(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake0\n fake1\n fake2\n', '' fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', exec_runner)]) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_no_export_ip(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake1\n fakevg\n fake2\n', '' fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', exec_runner)]) CONF.set_default('share_export_ip', None) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_local_path_normal(self): share = fake_share(name='fake_sharename') CONF.set_default('share_volume_group', 'fake_vg') ret = self._driver._local_path(share) self.assertEqual(ret, '/dev/mapper/fake_vg-fake_sharename') def test_local_path_escapes(self): share = fake_share(name='fake-sharename') CONF.set_default('share_volume_group', 'fake-vg') ret = self._driver._local_path(share) self.assertEqual(ret, '/dev/mapper/fake--vg-fake--sharename') 
def test_create_share(self): self._helper_nfs.create_export.return_value = 'fakelocation' self._driver._mount_device = mock.Mock() ret = self._driver.create_share(self._context, self.share) CONF.set_default('share_lvm_mirrors', 0) self._driver._mount_device.assert_called_with( self.share, '/dev/mapper/fakevg-fakename') expected_exec = [ 'lvcreate -L 1G -n fakename fakevg', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, 'fakelocation') def test_create_share_from_snapshot(self): CONF.set_default('share_lvm_mirrors', 0) self._driver._mount_device = mock.Mock() mount_share = '/dev/mapper/fakevg-fakename' mount_snapshot = '/dev/mapper/fakevg-fakesnapshotname' self._helper_nfs.create_export.return_value = 'fakelocation' mount_path = self._get_mount_path(self.share) ret = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot) self._driver._mount_device.assert_called_with(self.share, mount_snapshot) expected_exec = [ 'lvcreate -L 1G -n fakename fakevg', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ("dd count=0 if=%s of=%s iflag=direct oflag=direct" % (mount_snapshot, mount_share)), ("dd if=%s of=%s count=1024 bs=1M iflag=direct oflag=direct" % (mount_snapshot, mount_share)), ] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_create_share_mirrors(self): share = fake_share(size='2048') CONF.set_default('share_lvm_mirrors', 2) self._helper_nfs.create_export.return_value = 'fakelocation' self._driver._mount_device = mock.Mock() ret = self._driver.create_share(self._context, share) self._driver._mount_device.assert_called_with( share, '/dev/mapper/fakevg-fakename') expected_exec = [ 'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, 'fakelocation') def test_deallocate_container(self): expected_exec = 
['lvremove -f fakevg/fakename'] ret = self._driver._deallocate_container(self.share['name']) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_get_share_stats(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fakevg 5.38 4.30\n', '' expected_exec = [ 'vgs --noheadings --nosuffix --unit=G -o name,size,free fakevg', ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) CONF.set_default('reserved_share_percentage', 1) ret = self._driver.get_share_stats(refresh=True) expected_ret = { 'share_backend_name': 'LVM', 'vendor_name': 'Open Source', 'driver_version': '1.0', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 5.38, 'free_capacity_gb': 4.30, 'reserved_percentage': 1, 'QoS_support': False, } self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, expected_ret) def test_get_share_stats_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError() expected_exec = [ 'vgs --noheadings --nosuffix --unit=G -o name,size,free fakevg', ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) CONF.set_default('reserved_share_percentage', 1) ret = self._driver.get_share_stats(refresh=True) expected_ret = { 'share_backend_name': 'LVM', 'vendor_name': 'Open Source', 'driver_version': '1.0', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'reserved_percentage': 1, 'QoS_support': False, } self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, expected_ret) def test_remove_export(self): mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self._driver._remove_export(self._context, self.share) expected_exec = [ "umount -f %s" % (mount_path,), ] self._os.path.exists.assert_called_with(mount_path) self._os.rmdir.assert_called_with(mount_path) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def 
test_remove_export_is_busy_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='device is busy') self._os.path.exists.return_value = True mount_path = self._get_mount_path(self.share) expected_exec = [ "umount -f %s" % (mount_path), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ShareIsBusy, self._driver._remove_export, self._context, self.share) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_remove_export_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='fake error') mount_path = self._get_mount_path(self.share) expected_exec = [ "umount -f %s" % (mount_path), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self._os.path.exists.return_value = True self._driver._remove_export(self._context, self.share) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_create_snapshot(self): self._driver.create_snapshot(self._context, self.snapshot) expected_exec = [ ("lvcreate -L 1G --name fakesnapshotname --snapshot %s/fakename" % (CONF.share_volume_group,)), ] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_ensure_share(self): mount_path = self._get_mount_path(self.share) self.mox.StubOutWithMock(self._driver, '_mount_device') self._driver._mount_device(self.share, '/dev/mapper/fakevg-fakename').\ AndReturn(mount_path) self._helper_nfs.create_export(mount_path, self.share['name'], recreate=True).AndReturn('fakelocation') self.mox.ReplayAll() self._driver.ensure_share(self._context, self.share) def test_delete_share(self): mount_path = self._get_mount_path(self.share) self._helper_nfs.remove_export(mount_path, self.share['name']) self._driver._delete_share(self._context, self.share) def test_delete_snapshot(self): expected_exec = ['lvremove -f fakevg/fakesnapshotname'] 
self._driver.delete_snapshot(self._context, self.snapshot) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_delete_share_invalid_share(self): self._driver._get_helper = mock.Mock( side_effect=exception.InvalidShare(reason='fake')) self._driver.delete_share(self._context, self.share) def test_allow_access(self): mount_path = self._get_mount_path(self.share) self._helper_nfs.allow_access(mount_path, self.share['name'], self.access['access_type'], self.access['access_to']) self._driver.allow_access(self._context, self.share, self.access) def test_deny_access(self): mount_path = self._get_mount_path(self.share) self._helper_nfs.deny_access(mount_path, self.share['name'], self.access['access_type'], self.access['access_to']) self._driver.deny_access(self._context, self.share, self.access) def test_mount_device(self): mount_path = self._get_mount_path(self.share) ret = self._driver._mount_device(self.share, 'fakedevice') expected_exec = [ "mkdir -p %s" % (mount_path,), "mount fakedevice %s" % (mount_path,), "chmod 777 %s" % (mount_path,), ] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, mount_path) def test_mount_device_already(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') mount_path = self._get_mount_path(self.share) expected_exec = [ "mkdir -p %s" % (mount_path,), "mount fakedevice %s" % (mount_path,), ] fake_utils.fake_execute_set_repliers([(expected_exec[1], exec_runner)]) ret = self._driver._mount_device(self.share, 'fakedevice') self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertEqual(ret, mount_path) def test_mount_device_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='fake error') mount_path = self._get_mount_path(self.share) expected_exec = [ "mkdir -p %s" % (mount_path,), "mount fakedevice %s" % (mount_path,), ] 
fake_utils.fake_execute_set_repliers([(expected_exec[1], exec_runner)]) self.assertRaises(exception.ProcessExecutionError, self._driver._mount_device, self.share, 'fakedevice') def test_get_helper(self): share_cifs = fake_share(share_proto='CIFS') share_nfs = fake_share(share_proto='NFS') share_fake = fake_share(share_proto='FAKE') self.assertEqual(self._driver._get_helper(share_cifs), self._helper_cifs) self.assertEqual(self._driver._get_helper(share_nfs), self._helper_nfs) self.assertRaises(exception.InvalidShare, self._driver._get_helper, fake_share(share_proto='FAKE')) def _get_mount_path(self, share): return os.path.join(CONF.share_export_root, share['name']) class NFSHelperTestCase(test.TestCase): """Test case for NFS driver.""" def setUp(self): super(NFSHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self.stubs) CONF.set_default('share_export_ip', '127.0.0.1') self._execute = fake_utils.fake_execute self.fake_conf = Configuration(None) self._helper = lvm.NFSHelper(self._execute, self.fake_conf) fake_utils.fake_execute_clear_log() def tearDown(self): super(NFSHelperTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_failed_init(self): self._execute = mock.Mock(side_effect=exception.ProcessExecutionError) self.assertRaises(exception.Error, lvm.NFSHelper.__init__, self._helper, self._execute, self.fake_conf) def test_create_export(self): ret = self._helper.create_export('/opt/nfs', 'volume-00001') expected_location = '%s:/opt/nfs' % CONF.share_export_ip self.assertEqual(ret, expected_location) def test_remove_export(self): self._helper.remove_export('/opt/nfs', 'volume-00001') def test_allow_access(self): self._helper.allow_access('/opt/nfs', 'volume-00001', 'ip', '10.0.0.*') export_string = '10.0.0.*:/opt/nfs' expected_exec = [ 'exportfs', 'exportfs -o rw,no_subtree_check %s' % export_string, ] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def 
test_allow_access_no_ip(self): self.assertRaises(exception.InvalidShareAccess, self._helper.allow_access, '/opt/nfs', 'share0', 'fake', 'fakerule') def test_allow_access_negative(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n/opt/nfs\t\t10.0.0.*\n', '' fake_utils.fake_execute_set_repliers([('exportfs', exec_runner)]) self.assertRaises(exception.ShareAccessExists, self._helper.allow_access, '/opt/nfs', 'volume-00001', 'ip', '10.0.0.*') def test_deny_access(self): self._helper.deny_access('/opt/nfs', 'volume-00001', 'ip', '10.0.0.*') export_string = '10.0.0.*:/opt/nfs' expected_exec = ['exportfs -u %s' % export_string] self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) class CIFSNetConfHelperTestCase(test.TestCase): """Test case for CIFS driver with net conf management.""" def setUp(self): super(CIFSNetConfHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self.stubs) CONF.set_default('share_export_ip', '127.0.0.1') self.share = fake_share() self._execute = fake_utils.fake_execute self.fake_conf = Configuration(None) self._helper = lvm.CIFSNetConfHelper(self._execute, self.fake_conf) fake_utils.fake_execute_clear_log() def tearDown(self): super(CIFSNetConfHelperTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_create_export(self): share_name = self.share['name'] self._helper._execute = mock.Mock() parameters = { 'browseable': 'yes', 'create mask': '0755', 'hosts deny': '0.0.0.0/0', 'hosts allow': '127.0.0.1', } ret = self._helper.create_export('fakelocalpath', share_name) calls = [mock.call('net', 'conf', 'addshare', share_name, 'fakelocalpath', 'writeable=y', 'guest_ok=y', run_as_root=True)] for name, value in parameters.items(): calls.append(mock.call('net', 'conf', 'setparm', share_name, name, value, run_as_root=True)) self._helper._execute.assert_has_calls(calls) expected_ret = "//127.0.0.1/%s" % (share_name,) self.assertEqual(ret, expected_ret) def 
test_create_export_already_exists(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already exists') expected_exec = [ "net conf addshare %s %s writeable=y guest_ok=y" % ( self.share['name'], 'fakelocalpath', ), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ShareBackendException, self._helper.create_export, 'fakelocalpath', self.share['name']) def test_create_export_recreate(self): share_name = self.share['name'] def raise_exec_error(): raise exception.ProcessExecutionError(stderr="already exists") execute_return_values = [raise_exec_error, ''] parameters = { 'browseable': 'yes', 'create mask': '0755', 'hosts deny': '0.0.0.0/0', 'hosts allow': '127.0.0.1', } execute_return_values.extend([''] * len(parameters)) self._helper._execute = mock.Mock(side_effect=execute_return_values) ret = self._helper.create_export('fakelocalpath', share_name, recreate=True) expected_ret = "//127.0.0.1/%s" % (share_name,) calls = [mock.call('net', 'conf', 'setparm', share_name, name, value, run_as_root=True) for name, value in parameters.items()] self._helper._execute.assert_has_calls(calls) self.assertEqual(ret, expected_ret) def test_create_export_error(self): share_name = self.share['name'] def raise_exec_error(*args, **kwargs): raise exception.ProcessExecutionError(stderr="fake_stderr") self._helper._execute = mock.Mock( side_effect=raise_exec_error) self.assertRaises(exception.ProcessExecutionError, self._helper.create_export, 'fakelocalpath', share_name) def test_remove_export(self): share_name = self.share['name'] self._helper._execute = mock.Mock() self._helper.remove_export('fakelocalpath', share_name) self._helper._execute.assert_called_with('smbcontrol', 'all', 'close-share', share_name, run_as_root=True) def test_remove_export_no_such_service(self): share_name = self.share['name'] def exec_return(*args, **kwargs): if 'net' in args: raise exception.ProcessExecutionError( 
stderr='SBC_ERR_NO_SUCH_SERVICE') self._helper._execute = mock.Mock(side_effect=exec_return) self._helper.remove_export('fakelocalpath', share_name) self._helper._execute.assert_called_with( 'smbcontrol', 'all', 'close-share', share_name, run_as_root=True) def test_remove_export_error(self): share_name = self.share['name'] def raise_exec_error(*args, **kwargs): raise exception.ProcessExecutionError(stderr="fake_stderr") self._helper._execute = mock.Mock( side_effect=raise_exec_error) self.assertRaises(exception.ProcessExecutionError, self._helper.remove_export, 'fakelocalpath', share_name) def test_allow_access(self): share_name = self.share['name'] self._helper._get_allow_hosts = mock.Mock(return_value=['127.0.0.1', '10.0.0.1']) self._helper._set_allow_hosts = mock.Mock() self._helper.allow_access('fakelocalpath', share_name, 'ip', '10.0.0.2') self._helper._set_allow_hosts.assert_called_with( ['127.0.0.1', '10.0.0.1', '10.0.0.2'], share_name) def test_allow_access_exists(self): share_name = self.share['name'] self._helper._get_allow_hosts = mock.Mock(return_value=['127.0.0.1', '10.0.0.1']) self.assertRaises(exception.ShareAccessExists, self._helper.allow_access, 'fakelocalpath', share_name, 'ip', '10.0.0.1') def test_allow_access_wrong_type(self): share_name = self.share['name'] self.assertRaises(exception.InvalidShareAccess, self._helper.allow_access, 'fakelocalpath', share_name, 'fake', 'fake access') def test_deny_access(self): share_name = self.share['name'] self._helper._get_allow_hosts = mock.Mock(return_value=['127.0.0.1', '10.0.0.1']) self._helper._set_allow_hosts = mock.Mock() self._helper.deny_access('fakelocalpath', share_name, 'ip', '10.0.0.1') self._helper._set_allow_hosts.assert_called_with( ['127.0.0.1'], share_name) def test_deny_access_not_exists(self): share_name = self.share['name'] def raise_exec_error(*args, **kwargs): raise exception.ProcessExecutionError(stdout="does not exist") self._helper._get_allow_hosts = 
mock.Mock(side_effect=raise_exec_error) self.assertRaises(exception.ProcessExecutionError, self._helper.deny_access, 'fakelocalpath', share_name, 'ip', '10.0.0.1') def test_deny_access_not_exists_force(self): share_name = self.share['name'] def raise_exec_error(*args, **kwargs): raise exception.ProcessExecutionError(stdout="does not exist") self._helper._get_allow_hosts = mock.Mock(side_effect=raise_exec_error) self._helper.deny_access('fakelocalpath', share_name, 'ip', '10.0.0.1', force=True) def test_deny_access_error(self): share_name = self.share['name'] def raise_exec_error(*args, **kwargs): raise exception.ProcessExecutionError(stdout="fake out") self._helper._get_allow_hosts = mock.Mock(side_effect=raise_exec_error) self.assertRaises(exception.ProcessExecutionError, self._helper.deny_access, 'fakelocalpath', share_name, 'ip', '10.0.0.1') def test_get_allow_hosts(self): share_name = self.share['name'] self._helper._execute = mock.Mock(return_value=( '127.0.0.1 10.0.0.1', '')) ret = self._helper._get_allow_hosts(share_name) self.assertEqual(ret, ['127.0.0.1', '10.0.0.1']) def test_set_allow_hosts(self): share_name = self.share['name'] self._helper._execute = mock.Mock() self._helper._set_allow_hosts(['127.0.0.1', '10.0.0.1'], share_name) self._helper._execute.assert_called_with( 'net', 'conf', 'setparm', share_name, 'hosts allow', '127.0.0.1 10.0.0.1', run_as_root=True) manila-2013.2.dev175.gbf1a399/manila/tests/test_share_generic.py0000664000175000017500000017050712301410454024334 0ustar chuckchuck00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Generic driver module.""" import copy import mock import os from manila import context from manila import compute from manila import exception from manila.network.neutron import api as neutron from manila import volume from manila.share.configuration import Configuration from manila.share.drivers import generic from manila import test from manila.tests.db import fakes as db_fakes from manila.tests import fake_compute from manila.tests import fake_network from manila.tests import fake_utils from manila.tests import fake_volume from oslo.config import cfg CONF = cfg.CONF def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake share network id', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_snapshot(**kwargs): snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } snapshot.update(kwargs) return db_fakes.FakeModel(snapshot) def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) class GenericShareDriverTestCase(test.TestCase): """Tests GenericShareDriver.""" def setUp(self): super(GenericShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._execute = mock.Mock(return_value=('', '')) self._helper_cifs 
= mock.Mock() self._helper_nfs = mock.Mock() self.fake_conf = Configuration(None) self._db = mock.Mock() self._driver = generic.GenericShareDriver(self._db, execute=self._execute, configuration=self.fake_conf) self._driver.service_tenant_id = 'service tenant id' self._driver.service_network_id = 'service network id' self._driver.neutron_api = fake_network.API() self._driver.compute_api = fake_compute.API() self._driver.volume_api = fake_volume.API() self._driver.share_networks_locks = {} self._driver.share_networks_servers = {} self._driver.admin_context = self._context self._driver.vif_driver = mock.Mock() self.stubs.Set(generic, '_ssh_exec', mock.Mock()) self.stubs.Set(generic, 'synchronized', mock.Mock(side_effect= lambda f: f)) self.stubs.Set(generic.os.path, 'exists', mock.Mock(return_value=True)) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share() self.access = fake_access() self.snapshot = fake_snapshot() def test_do_setup(self): self.stubs.Set(neutron, 'API', mock.Mock()) self.stubs.Set(volume, 'API', mock.Mock()) self.stubs.Set(compute, 'API', mock.Mock()) self.stubs.Set(self._driver, '_setup_connectivity_with_service_instances', mock.Mock()) self.stubs.Set(self._driver, '_get_service_network', mock.Mock(return_value='fake network id')) self.stubs.Set(self._driver, '_setup_helpers', mock.Mock()) self._driver.do_setup(self._context) neutron.API.assert_called_once() volume.API.assert_called_once() compute.API.assert_called_once() self._driver._setup_helpers.assert_called_once() self._driver._setup_connectivity_with_service_instances.\ assert_called_once() self.assertEqual(self._driver.service_network_id, 'fake network id') def test_do_setup_exception(self): self.stubs.Set(neutron, 'API', mock.Mock()) neutron.API.return_value = fake_network.API() self.stubs.Set(volume, 'API', mock.Mock()) self.stubs.Set(compute, 'API', mock.Mock()) self.stubs.Set(neutron.API, 'admin_tenant_id', mock.Mock()) 
neutron.API.admin_tenant_id.side_effect = Exception self.assertRaises(exception.ManilaException, self._driver.do_setup, self._context) def test_get_service_network_net_exists(self): net1 = copy.copy(fake_network.API.network) net2 = copy.copy(fake_network.API.network) net1['name'] = CONF.service_network_name net1['id'] = 'fake service network id' self.stubs.Set(self._driver.neutron_api, 'get_all_tenant_networks', mock.Mock(return_value=[net1, net2])) result = self._driver._get_service_network() self.assertEqual(result, net1['id']) def test_get_service_network_net_does_not_exists(self): net = fake_network.FakeNetwork() self.stubs.Set(self._driver.neutron_api, 'get_all_tenant_networks', mock.Mock(return_value=[])) self.stubs.Set(self._driver.neutron_api, 'network_create', mock.Mock(return_value=net)) result = self._driver._get_service_network() self.assertEqual(result, net['id']) def test_get_service_network_ambiguos(self): net = fake_network.FakeNetwork(name=CONF.service_network_name) self.stubs.Set(self._driver.neutron_api, 'get_all_tenant_networks', mock.Mock(return_value=[net, net])) self.assertRaises(exception.ManilaException, self._driver._get_service_network) def test_setup_helpers(self): CONF.set_default('share_helpers', ['NFS=fakenfs']) self.stubs.Set(generic.importutils, 'import_class', mock.Mock(return_value=self._helper_nfs)) self._driver._setup_helpers() generic.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) self._helper_nfs.assert_called_once_with(self._execute, self.fake_conf, self._driver.share_networks_locks) self.assertEqual(len(self._driver._helpers), 1) def test_create_share(self): self._helper_nfs.create_export.return_value = 'fakelocation' methods = ('_get_service_instance', '_allocate_container', '_attach_volume', '_format_device', '_mount_device') for method in methods: self.stubs.Set(self._driver, method, mock.Mock()) result = self._driver.create_share(self._context, self.share) for method in methods: getattr(self._driver, 
method).assert_called_once() self.assertEqual(result, 'fakelocation') def test_create_share_exception(self): share = fake_share(share_network_id=None) self.assertRaises(exception.ManilaException, self._driver.create_share, self._context, share) def test_format_device(self): volume = {'mountpoint': 'fake_mount_point'} self._driver._format_device('fake_server', volume) generic._ssh_exec.assert_called_once_with('fake_server', ['sudo', 'mkfs.ext4', volume['mountpoint']]) def _test_mount_device(self): volume = {'mountpoint': 'fake_mount_point'} self.stubs.Set(self._driver, '_get_mount_path', mock.Mock(return_value='fake_mount_path')) self._driver._mount_device(self._context, self.share, 'fake_server', volume) generic._ssh_exec.assert_has_calls([ mock.call('fake_server', ['sudo', 'mkdir', '-p', 'fake_mount_path', ';', 'sudo', 'mount', volume['mountpoint'], 'fake_mount_path']), mock.call('fake_server', ['sudo', 'chmod', '777', 'fake_mount_path']) ]) def test_mount_device_exception_01(self): volume = {'mountpoint': 'fake_mount_point'} generic._ssh_exec.side_effect = [ exception.ProcessExecutionError(stderr='already mounted'), None] self.stubs.Set(self._driver, '_get_mount_path', mock.Mock(return_value='fake_mount_path')) self._driver._mount_device(self._context, self.share, 'fake_server', volume) generic._ssh_exec.assert_has_calls([ mock.call('fake_server', ['sudo', 'mkdir', '-p', 'fake_mount_path', ';', 'sudo', 'mount', volume['mountpoint'], 'fake_mount_path']), mock.call('fake_server', ['sudo', 'chmod', '777', 'fake_mount_path']) ]) def test_mount_device_exception_02(self): volume = {'mountpoint': 'fake_mount_point'} generic._ssh_exec.side_effect = exception.ManilaException self.stubs.Set(self._driver, '_get_mount_path', mock.Mock(return_value='fake_mount_path')) self.assertRaises(exception.ManilaException, self._driver._mount_device, self._context, self.share, 'fake_server', volume) def test_umount_device(self): self.stubs.Set(self._driver, '_get_mount_path', 
mock.Mock(return_value='fake_mount_path')) self._driver._unmount_device(self._context, self.share, 'fake_server') generic._ssh_exec.assert_called_once_with('fake_server', ['sudo', 'umount', 'fake_mount_path', ';', 'sudo', 'rmdir', 'fake_mount_path']) def test_get_mount_path(self): result = self._driver._get_mount_path(self.share) self.assertEqual(result, os.path.join(CONF.share_mount_path, self.share['name'])) def test_attach_volume_not_attached(self): fake_server = fake_compute.FakeServer() availiable_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.stubs.Set(self._driver, '_get_device_path', mock.Mock(return_value='fake_device_path')) self.stubs.Set(self._driver.compute_api, 'instance_volume_attach', mock.Mock()) self.stubs.Set(self._driver.volume_api, 'get', mock.Mock(return_value=attached_volume)) result = self._driver._attach_volume(self._context, self.share, fake_server, availiable_volume) self._driver._get_device_path.assert_called_once_with(self._context, fake_server) self._driver.compute_api.instance_volume_attach.\ assert_called_once_with(self._context, fake_server['id'], availiable_volume['id'], 'fake_device_path') self._driver.volume_api.get.\ assert_called_once_with(self._context, attached_volume['id']) self.assertEqual(result, attached_volume) def test_attach_volume_attached_correct(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='in-use') self.stubs.Set(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) result = self._driver._attach_volume(self._context, self.share, fake_server, attached_volume) self.assertEqual(result, attached_volume) def test_attach_volume_attached_incorrect(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='in-use') anoter_volume = fake_volume.FakeVolume(id='fake_id2', status='in-use') self.stubs.Set(self._driver.compute_api, 'instance_volumes_list', 
mock.Mock(return_value=[anoter_volume])) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, attached_volume) def test_attach_volume_failed_attach(self): fake_server = fake_compute.FakeServer() availiable_volume = fake_volume.FakeVolume() self.stubs.Set(self._driver, '_get_device_path', mock.Mock(return_value='fake_device_path')) self.stubs.Set(self._driver.compute_api, 'instance_volume_attach', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, availiable_volume) def test_attach_volume_error(self): fake_server = fake_compute.FakeServer() availiable_volume = fake_volume.FakeVolume() error_volume = fake_volume.FakeVolume(status='error') self.stubs.Set(self._driver, '_get_device_path', mock.Mock(return_value='fake_device_path')) self.stubs.Set(self._driver.compute_api, 'instance_volume_attach', mock.Mock()) self.stubs.Set(self._driver.volume_api, 'get', mock.Mock(return_value=error_volume)) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, availiable_volume) def test_get_volume(self): volume = fake_volume.FakeVolume( display_name=CONF.volume_name_template % self.share['id']) self.stubs.Set(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume])) result = self._driver._get_volume(self._context, self.share['id']) self.assertEqual(result, volume) def test_get_volume_none(self): self.stubs.Set(self._driver.volume_api, 'get_all', mock.Mock(return_value=[])) result = self._driver._get_volume(self._context, self.share['id']) self.assertEqual(result, None) def test_get_volume_error(self): volume = fake_volume.FakeVolume( display_name=CONF.volume_name_template % self.share['id']) self.stubs.Set(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume, volume])) self.assertRaises(exception.ManilaException, 
self._driver._get_volume, self._context, self.share['id']) def test_get_volume_snapshot(self): volume_snapshot = fake_volume.FakeVolumeSnapshot(display_name= CONF.volume_snapshot_name_template % self.snapshot['id']) self.stubs.Set(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(result, volume_snapshot) def test_get_volume_snapshot_none(self): self.stubs.Set(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[])) result = self._driver._get_volume_snapshot(self._context, self.share['id']) self.assertEqual(result, None) def test_get_volume_snapshot_error(self): volume_snapshot = fake_volume.FakeVolumeSnapshot(display_name= CONF.volume_snapshot_name_template % self.snapshot['id']) self.stubs.Set(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot, volume_snapshot])) self.assertRaises(exception.ManilaException, self._driver._get_volume_snapshot, self._context, self.share['id']) def test_detach_volume(self): fake_server = fake_compute.FakeServer() availiable_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.stubs.Set(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.stubs.Set(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) self.stubs.Set(self._driver.compute_api, 'instance_volume_detach', mock.Mock()) self.stubs.Set(self._driver.volume_api, 'get', mock.Mock(return_value=availiable_volume)) self._driver._detach_volume(self._context, self.share, fake_server) self._driver.compute_api.instance_volume_detach.\ assert_called_once_with(self._context, fake_server['id'], availiable_volume['id']) self._driver.volume_api.get.\ assert_called_once_with(self._context, availiable_volume['id']) def test_detach_volume_detached(self): fake_server = fake_compute.FakeServer() 
availiable_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.stubs.Set(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.stubs.Set(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.stubs.Set(self._driver.volume_api, 'get', mock.Mock(return_value=availiable_volume)) self.stubs.Set(self._driver.compute_api, 'instance_volume_detach', mock.Mock()) self._driver._detach_volume(self._context, self.share, fake_server) self.assertFalse(self._driver.volume_api.get.called) self.assertFalse(self._driver.compute_api. instance_volume_detach.called) def test_get_device_path_01(self): fake_server = fake_compute.FakeServer() vol_list = [[], [fake_volume.FakeVolume(device='/dev/vdc')], [fake_volume.FakeVolume(device='/dev/vdd')]] self.stubs.Set(self._driver.compute_api, 'instance_volumes_list', mock.Mock(side_effect=lambda x, y: vol_list.pop())) result = self._driver._get_device_path(self._context, fake_server) self.assertEqual(result, '/dev/vdb') def test_get_device_path_02(self): fake_server = fake_compute.FakeServer() vol_list = [[fake_volume.FakeVolume(device='/dev/vdb')], [fake_volume.FakeVolume(device='/dev/vdb'), fake_volume.FakeVolume(device='/dev/vdd')]] self.stubs.Set(self._driver.compute_api, 'instance_volumes_list', mock.Mock(side_effect=lambda x, y: vol_list.pop())) result = self._driver._get_device_path(self._context, fake_server) self.assertEqual(result, '/dev/vdc') def test_get_service_instance_name(self): result = self._driver._get_service_instance_name(self.share) self.assertEqual(result, CONF.service_instance_name_template % self.share['share_network_id']) def test_get_server_ip(self): fake_server = fake_compute.FakeServer(networks= {CONF.service_network_name: '10.254.0.1'}) result = self._driver._get_server_ip(fake_server) self.assertEqual(result, fake_server['networks'][CONF.service_network_name][0]) def test_get_server_ip_exception(self): fake_server = 
fake_compute.FakeServer(networks={}) self.assertRaises(exception.ManilaException, self._driver._get_server_ip, fake_server) def test_get_service_instance(self): fake_server = fake_compute.FakeServer() self.stubs.Set(self._driver, '_ensure_or_delete_server', mock.Mock(return_value=True)) self.stubs.Set(self._driver, '_get_server_ip', mock.Mock(return_value='fake_ip')) self.stubs.Set(self._driver.compute_api, 'server_list', mock.Mock(return_value=[])) self.stubs.Set(self._driver, '_create_service_instance', mock.Mock(return_value=fake_server)) self.stubs.Set(self._driver, '_get_ssh_pool', mock.Mock(return_value=mock.Mock())) result = self._driver._get_service_instance(self._context, self.share) self.assertFalse(self._driver._ensure_or_delete_server.called) self._driver._get_ssh_pool.assert_called_once_with(fake_server) self._driver.compute_api.server_list.assert_called_once() self._driver._get_server_ip.assert_called_once() self._driver._create_service_instance.assert_called_once() self.assertEqual(result, fake_server) def test_get_service_instance_existed_in_memory(self): fake_server = fake_compute.FakeServer() self._driver.share_networks_servers = {self.share['share_network_id']: fake_server} self.stubs.Set(self._driver, '_ensure_or_delete_server', mock.Mock(return_value=True)) self.stubs.Set(self._driver.compute_api, 'server_list', mock.Mock(return_value=[fake_server])) self.stubs.Set(self._driver, '_get_ssh_pool', mock.Mock(return_value=mock.Mock())) self.stubs.Set(self._driver, '_create_service_instance', mock.Mock(return_value=fake_server)) result = self._driver._get_service_instance(self._context, self.share) self._driver._ensure_or_delete_server.assert_called_once() self.assertFalse(self._driver._get_ssh_pool.called) self.assertFalse(self._driver.compute_api.server_list.called) self.assertFalse(self._driver._create_service_instance.called) self.assertEqual(result, fake_server) def test_get_service_instance_existed_in_memory_non_active(self): old_fake_server = 
fake_compute.FakeServer(status='ERROR') new_fake_server = fake_compute.FakeServer() self._driver.share_networks_servers = {self.share['share_network_id']: old_fake_server} self.stubs.Set(self._driver, '_ensure_or_delete_server', mock.Mock(return_value=False)) self.stubs.Set(self._driver, '_get_server_ip', mock.Mock(return_value='fake_ip')) self.stubs.Set(self._driver.compute_api, 'server_list', mock.Mock(return_value=[])) self.stubs.Set(self._driver, '_create_service_instance', mock.Mock(return_value=new_fake_server)) self.stubs.Set(self._driver, '_get_ssh_pool', mock.Mock(return_value=mock.Mock())) result = self._driver._get_service_instance(self._context, self.share) self._driver._ensure_or_delete_server.assert_has_calls( [mock.call(self._context, old_fake_server, update=True)]) self._driver._get_ssh_pool.assert_called_once_with(new_fake_server) self._driver.compute_api.server_list.assert_called_once() self._driver._get_server_ip.assert_called_once() self._driver._create_service_instance.assert_called_once() self.assertEqual(result, new_fake_server) def test_get_service_instance_existed(self): fake_server = fake_compute.FakeServer() self.stubs.Set(self._driver, '_ensure_or_delete_server', mock.Mock(return_value=True)) self.stubs.Set(self._driver, '_get_server_ip', mock.Mock(return_value='fake_ip')) self.stubs.Set(self._driver.compute_api, 'server_list', mock.Mock(return_value=[fake_server])) self.stubs.Set(self._driver, '_create_service_instance', mock.Mock()) self.stubs.Set(self._driver, '_get_ssh_pool', mock.Mock(return_value=mock.Mock())) result = self._driver._get_service_instance(self._context, self.share) self._driver._ensure_or_delete_server.assert_called_once() self._driver._get_ssh_pool.assert_called_once_with(fake_server) self._driver.compute_api.server_list.assert_called_once() self._driver._get_server_ip.assert_called_once() self.assertFalse(self._driver._create_service_instance.called) self.assertEqual(result, fake_server) def 
test_ensure_or_delete_server(self): fake_server = fake_compute.FakeServer() self.stubs.Set(self._driver, '_check_server_availability', mock.Mock(return_value=True)) self.stubs.Set(self._driver.compute_api, 'server_get', mock.Mock(return_value=fake_server)) result = self._driver._ensure_or_delete_server(self._context, fake_server, update=True) self._driver.compute_api.server_get.\ assert_called_once_with(self._context, fake_server['id']) self._driver._check_server_availability.\ assert_called_once_with(fake_server) self.assertTrue(result) def test_ensure_or_delete_server_not_exists(self): fake_server = fake_compute.FakeServer() self.stubs.Set(self._driver, '_check_server_availability', mock.Mock(return_value=True)) self.stubs.Set(self._driver.compute_api, 'server_get', mock.Mock(side_effect=exception.InstanceNotFound( instance_id=fake_server['id']))) result = self._driver._ensure_or_delete_server(self._context, fake_server, update=True) self._driver.compute_api.server_get.\ assert_called_once_with(self._context, fake_server['id']) self.assertFalse(self._driver._check_server_availability.called) self.assertFalse(result) def test_ensure_or_delete_server_exception(self): fake_server = fake_compute.FakeServer() self.stubs.Set(self._driver, '_check_server_availability', mock.Mock(return_value=True)) self.stubs.Set(self._driver.compute_api, 'server_get', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self._driver._ensure_or_delete_server, self._context, fake_server, update=True) self._driver.compute_api.server_get.\ assert_called_once_with(self._context, fake_server['id']) self.assertFalse(self._driver._check_server_availability.called) def test_ensure_or_delete_server_non_active(self): fake_server = fake_compute.FakeServer(status='ERROR') self.stubs.Set(self._driver, '_delete_server', mock.Mock()) self.stubs.Set(self._driver, '_check_server_availability', mock.Mock(return_value=True)) result = 
self._driver._ensure_or_delete_server(self._context, fake_server) self.assertFalse(self._driver._check_server_availability.called) self._driver._delete_server.assert_called_once_with(self._context, fake_server) self.assertFalse(result) def test_get_key_create_new(self): fake_keypair = fake_compute.FakeKeypair(name= CONF.manila_service_keypair_name) self.stubs.Set(self._driver.compute_api, 'keypair_list', mock.Mock(return_value=[])) self.stubs.Set(self._driver.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) result = self._driver._get_key(self._context) self.assertEqual(result, fake_keypair.name) self._driver.compute_api.keypair_list.assert_called_once() self._driver.compute_api.keypair_import.assert_called_once() def test_get_key_exists(self): fake_keypair = fake_compute.FakeKeypair( name=CONF.manila_service_keypair_name, public_key='fake_public_key') self.stubs.Set(self._driver.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair])) self.stubs.Set(self._driver.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) self.stubs.Set(self._driver, '_execute', mock.Mock(return_value=('fake_public_key', ''))) result = self._driver._get_key(self._context) self._driver.compute_api.keypair_list.assert_called_once() self.assertFalse(self._driver.compute_api.keypair_import.called) self.assertEqual(result, fake_keypair.name) def test_get_key_exists_recreate(self): fake_keypair = fake_compute.FakeKeypair( name=CONF.manila_service_keypair_name, public_key='fake_public_key1') self.stubs.Set(self._driver.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair])) self.stubs.Set(self._driver.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) self.stubs.Set(self._driver.compute_api, 'keypair_delete', mock.Mock()) self.stubs.Set(self._driver, '_execute', mock.Mock(return_value=('fake_public_key2', ''))) result = self._driver._get_key(self._context) self._driver.compute_api.keypair_list.assert_called_once() 
self._driver.compute_api.keypair_delete.assert_called_once() self._driver.compute_api.keypair_import.\ assert_called_once_with(self._context, fake_keypair.name, 'fake_public_key2') self.assertEqual(result, fake_keypair.name) def test_get_service_image(self): fake_image1 = fake_compute.FakeImage(name=CONF.service_image_name) fake_image2 = fake_compute.FakeImage(name='another-image') self.stubs.Set(self._driver.compute_api, 'image_list', mock.Mock(return_value=[fake_image1, fake_image2])) result = self._driver._get_service_image(self._context) self.assertEqual(result, fake_image1.id) def test_get_service_image_not_found(self): self.stubs.Set(self._driver.compute_api, 'image_list', mock.Mock(return_value=[])) self.assertRaises(exception.ManilaException, self._driver._get_service_image, self._context) def test_get_service_image_ambiguous(self): fake_image = fake_compute.FakeImage(name=CONF.service_image_name) self.stubs.Set(self._driver.compute_api, 'image_list', mock.Mock(return_value=[fake_image, fake_image])) self.assertRaises(exception.ManilaException, self._driver._get_service_image, self._context) def test_create_service_instance(self): fake_server = fake_compute.FakeServer() fake_port = fake_network.FakePort() self.stubs.Set(self._driver, '_get_service_image', mock.Mock(return_value='fake_image_id')) self.stubs.Set(self._driver, '_get_key', mock.Mock(return_value='fake_key_name')) self.stubs.Set(self._driver, '_setup_network_for_instance', mock.Mock(return_value=fake_port)) self.stubs.Set(self._driver, '_setup_connectivity_with_service_instances', mock.Mock()) self.stubs.Set(self._driver.compute_api, 'server_create', mock.Mock(return_value=fake_server)) self.stubs.Set(self._driver, '_get_server_ip', mock.Mock(return_value='fake_ip')) self.stubs.Set(generic.socket, 'socket', mock.Mock()) result = self._driver._create_service_instance(self._context, 'instance_name', self.share, None) self._driver._get_service_image.assert_called_once() 
self._driver._get_key.assert_called_once() self._driver._setup_network_for_instance.assert_called_once() self._driver._setup_connectivity_with_service_instances.\ assert_called_once() self._driver.compute_api.server_create.assert_called_once_with( self._context, 'instance_name', 'fake_image_id', CONF.service_instance_flavor_id, 'fake_key_name', None, None, nics=[{'port-id': fake_port['id']}]) generic.socket.socket.assert_called_once() self.assertEqual(result, fake_server) def test_create_service_instance_error(self): fake_server = fake_compute.FakeServer(status='ERROR') fake_port = fake_network.FakePort() self.stubs.Set(self._driver, '_get_service_image', mock.Mock(return_value='fake_image_id')) self.stubs.Set(self._driver, '_get_key', mock.Mock(return_value='fake_key_name')) self.stubs.Set(self._driver, '_setup_network_for_instance', mock.Mock(return_value=fake_port)) self.stubs.Set(self._driver, '_setup_connectivity_with_service_instances', mock.Mock()) self.stubs.Set(self._driver.compute_api, 'server_create', mock.Mock(return_value=fake_server)) self.stubs.Set(self._driver.compute_api, 'server_get', mock.Mock(return_value=fake_server)) self.stubs.Set(generic.socket, 'socket', mock.Mock()) self.assertRaises(exception.ManilaException, self._driver._create_service_instance, self._context, 'instance_name', self.share, None) self._driver.compute_api.server_create.assert_called_once() self.assertFalse(self._driver.compute_api.server_get.called) self.assertFalse(generic.socket.socket.called) def test_create_service_instance_failed_setup_connectivity(self): fake_server = fake_compute.FakeServer(status='ERROR') fake_port = fake_network.FakePort() self.stubs.Set(self._driver, '_get_service_image', mock.Mock(return_value='fake_image_id')) self.stubs.Set(self._driver, '_get_key', mock.Mock(return_value='fake_key_name')) self.stubs.Set(self._driver, '_setup_network_for_instance', mock.Mock(return_value=fake_port)) self.stubs.Set(self._driver, 
'_setup_connectivity_with_service_instances', mock.Mock(side_effect=exception.ManilaException)) self.stubs.Set(self._driver.neutron_api, 'delete_port', mock.Mock()) self.stubs.Set(self._driver.compute_api, 'server_create', mock.Mock(return_value=fake_server)) self.stubs.Set(self._driver.compute_api, 'server_get', mock.Mock(return_value=fake_server)) self.stubs.Set(generic.socket, 'socket', mock.Mock()) self.assertRaises(exception.ManilaException, self._driver._create_service_instance, self._context, 'instance_name', self.share, None) self._driver.neutron_api.delete_port.\ assert_called_once_with(fake_port['id']) self.assertFalse(self._driver.compute_api.server_create.called) self.assertFalse(self._driver.compute_api.server_get.called) self.assertFalse(generic.socket.socket.called) def test_create_service_instance_no_key_and_password(self): self.stubs.Set(self._driver, '_get_service_image', mock.Mock(return_value='fake_image_id')) self.stubs.Set(self._driver, '_get_key', mock.Mock(return_value=None)) self.assertRaises(exception.ManilaException, self._driver._create_service_instance, self._context, 'instance_name', self.share, None) def test_setup_network_for_instance(self): fake_service_net = fake_network.FakeNetwork(subnets=[]) fake_service_subnet = fake_network.\ FakeSubnet(name=self.share['share_network_id']) fake_router = fake_network.FakeRouter() fake_port = fake_network.FakePort() self.stubs.Set(self._driver.neutron_api, 'get_network', mock.Mock(return_value=fake_service_net)) self.stubs.Set(self._driver.neutron_api, 'subnet_create', mock.Mock(return_value=fake_service_subnet)) self.stubs.Set(self._driver.db, 'share_network_get', mock.Mock(return_value='fake_share_network')) self.stubs.Set(self._driver, '_get_private_router', mock.Mock(return_value=fake_router)) self.stubs.Set(self._driver.neutron_api, 'router_add_interface', mock.Mock()) self.stubs.Set(self._driver.neutron_api, 'create_port', mock.Mock(return_value=fake_port)) self.stubs.Set(self._driver, 
'_get_cidr_for_subnet', mock.Mock(return_value='fake_cidr')) result = self._driver._setup_network_for_instance(self._context, self.share, None) self._driver.neutron_api.get_network.\ assert_called_once_with(self._driver.service_network_id) self._driver._get_private_router.\ assert_called_once_with('fake_share_network') self._driver.neutron_api.router_add_interface.\ assert_called_once_with('fake_router_id', 'fake_subnet_id') self._driver.neutron_api.subnet_create.assert_called_once_with( self._driver.service_tenant_id, self._driver.service_network_id, self.share['share_network_id'], 'fake_cidr') self._driver.neutron_api.create_port.assert_called_once_with( self._driver.service_tenant_id, self._driver.service_network_id, subnet_id='fake_subnet_id', fixed_ip=None, device_owner='manila') self._driver._get_cidr_for_subnet.assert_called_once_with([]) self.assertEqual(result, fake_port) def test_get_private_router(self): fake_net = fake_network.FakeNetwork() fake_subnet = fake_network.FakeSubnet(gateway_ip='fake_ip') fake_port = fake_network.FakePort(fixed_ips=[ {'subnet_id': fake_subnet['id'], 'ip_address': fake_subnet['gateway_ip']}], device_id='fake_router_id') fake_router = fake_network.FakeRouter(id='fake_router_id') self.stubs.Set(self._driver.neutron_api, 'get_subnet', mock.Mock(return_value=fake_subnet)) self.stubs.Set(self._driver.neutron_api, 'list_ports', mock.Mock(return_value=[fake_port])) self.stubs.Set(self._driver.neutron_api, 'show_router', mock.Mock(return_value=fake_router)) result = self._driver._get_private_router( {'neutron_subnet_id': fake_subnet['id'], 'neutron_net_id': fake_net['id']}) self._driver.neutron_api.get_subnet.\ assert_called_once_with(fake_subnet['id']) self._driver.neutron_api.list_ports.\ assert_called_once_with(network_id=fake_net['id']) self._driver.neutron_api.show_router.\ assert_called_once_with(fake_router['id']) self.assertEqual(result, fake_router) def test_get_private_router_exception(self): fake_net = 
fake_network.FakeNetwork() fake_subnet = fake_network.FakeSubnet(gateway_ip='fake_ip') self.stubs.Set(self._driver.neutron_api, 'get_subnet', mock.Mock(return_value=fake_subnet)) self.stubs.Set(self._driver.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.assertRaises(exception.ManilaException, self._driver._get_private_router, {'neutron_subnet_id': fake_subnet['id'], 'neutron_net_id': fake_net['id']}) def test_setup_connectivity_with_service_instances(self): fake_subnet = fake_network.FakeSubnet(cidr='10.254.0.1/29') fake_port = fake_network.FakePort(fixed_ips=[ {'subnet_id': fake_subnet['id'], 'ip_address': '10.254.0.2'}], mac_address='fake_mac_address') self.stubs.Set(self._driver, '_setup_service_port', mock.Mock(return_value=fake_port)) self.stubs.Set(self._driver.vif_driver, 'get_device_name', mock.Mock(return_value='fake_interface_name')) self.stubs.Set(self._driver.neutron_api, 'get_subnet', mock.Mock(return_value=fake_subnet)) self.stubs.Set(self._driver, '_clean_garbage', mock.Mock()) self.stubs.Set(self._driver.vif_driver, 'plug', mock.Mock()) device_mock = mock.Mock() self.stubs.Set(generic.ip_lib, 'IPDevice', mock.Mock(return_value=device_mock)) self._driver._setup_connectivity_with_service_instances() self._driver._setup_service_port.assert_called_once() self._driver.vif_driver.get_device_name.\ assert_called_once_with(fake_port) self._driver.vif_driver.plug.assert_called_once_with(fake_port['id'], 'fake_interface_name', fake_port['mac_address']) self._driver.neutron_api.get_subnet.\ assert_called_once_with(fake_subnet['id']) self._driver.vif_driver.init_l3.assert_called_once() generic.ip_lib.IPDevice.assert_called_once() device_mock.route.pullup_route.assert_called_once() self._driver._clean_garbage.assert_called_once_with(device_mock) def test_setup_service_port(self): fake_service_port = fake_network.FakePort(device_id='manila-share') fake_service_net = fake_network.FakeNetwork(subnets=[]) self.stubs.Set(self._driver.neutron_api, 
'list_ports', mock.Mock(return_value=[])) self.stubs.Set(self._driver.db, 'service_get_all_by_topic', mock.Mock(return_value=[{'host': 'fake_host'}])) self.stubs.Set(self._driver.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.stubs.Set(self._driver.neutron_api, 'get_network', mock.Mock(return_value=fake_service_net)) self.stubs.Set(self._driver.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = self._driver._setup_service_port() self._driver.neutron_api.list_ports.\ assert_called_once_with(device_id='manila-share') self._driver.db.service_get_all_by_topic.assert_called_once() self._driver.neutron_api.create_port.assert_called_once_with( self._driver.service_tenant_id, self._driver.service_network_id, device_id='manila-share', device_owner='manila:generic_driver', host_id='fake_host' ) self._driver.neutron_api.get_network.assert_called_once() self.assertFalse(self._driver.neutron_api.update_port_fixed_ips.called) self.assertEqual(result, fake_service_port) def test_setup_service_port_ambigious_ports(self): fake_service_port = fake_network.FakePort(device_id='manila-share') self.stubs.Set(self._driver.neutron_api, 'list_ports', mock.Mock(return_value=[fake_service_port, fake_service_port])) self.assertRaises(exception.ManilaException, self._driver._setup_service_port) def test_setup_service_port_exists(self): fake_service_port = fake_network.FakePort(device_id='manila-share') fake_service_net = fake_network.FakeNetwork(subnets=[]) self.stubs.Set(self._driver.neutron_api, 'list_ports', mock.Mock(return_value=[fake_service_port])) self.stubs.Set(self._driver.db, 'service_get_all_by_topic', mock.Mock(return_value=[{'host': 'fake_host'}])) self.stubs.Set(self._driver.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.stubs.Set(self._driver.neutron_api, 'get_network', mock.Mock(return_value=fake_service_net)) self.stubs.Set(self._driver.neutron_api, 'update_port_fixed_ips', 
mock.Mock(return_value=fake_service_port)) result = self._driver._setup_service_port() self._driver.neutron_api.list_ports.\ assert_called_once_with(device_id='manila-share') self.assertFalse(self._driver.db.service_get_all_by_topic.called) self.assertFalse(self._driver.neutron_api.create_port.called) self._driver.neutron_api.get_network.assert_called_once() self.assertFalse(self._driver.neutron_api.update_port_fixed_ips.called) self.assertEqual(result, fake_service_port) def test_get_cidr_for_subnet(self): serv_cidr = generic.netaddr.IPNetwork(CONF.service_network_cidr) cidrs = serv_cidr.subnet(29) cidr1 = str(cidrs.next()) cidr2 = str(cidrs.next()) result = self._driver._get_cidr_for_subnet([]) self.assertEqual(result, cidr1) fake_subnet = fake_network.FakeSubnet(cidr=cidr1) result = self._driver._get_cidr_for_subnet([fake_subnet]) self.assertEqual(result, cidr2) def test_allocate_container(self): fake_vol = fake_volume.FakeVolume() self.stubs.Set(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share) self.assertEqual(result, fake_vol) self._driver.volume_api.create.assert_called_once_with(self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=None) def test_allocate_container_with_snaphot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot() self.stubs.Set(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap)) self.stubs.Set(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share, self.snapshot) self.assertEqual(result, fake_vol) self._driver.volume_api.create.assert_called_once_with(self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=fake_vol_snap) def test_allocate_container_error(self): fake_vol = fake_volume.FakeVolume(status='error') 
self.stubs.Set(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) self.assertRaises(exception.ManilaException, self._driver._allocate_container, self._context, self.share) def test_deallocate_container(self): fake_vol = fake_volume.FakeVolume() self.stubs.Set(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.stubs.Set(self._driver.volume_api, 'delete', mock.Mock()) self.stubs.Set(self._driver.volume_api, 'get', mock.Mock( side_effect=exception.VolumeNotFound(volume_id=fake_vol['id']))) self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once() self._driver.volume_api.delete.assert_called_once() self._driver.volume_api.get.assert_called_once() def test_create_share_from_snapshot(self): self._helper_nfs.create_export.return_value = 'fakelocation' methods = ('_get_service_instance', '_allocate_container', '_attach_volume', '_mount_device') for method in methods: self.stubs.Set(self._driver, method, mock.Mock()) result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot) for method in methods: getattr(self._driver, method).assert_called_once() self.assertEqual(result, 'fakelocation') def test_delete_share(self): fake_server = fake_compute.FakeServer() self.stubs.Set(self._driver, '_get_service_instance', mock.Mock(return_value=fake_server)) self.stubs.Set(self._driver, '_unmount_device', mock.Mock()) self.stubs.Set(self._driver, '_detach_volume', mock.Mock()) self.stubs.Set(self._driver, '_deallocate_container', mock.Mock()) self._driver.delete_share(self._context, self.share) self._driver._get_service_instance.assert_called_once() self._driver._unmount_device.assert_called_once() self._driver._detach_volume.assert_called_once() self._driver._deallocate_container.assert_called_once() def test_create_snapshot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot() self.stubs.Set(self._driver, '_get_volume', 
mock.Mock(return_value=fake_vol)) self.stubs.Set(self._driver.volume_api, 'create_snapshot_force', mock.Mock(return_value=fake_vol_snap)) self._driver.create_snapshot(self._context, self.snapshot) self._driver._get_volume.assert_called_once() self._driver.volume_api.create_snapshot_force.assert_called_once_with( self._context, fake_vol['id'], CONF.volume_snapshot_name_template % self.snapshot['id'], '' ) def test_delete_snapshot(self): fake_vol_snap = fake_volume.FakeVolumeSnapshot() self.stubs.Set(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap)) self.stubs.Set(self._driver.volume_api, 'delete_snapshot', mock.Mock()) self.stubs.Set(self._driver.volume_api, 'get_snapshot', mock.Mock(side_effect=exception.VolumeSnapshotNotFound( snapshot_id=fake_vol_snap['id']))) self._driver.delete_snapshot(self._context, fake_vol_snap) self._driver._get_volume_snapshot.assert_called_once() self._driver.volume_api.delete_snapshot.assert_called_once() self._driver.volume_api.get_snapshot.assert_called_once() def test_ensure_share(self): self._helper_nfs.create_export.return_value = 'fakelocation' methods = ('_get_service_instance', '_get_volume', '_attach_volume', '_mount_device') for method in methods: self.stubs.Set(self._driver, method, mock.Mock()) self._driver.ensure_share(self._context, self.share) for method in methods: getattr(self._driver, method).assert_called_once() def test_allow_access(self): fake_server = fake_compute.FakeServer() access = {'access_type': 'ip', 'access_to': 'fake_dest'} self.stubs.Set(self._driver, '_get_service_instance', mock.Mock(return_value=fake_server)) self._driver.allow_access(self._context, self.share, access) self._driver._get_service_instance.assert_called_once() self._driver._helpers[self.share['share_proto']].\ allow_access.assert_called_once_with(fake_server, self.share['name'], access['access_type'], access['access_to']) def test_deny_access(self): fake_server = fake_compute.FakeServer() access = 
{'access_type': 'ip', 'access_to': 'fake_dest'} self.stubs.Set(self._driver, '_get_service_instance', mock.Mock(return_value=fake_server)) self._driver.deny_access(self._context, self.share, access) self._driver._get_service_instance.assert_called_once() self._driver._helpers[self.share['share_proto']].\ deny_access.assert_called_once_with(fake_server, self.share['name'], access['access_type'], access['access_to']) class NFSHelperTestCase(test.TestCase): """Test case for NFS helper of generic driver.""" def setUp(self): super(NFSHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self.stubs) self.fake_conf = Configuration(None) self.stubs.Set(generic, '_ssh_exec', mock.Mock(return_value=('', ''))) self._execute = mock.Mock(return_value=('', '')) self._helper = generic.NFSHelper(self._execute, self.fake_conf, {}) def test_create_export(self): fake_server = fake_compute.FakeServer(ip='10.254.0.3') ret = self._helper.create_export(fake_server, 'volume-00001') expected_location = ':'.join([fake_server['ip'], os.path.join(CONF.share_mount_path, 'volume-00001')]) self.assertEqual(ret, expected_location) def test_allow_access(self): fake_server = fake_compute.FakeServer(ip='10.254.0.3') self._helper.allow_access(fake_server, 'volume-00001', 'ip', '10.0.0.2') local_path = os.path.join(CONF.share_mount_path, 'volume-00001') generic._ssh_exec.assert_has_calls([ mock.call(fake_server, ['sudo', 'exportfs']), mock.call(fake_server, ['sudo', 'exportfs', '-o', 'rw,no_subtree_check', ':'.join(['10.0.0.2', local_path])]) ]) def test_allow_access_no_ip(self): self.assertRaises(exception.InvalidShareAccess, self._helper.allow_access, 'fake_server', 'share0', 'fake', 'fakerule') def test_deny_access(self): fake_server = fake_compute.FakeServer(ip='10.254.0.3') local_path = os.path.join(CONF.share_mount_path, 'volume-00001') self._helper.deny_access(fake_server, 'volume-00001', 'ip', '10.0.0.2') export_string = ':'.join(['10.0.0.2', local_path]) expected_exec = ['sudo', 
'exportfs', '-u', export_string] generic._ssh_exec.assert_called_once_with(fake_server, expected_exec) class CIFSHelperTestCase(test.TestCase): """Test case for CIFS helper of generic driver.""" def setUp(self): super(CIFSHelperTestCase, self).setUp() self.fake_conf = Configuration(None) self.stubs.Set(generic, '_ssh_exec', mock.Mock(return_value=('', ''))) self._execute = mock.Mock(return_value=('', '')) self._helper = generic.CIFSHelper(self._execute, self.fake_conf, {}) def test_create_export(self): fake_server = fake_compute.FakeServer(ip='10.254.0.3', share_network_id='fake_share_network_id') self.stubs.Set(self._helper, '_update_config', mock.Mock()) self.stubs.Set(self._helper, '_write_remote_config', mock.Mock()) self.stubs.Set(self._helper, '_restart_service', mock.Mock()) self.stubs.Set(self._helper, '_get_local_config', mock.Mock()) self.stubs.Set(generic.ConfigParser, 'ConfigParser', mock.Mock()) ret = self._helper.create_export(fake_server, 'volume-00001', recreate=True) self._helper._get_local_config.\ assert_called_once_with(fake_server['share_network_id']) self._helper._update_config.assert_called_once() self._helper._write_remote_config.assert_called_once() self._helper._restart_service.assert_called_once() expected_location = '//%s/%s' % (fake_server['ip'], 'volume-00001') self.assertEqual(ret, expected_location) def test_remove_export(self): fake_server = fake_compute.FakeServer(ip='10.254.0.3', share_network_id='fake_share_network_id') self.stubs.Set(generic.ConfigParser, 'ConfigParser', mock.Mock()) self.stubs.Set(self._helper, '_get_local_config', mock.Mock()) self.stubs.Set(self._helper, '_update_config', mock.Mock()) self.stubs.Set(self._helper, '_write_remote_config', mock.Mock()) self._helper.remove_export(fake_server, 'volume-00001') self._helper._get_local_config.assert_called_once() self._helper._update_config.assert_called_once() self._helper._write_remote_config.assert_called_once() 
generic._ssh_exec.assert_called_once_with(fake_server, ['sudo', 'smbcontrol', 'all', 'close-share', 'volume-00001']) def test_allow_access(self): class FakeParser(object): def read(self, *args, **kwargs): pass def get(self, *args, **kwargs): return '' def set(self, *args, **kwargs): pass fake_server = fake_compute.FakeServer(ip='10.254.0.3', share_network_id='fake_share_network_id') self.stubs.Set(generic.ConfigParser, 'ConfigParser', FakeParser) self.stubs.Set(self._helper, '_get_local_config', mock.Mock()) self.stubs.Set(self._helper, '_update_config', mock.Mock()) self.stubs.Set(self._helper, '_write_remote_config', mock.Mock()) self.stubs.Set(self._helper, '_restart_service', mock.Mock()) self._helper.allow_access(fake_server, 'volume-00001', 'ip', '10.0.0.2') self._helper._get_local_config.assert_called_once() self._helper._update_config.assert_called_once() self._helper._write_remote_config.assert_called_once() self._helper._restart_service.assert_called_once() def test_deny_access(self): fake_server = fake_compute.FakeServer(ip='10.254.0.3', share_network_id='fake_share_network_id') self.stubs.Set(generic.ConfigParser, 'ConfigParser', mock.Mock()) self.stubs.Set(self._helper, '_get_local_config', mock.Mock()) self.stubs.Set(self._helper, '_update_config', mock.Mock()) self.stubs.Set(self._helper, '_write_remote_config', mock.Mock()) self.stubs.Set(self._helper, '_restart_service', mock.Mock()) self._helper.deny_access(fake_server, 'volume-00001', 'ip', '10.0.0.2') self._helper._get_local_config.assert_called_once() self._helper._update_config.assert_called_once() self._helper._write_remote_config.assert_called_once() self._helper._restart_service.assert_called_once() manila-2013.2.dev175.gbf1a399/manila/tests/test_conf.py0000664000175000017500000000575112301410454022461 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and 
Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from manila import test CONF = cfg.CONF CONF.register_opt(cfg.StrOpt('conf_unittest', default='foo', help='for testing purposes only')) class ConfigTestCase(test.TestCase): def setUp(self): super(ConfigTestCase, self).setUp() def test_declare(self): self.assertNotIn('answer', CONF) CONF.import_opt('answer', 'manila.tests.declare_conf') self.assertIn('answer', CONF) self.assertEqual(CONF.answer, 42) # Make sure we don't overwrite anything CONF.set_override('answer', 256) self.assertEqual(CONF.answer, 256) CONF.import_opt('answer', 'manila.tests.declare_conf') self.assertEqual(CONF.answer, 256) def test_runtime_and_unknown_flags(self): self.assertNotIn('runtime_answer', CONF) import manila.tests.runtime_conf self.assertIn('runtime_answer', CONF) self.assertEqual(CONF.runtime_answer, 54) def test_long_vs_short_flags(self): CONF.clear() CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long', default='val', help='desc')) CONF.register_cli_opt(cfg.IntOpt('duplicate_answer', default=50, help='desc')) argv = ['--duplicate_answer=60'] CONF(argv, default_config_files=[]) self.assertEqual(CONF.duplicate_answer, 60) self.assertEqual(CONF.duplicate_answer_long, 'val') def test_flag_leak_left(self): self.assertEqual(CONF.conf_unittest, 'foo') self.flags(conf_unittest='bar') self.assertEqual(CONF.conf_unittest, 'bar') def 
test_flag_leak_right(self): self.assertEqual(CONF.conf_unittest, 'foo') self.flags(conf_unittest='bar') self.assertEqual(CONF.conf_unittest, 'bar') def test_flag_overrides(self): self.assertEqual(CONF.conf_unittest, 'foo') self.flags(conf_unittest='bar') self.assertEqual(CONF.conf_unittest, 'bar') CONF.reset() self.assertEqual(CONF.conf_unittest, 'foo') manila-2013.2.dev175.gbf1a399/manila/tests/test_share_driver.py0000664000175000017500000000311112301410454024175 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Share driver module.""" import time from manila import exception from manila.share.configuration import Configuration from manila.share import driver from manila import test from manila import utils def fake_execute_with_raise(*cmd, **kwargs): raise exception.ProcessExecutionError def fake_sleep(duration): pass class ShareDriverTestCase(test.TestCase): def setUp(self): super(ShareDriverTestCase, self).setUp() self.utils = utils self.stubs.Set(self.utils, 'execute', fake_execute_with_raise) self.time = time self.stubs.Set(self.time, 'sleep', fake_sleep) def tearDown(self): super(ShareDriverTestCase, self).tearDown() def test__try_execute(self): execute_mixin = driver.ExecuteMixin(configuration=Configuration(None)) self.assertRaises(exception.ProcessExecutionError, execute_mixin._try_execute) manila-2013.2.dev175.gbf1a399/manila/tests/runtime_conf.py0000664000175000017500000000160512301410454023157 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo.config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag')) manila-2013.2.dev175.gbf1a399/manila/tests/integrated/0000775000175000017500000000000012301410516022240 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/integrated/api/0000775000175000017500000000000012301410516023011 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/integrated/api/__init__.py0000664000175000017500000000141212301410454025121 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`api` -- OpenStack API client, for testing rather than production ================================= """ manila-2013.2.dev175.gbf1a399/manila/tests/integrated/api/client.py0000664000175000017500000001631112301410454024644 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import httplib import urlparse from manila.openstack.common import jsonutils from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: message = 'Unspecified error' if response: _status = response.status _body = response.read() message = _('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s') % locals() super(OpenStackApiException, self).__init__(message) class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Authentication error") super(OpenStackApiAuthenticationException, self).__init__(message, response) class OpenStackApiAuthorizationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Authorization error") super(OpenStackApiAuthorizationException, self).__init__(message, response) class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Item not found") super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): """Simple OpenStack API Client. 
This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, auth_uri): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.auth_uri = auth_uri # default project_id self.project_id = 'openstack' def request(self, url, method='GET', body=None, headers=None): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) parsed_url = urlparse.urlparse(url) port = parsed_url.port hostname = parsed_url.hostname scheme = parsed_url.scheme if scheme == 'http': conn = httplib.HTTPConnection(hostname, port=port) elif scheme == 'https': conn = httplib.HTTPSConnection(hostname, port=port) else: raise OpenStackApiException("Unknown scheme: %s" % url) relative_url = parsed_url.path if parsed_url.query: relative_url = relative_url + "?" + parsed_url.query LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) if body: LOG.info(_("Body: %s") % body) conn.request(method, relative_url, body, _headers) response = conn.getresponse() return response def _authenticate(self): if self.auth_result: return self.auth_result auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(auth_uri, headers=headers) http_status = response.status LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals()) if http_status == 401: raise OpenStackApiAuthenticationException(response=response) auth_headers = {} for k, v in response.getheaders(): auth_headers[k] = v self.auth_result = auth_headers return self.auth_result def api_request(self, relative_uri, check_response_status=None, **kwargs): auth_result = self._authenticate() # NOTE(justinsb): httplib 'helpfully' converts headers to lower case base_uri = auth_result['x-server-management-url'] full_uri = '%s/%s' % (base_uri, relative_uri) headers = 
kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] response = self.request(full_uri, **kwargs) http_status = response.status LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) if check_response_status: if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( message=_("Unexpected status code"), response=response) return response def _decode_json(self, response): body = response.read() LOG.debug(_("Decoding JSON: %s") % (body)) if body: return jsonutils.loads(body) else: return "" def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_post(self, relative_uri, body, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_put(self, relative_uri, body, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202, 204]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [200, 202, 204]) return self.api_request(relative_uri, **kwargs) def get_shares(self, detail=True): rel_url = '/shares/detail' if detail else '/shares' return self.api_get(rel_url)['shares'] 
manila-2013.2.dev175.gbf1a399/manila/tests/integrated/integrated_helpers.py0000664000175000017500000001006612301410454026466 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Provides common functionality for integrated unit tests """ import random import string import uuid from manila.openstack.common import log as logging from manila import service from manila import test # For the flags from manila.tests.integrated.api import client from oslo.config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) def generate_random_alphanumeric(length): """Creates a random alphanumeric string of specified length.""" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _x in range(length)) def generate_random_numeric(length): """Creates a random numeric string of specified length.""" return ''.join(random.choice(string.digits) for _x in range(length)) def generate_new_element(items, prefix, numeric=False): """Creates a random string with prefix, that is not in 'items' list.""" while True: if numeric: candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) if candidate not in items: return candidate LOG.debug("Random collision on %s" % candidate) class _IntegratedTestBase(test.TestCase): def setUp(self): super(_IntegratedTestBase, self).setUp() f = self._get_flags() 
self.flags(**f) self.flags(verbose=True) # set up services self.volume = self.start_service('share') self.scheduler = self.start_service('scheduler') self._start_api_service() self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url) def tearDown(self): self.osapi.stop() super(_IntegratedTestBase, self).tearDown() def _start_api_service(self): self.osapi = service.WSGIService("osapi_share") self.osapi.start() # FIXME(ja): this is not the auth url - this is the service url # FIXME(ja): this needs fixed in nova as well self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port) LOG.warn(self.auth_url) def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} # Ensure tests only listen on localhost f['osapi_share_listen'] = '127.0.0.1' # Auto-assign ports to allow concurrent tests f['osapi_share_listen_port'] = 0 return f def get_unused_server_name(self): servers = self.api.get_servers() server_names = [server['name'] for server in servers] return generate_new_element(server_names, 'server') def get_invalid_image(self): return str(uuid.uuid4()) def _build_minimal_create_server_request(self): server = {} image = self.api.get_images()[0] LOG.debug("Image: %s" % image) if 'imageRef' in image: image_href = image['imageRef'] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server['imageRef'] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] LOG.debug("Using flavor: %s" % flavor) server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server manila-2013.2.dev175.gbf1a399/manila/tests/integrated/__init__.py0000664000175000017500000000156512301410454024361 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 
(the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`integrated` -- Tests whole systems, using mock services where needed ================================= """ # NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work from manila.tests import * manila-2013.2.dev175.gbf1a399/manila/tests/integrated/test_login.py0000664000175000017500000000211012301410454024754 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.openstack.common import log as logging from manila.tests.integrated import integrated_helpers LOG = logging.getLogger(__name__) class LoginTest(integrated_helpers._IntegratedTestBase): def test_login(self): """Simple check - we list shares - so we know we're logged in.""" shares = self.api.get_shares() for share in shares: LOG.debug(_("share: %s") % share) manila-2013.2.dev175.gbf1a399/manila/tests/integrated/test_extensions.py0000664000175000017500000000273212301410454026055 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.openstack.common import log as logging from manila.tests.integrated import integrated_helpers from oslo.config import cfg CONF = cfg.CONF LOG = logging.getLogger(__name__) class ExtensionsTest(integrated_helpers._IntegratedTestBase): def _get_flags(self): f = super(ExtensionsTest, self)._get_flags() f['osapi_share_extension'] = CONF.osapi_share_extension[:] f['osapi_share_extension'].append( 'manila.tests.api.extensions.foxinsocks.Foxinsocks') return f def test_get_foxnsocks(self): """Simple check that fox-n-socks works.""" response = self.api.api_request('/foxnsocks') foxnsocks = response.read() LOG.debug("foxnsocks: %s" % foxnsocks) self.assertEqual('Try to say this Mr. 
Knox, sir...', foxnsocks) manila-2013.2.dev175.gbf1a399/manila/tests/declare_conf.py0000664000175000017500000000157512301410454023101 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('answer', default=42, help='test conf')) manila-2013.2.dev175.gbf1a399/manila/tests/test_migrations.py0000664000175000017500000003647512301410454023717 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. This test case reads the configuration file test_migrations.conf for database connection settings to use in the tests. 
For each connection found in the config file, the test case runs a series of test cases to ensure that migrations work properly both upgrading and downgrading, and that no data loss occurs if possible. """ import commands import ConfigParser import os import urlparse import uuid from migrate.versioning import repository import sqlalchemy import manila.db.migration as migration import manila.db.sqlalchemy.migrate_repo from manila.db.sqlalchemy.migration import versioning_api as migration_api from manila.openstack.common import log as logging from manila import test LOG = logging.getLogger('manila.tests.test_migrations') def _get_connect_string(backend, user="openstack_citest", passwd="openstack_citest", database="openstack_citest"): """ Try to get a connection with a very specific set of values, if we get these then we'll run the tests, otherwise they are skipped """ if backend == "postgres": backend = "postgresql+psycopg2" return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % locals()) def _is_mysql_avail(**kwargs): return _is_backend_avail('mysql', **kwargs) def _is_backend_avail(backend, user="openstack_citest", passwd="openstack_citest", database="openstack_citest"): try: if backend == "mysql": connect_uri = _get_connect_string("mysql", user=user, passwd=passwd, database=database) elif backend == "postgres": connect_uri = _get_connect_string("postgres", user=user, passwd=passwd, database=database) engine = sqlalchemy.create_engine(connect_uri) connection = engine.connect() except Exception: # intentionally catch all to handle exceptions even if we don't # have any backend code loaded. return False else: connection.close() engine.dispose() return True def _have_mysql(): present = os.environ.get('NOVA_TEST_MYSQL_PRESENT') if present is None: return _is_backend_avail('mysql') return present.lower() in ('', 'true') def get_table(engine, name): """Returns an sqlalchemy table dynamically from db. 
Needed because the models don't work for us in migrations as models will be far out of sync with the current data.""" metadata = sqlalchemy.schema.MetaData() metadata.bind = engine return sqlalchemy.Table(name, metadata, autoload=True) class TestMigrations(test.TestCase): """Test sqlalchemy-migrate migrations.""" DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'test_migrations.conf') # Test machines can set the MANILA_TEST_MIGRATIONS_CONF variable # to override the location of the config file for migration testing CONFIG_FILE_PATH = os.environ.get('MANILA_TEST_MIGRATIONS_CONF', DEFAULT_CONFIG_FILE) MIGRATE_FILE = manila.db.sqlalchemy.migrate_repo.__file__ REPOSITORY = repository.Repository( os.path.abspath(os.path.dirname(MIGRATE_FILE))) def setUp(self): super(TestMigrations, self).setUp() self.snake_walk = False self.test_databases = {} # Load test databases from the config file. Only do this # once. No need to re-run this on each test... LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH) if not self.test_databases: if os.path.exists(TestMigrations.CONFIG_FILE_PATH): cp = ConfigParser.RawConfigParser() try: cp.read(TestMigrations.CONFIG_FILE_PATH) defaults = cp.defaults() for key, value in defaults.items(): self.test_databases[key] = value self.snake_walk = cp.getboolean('walk_style', 'snake_walk') except ConfigParser.ParsingError, e: self.fail("Failed to read test_migrations.conf config " "file. Got error: %s" % e) else: self.fail("Failed to find test_migrations.conf config " "file.") self.engines = {} for key, value in self.test_databases.items(): self.engines[key] = sqlalchemy.create_engine(value) # We start each test case with a completely blank slate. 
self._reset_databases() def tearDown(self): # We destroy the test data store between each test case, # and recreate it, which ensures that we have no side-effects # from the tests self._reset_databases() super(TestMigrations, self).tearDown() def _reset_databases(self): def execute_cmd(cmd=None): status, output = commands.getstatusoutput(cmd) LOG.debug(output) self.assertEqual(0, status) for key, engine in self.engines.items(): conn_string = self.test_databases[key] conn_pieces = urlparse.urlparse(conn_string) engine.dispose() if conn_string.startswith('sqlite'): # We can just delete the SQLite database, which is # the easiest and cleanest solution db_path = conn_pieces.path.strip('/') if os.path.exists(db_path): os.unlink(db_path) # No need to recreate the SQLite DB. SQLite will # create it for us if it's not there... elif conn_string.startswith('mysql'): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. 
database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: if auth_pieces[1].strip(): password = "-p\"%s\"" % auth_pieces[1] sql = ("drop database if exists %(database)s; " "create database %(database)s;") % locals() cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " "-e \"%(sql)s\"") % locals() execute_cmd(cmd) elif conn_string.startswith('postgresql'): database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: password = auth_pieces[1].strip() # note(krtaylor): File creation problems with tests in # venv using .pgpass authentication, changed to # PGPASSWORD environment variable which is no longer # planned to be deprecated os.environ['PGPASSWORD'] = password os.environ['PGUSER'] = user # note(boris-42): We must create and drop database, we can't # drop database which we have connected to, so for such # operations there is a special database template1. 
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" " '%(sql)s' -d template1") sql = ("drop database if exists %(database)s;") % locals() droptable = sqlcmd % locals() execute_cmd(droptable) sql = ("create database %(database)s;") % locals() createtable = sqlcmd % locals() execute_cmd(createtable) os.unsetenv('PGPASSWORD') os.unsetenv('PGUSER') def test_walk_versions(self): """ Walks all version scripts for each tested database, ensuring that there are no errors in the version scripts for each engine """ for key, engine in self.engines.items(): self._walk_versions(engine, self.snake_walk) def test_mysql_connect_fail(self): """ Test that we can trigger a mysql connection failure and we fail gracefully to ensure we don't break people without mysql """ if _is_mysql_avail(user="openstack_cifail"): self.fail("Shouldn't have connected") @test.skip_unless(_have_mysql(), "mysql not available") def test_mysql_innodb(self): """ Test that table creation on mysql only builds InnoDB tables """ # add this to the global lists to make reset work with it, it's removed # automaticaly in tearDown so no need to clean it up here. connect_string = _get_connect_string('mysql') engine = sqlalchemy.create_engine(connect_string) self.engines["mysqlcitest"] = engine self.test_databases["mysqlcitest"] = connect_string # build a fully populated mysql database with all the tables self._reset_databases() self._walk_versions(engine, False, False) uri = _get_connect_string('mysql', database="information_schema") connection = sqlalchemy.create_engine(uri).connect() # sanity check total = connection.execute("SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='openstack_citest'") self.assertTrue(total.scalar() > 0, "No tables found. 
Wrong schema?") noninnodb = connection.execute("SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='openstack_citest' " "and ENGINE!='InnoDB' " "and TABLE_NAME!='migrate_version'") count = noninnodb.scalar() self.assertEqual(count, 0, "%d non InnoDB tables created" % count) def test_postgresql_connect_fail(self): """ Test that we can trigger a postgres connection failure and we fail gracefully to ensure we don't break people without postgres """ if _is_backend_avail('postgres', user="openstack_cifail"): self.fail("Shouldn't have connected") @test.skip_unless(_is_backend_avail('postgres'), "postgresql not available") def test_postgresql_opportunistically(self): # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. connect_string = _get_connect_string("postgres") engine = sqlalchemy.create_engine(connect_string) self.engines["postgresqlcitest"] = engine self.test_databases["postgresqlcitest"] = connect_string # build a fully populated postgresql database with all the tables self._reset_databases() self._walk_versions(engine, False, False) def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data # in the databases. This just checks that the schema itself # upgrades successfully. 
# Place the database under version control migration_api.version_control(engine, TestMigrations.REPOSITORY, migration.INIT_VERSION) self.assertEqual(migration.INIT_VERSION, migration_api.db_version(engine, TestMigrations.REPOSITORY)) migration_api.upgrade(engine, TestMigrations.REPOSITORY, migration.INIT_VERSION + 1) LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest) for version in xrange(migration.INIT_VERSION + 2, TestMigrations.REPOSITORY.latest + 1): # upgrade -> downgrade -> upgrade self._migrate_up(engine, version, with_data=True) if snake_walk: self._migrate_down(engine, version - 1) self._migrate_up(engine, version) if downgrade: # Now walk it back down to 0 from the latest, testing # the downgrade paths. for version in reversed( xrange(migration.INIT_VERSION + 1, TestMigrations.REPOSITORY.latest)): # downgrade -> upgrade -> downgrade self._migrate_down(engine, version) if snake_walk: self._migrate_up(engine, version + 1) self._migrate_down(engine, version) def _migrate_down(self, engine, version): migration_api.downgrade(engine, TestMigrations.REPOSITORY, version) self.assertEqual(version, migration_api.db_version(engine, TestMigrations.REPOSITORY)) def _migrate_up(self, engine, version, with_data=False): """migrate up to a new version of the db. We allow for data insertion and post checks at every migration version with special _prerun_### and _check_### functions in the main test. 
""" # NOTE(sdague): try block is here because it's impossible to debug # where a failed data migration happens otherwise try: if with_data: data = None prerun = getattr(self, "_prerun_%3.3d" % version, None) if prerun: data = prerun(engine) migration_api.upgrade(engine, TestMigrations.REPOSITORY, version) self.assertEqual( version, migration_api.db_version(engine, TestMigrations.REPOSITORY)) if with_data: check = getattr(self, "_check_%3.3d" % version, None) if check: check(engine, data) except Exception: LOG.error("Failed to migrate to version %s on engine %s" % (version, engine)) raise manila-2013.2.dev175.gbf1a399/manila/tests/test_api.py0000664000175000017500000000514312301410454022300 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the API endpoint.""" import httplib import StringIO import webob class FakeHttplibSocket(object): """A fake socket implementation for httplib.HTTPResponse, trivial.""" def __init__(self, response_string): self.response_string = response_string self._buffer = StringIO.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """A fake httplib.HTTPConnection for boto. 
requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into the httplib.HTTPResponse that boto expects. """ def __init__(self, app, host, is_secure=False): self.app = app self.host = host def request(self, method, path, data, headers): req = webob.Request.blank(path) req.method = method req.body = data req.headers = headers req.headers['Accept'] = 'text/html' req.host = self.host # Call the WSGI app, get the HTTP response resp = str(req.get_response(self.app)) # For some reason, the response doesn't have "HTTP/1.0 " prepended; I # guess that's a function the web server usually provides. resp = "HTTP/1.0 %s" % resp self.sock = FakeHttplibSocket(resp) self.http_response = httplib.HTTPResponse(self.sock) # NOTE(vish): boto is accessing private variables for some reason self._HTTPConnection__response = self.http_response self.http_response.begin() def getresponse(self): return self.http_response def getresponsebody(self): return self.sock.response_string def close(self): """Required for compatibility with boto/tornado.""" pass manila-2013.2.dev175.gbf1a399/manila/tests/test_test_utils.py0000664000175000017500000000206312301410454023724 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2010 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila import test from manila.tests import utils as test_utils class TestUtilsTestCase(test.TestCase): def test_get_test_admin_context(self): """get_test_admin_context's return value behaves like admin context.""" ctxt = test_utils.get_test_admin_context() # TODO(soren): This should verify the full interface context # objects expose. self.assertTrue(ctxt.is_admin) manila-2013.2.dev175.gbf1a399/manila/tests/fake_driver.py0000664000175000017500000000240612301410454022750 0ustar chuckchuck00000000000000# Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.openstack.common import log as logging from manila.share.drivers import lvm LOG = logging.getLogger(__name__) class FakeShareDriver(lvm.LVMShareDriver): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): super(FakeShareDriver, self).__init__(execute=self.fake_execute, *args, **kwargs) def check_for_setup_error(self): """No setup necessary in fake mode.""" pass @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" LOG.debug(_("FAKE EXECUTE: %s"), cmd) return (None, None) manila-2013.2.dev175.gbf1a399/manila/tests/test_test.py0000664000175000017500000000307312301410454022506 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the testing base code.""" from manila.openstack.common import rpc from manila import test class IsolationTestCase(test.TestCase): """Ensure that things are cleaned up after failed tests. These tests don't really do much here, but if isolation fails a bunch of other tests should fail. """ def test_service_isolation(self): import os print os.path.abspath(".") self.start_service('share') def test_rpc_consumer_isolation(self): class NeverCalled(object): def __getattribute__(*args): assert False, "I should never get called." connection = rpc.create_connection(new=True) proxy = NeverCalled() connection.create_consumer('share', proxy, fanout=False) connection.consume_in_thread() manila-2013.2.dev175.gbf1a399/manila/tests/conf_fixture.py0000664000175000017500000000265512301410454023170 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg CONF = cfg.CONF CONF.import_opt('policy_file', 'manila.policy') def_vol_type = 'fake_vol_type' def set_defaults(conf): conf.set_default('connection_type', 'fake') conf.set_default('fake_rabbit', True) conf.set_default('rpc_backend', 'manila.openstack.common.rpc.impl_fake') conf.set_default('verbose', True) conf.set_default('sql_connection', "sqlite://") conf.set_default('sqlite_synchronous', False) conf.set_default('policy_file', 'manila/tests/policy.json') conf.set_default('share_export_ip', '0.0.0.0') conf.set_default('share_driver', 'manila.tests.fake_driver.FakeShareDriver') manila-2013.2.dev175.gbf1a399/manila/tests/test_exception.py0000664000175000017500000000600612301410454023524 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila import exception from manila import test from manila import utils class FakeNotifier(object): """Acts like the manila.openstack.common.notifier.api module.""" ERROR = 88 def __init__(self): self.provided_publisher = None self.provided_event = None self.provided_priority = None self.provided_payload = None def notify(self, context, publisher, event, priority, payload): self.provided_publisher = publisher self.provided_event = event self.provided_priority = priority self.provided_payload = payload def good_function(): return 99 def bad_function_error(): raise exception.Error() def bad_function_exception(): raise test.TestingException() class ManilaExceptionTestCase(test.TestCase): def test_default_error_msg(self): class FakeManilaException(exception.ManilaException): message = "default message" exc = FakeManilaException() self.assertEquals(unicode(exc), 'default message') def test_error_msg(self): self.assertEquals(unicode(exception.ManilaException('test')), 'test') def test_default_error_msg_with_kwargs(self): class FakeManilaException(exception.ManilaException): message = "default message: %(code)s" exc = FakeManilaException(code=500) self.assertEquals(unicode(exc), 'default message: 500') def test_error_msg_exception_with_kwargs(self): # NOTE(dprince): disable format errors for this test self.flags(fatal_exception_format_errors=False) class FakeManilaException(exception.ManilaException): message = "default message: %(mispelled_code)s" exc = FakeManilaException(code=500) self.assertEquals(unicode(exc), 'default message: %(mispelled_code)s') def test_default_error_code(self): class FakeManilaException(exception.ManilaException): code = 404 exc = FakeManilaException() self.assertEquals(exc.kwargs['code'], 404) def test_error_code_from_kwarg(self): class FakeManilaException(exception.ManilaException): code = 500 exc = FakeManilaException(code=404) self.assertEquals(exc.kwargs['code'], 404) 
manila-2013.2.dev175.gbf1a399/manila/tests/windows/0000775000175000017500000000000012301410516021604 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/db_fakes.py0000664000175000017500000000217512301410454023722 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Pedro Navarro Perez # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Stubouts, mocks and fixtures for windows volume test suite """ def get_fake_volume_info(name): return {'name': name, 'size': 1, 'provider_location': 'iqn.2010-10.org.openstack:' + name, 'id': 1, 'provider_auth': None} def get_fake_snapshot_info(volume_name, snapshot_name): return {'name': snapshot_name, 'volume_name': volume_name, } def get_fake_connector_info(initiator): return {'initiator': initiator, } manila-2013.2.dev175.gbf1a399/manila/tests/windows/__init__.py0000664000175000017500000000000012301410454023704 0ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/windowsutils.py0000664000175000017500000001042612301410454024735 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Pedro Navarro Perez # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Windows storage classes to be used in testing. """ import os import sys # Check needed for unit testing on Unix if os.name == 'nt': import wmi CONF = cfg.CONF class WindowsUtils(object): def __init__(self): self.__conn_cimv2 = None self.__conn_wmi = None @property def _conn_cimv2(self): if self.__conn_cimv2 is None: self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') return self.__conn_cimv2 @property def _conn_wmi(self): if self.__conn_wmi is None: self.__conn_wmi = wmi.WMI(moniker='//./root/wmi') return self.__conn_wmi def find_vhd_by_name(self, name): '''Finds a volume by its name.''' wt_disks = self._conn_wmi.WT_Disk(Description=name) return wt_disks def volume_exists(self, name): '''Checks if a volume exists.''' wt_disks = self.find_vhd_by_name(name) if len(wt_disks) > 0: return True return False def snapshot_exists(self, name): '''Checks if a snapshot exists.''' wt_snapshots = self.find_snapshot_by_name(name) if len(wt_snapshots) > 0: return True return False def find_snapshot_by_name(self, name): '''Finds a snapshot by its name.''' wt_snapshots = self._conn_wmi.WT_Snapshot(Description=name) return wt_snapshots def delete_volume(self, name): '''Deletes a volume.''' wt_disk = self._conn_wmi.WT_Disk(Description=name)[0] wt_disk.Delete_() vhdfiles = self._conn_cimv2.query( "Select * from CIM_DataFile where Name = '" + self._get_vhd_path(name) + "'") if len(vhdfiles) > 0: vhdfiles[0].Delete() def _get_vhd_path(self, volume_name): '''Gets the path disk of the volume.''' base_vhd_folder = CONF.windows_iscsi_lun_path return os.path.join(base_vhd_folder, 
volume_name + ".vhd") def delete_snapshot(self, name): '''Deletes a snapshot.''' wt_snapshot = self._conn_wmi.WT_Snapshot(Description=name)[0] wt_snapshot.Delete_() vhdfile = self._conn_cimv2.query( "Select * from CIM_DataFile where Name = '" + self._get_vhd_path(name) + "'")[0] vhdfile.Delete() def find_initiator_ids(self, target_name, initiator_name): '''Finds a initiator id by its name.''' wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name, Method=4, Value=initiator_name) return wt_idmethod def initiator_id_exists(self, target_name, initiator_name): '''Checks if a initiatorId exists.''' wt_idmethod = self.find_initiator_ids(target_name, initiator_name) if len(wt_idmethod) > 0: return True return False def find_exports(self, target_name): '''Finds a export id by its name.''' wt_host = self._conn_wmi.WT_Host(HostName=target_name) return wt_host def export_exists(self, target_name): '''Checks if a export exists.''' wt_host = self.find_exports(target_name) if len(wt_host) > 0: return True return False def delete_initiator_id(self, target_name, initiator_name): '''Deletes a initiatorId.''' wt_init_id = self.find_initiator_ids(target_name, initiator_name)[0] wt_init_id.Delete_() def delete_export(self, target_name): '''Deletes an export.''' wt_host = self.find_exports(target_name)[0] wt_host.RemoveAllWTDisks() wt_host.Delete_() manila-2013.2.dev175.gbf1a399/manila/tests/windows/mockproxy.py0000664000175000017500000001664312301410454024224 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Classes for dynamic generation of mock objects. """ import inspect def serialize_obj(obj): if isinstance(obj, float): val = str(round(obj, 10)) elif isinstance(obj, dict): d = {} for k1, v1 in obj.items(): d[k1] = serialize_obj(v1) val = str(d) elif isinstance(obj, list): l1 = [] for i1 in obj: l1.append(serialize_obj(i1)) val = str(l1) elif isinstance(obj, tuple): l1 = () for i1 in obj: l1 = l1 + (serialize_obj(i1),) val = str(l1) else: val = str(obj) return val def serialize_args(*args, **kwargs): """Workaround for float string conversion issues in Python 2.6.""" return serialize_obj((args, kwargs)) class Mock(object): def _get_next_value(self, name): c = self._access_count.get(name) if c is None: c = 0 else: c = c + 1 self._access_count[name] = c return self._values[name][c] def _get_next_ret_value(self, name, params): d = self._access_count.get(name) if d is None: d = {} self._access_count[name] = d c = d.get(params) if c is None: c = 0 else: c = c + 1 d[params] = c return self._values[name][params][c] def __init__(self, values): self._values = values self._access_count = {} def has_values(self): return len(self._values) > 0 def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): return object.__getattribute__(self, name) else: if isinstance(self._values[name], dict): def newfunc(*args, **kwargs): params = serialize_args(args, kwargs) return self._get_next_ret_value(name, params) return newfunc else: return self._get_next_value(name) def __str__(self): return self._get_next_value('__str__') def __iter__(self): return 
getattr(self._get_next_value('__iter__'), '__iter__')() def __len__(self): return self._get_next_value('__len__') def __getitem__(self, key): return self._get_next_ret_value('__getitem__', str(key)) def __call__(self, *args, **kwargs): params = serialize_args(args, kwargs) return self._get_next_ret_value('__call__', params) class MockProxy(object): def __init__(self, wrapped): self._wrapped = wrapped self._recorded_values = {} def _get_proxy_object(self, obj): if (hasattr(obj, '__dict__') or isinstance(obj, tuple) or isinstance(obj, list) or isinstance(obj, dict)): p = MockProxy(obj) else: p = obj return p def __getattr__(self, name): if name in ['_wrapped']: return object.__getattribute__(self, name) else: attr = getattr(self._wrapped, name) if (inspect.isfunction(attr) or inspect.ismethod(attr) or inspect.isbuiltin(attr)): def newfunc(*args, **kwargs): result = attr(*args, **kwargs) p = self._get_proxy_object(result) params = serialize_args(args, kwargs) self._add_recorded_ret_value(name, params, p) return p return newfunc elif (hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__') and not (isinstance(attr, str) or isinstance(attr, unicode)))): p = MockProxy(attr) else: p = attr self._add_recorded_value(name, p) return p def __setattr__(self, name, value): if name in ['_wrapped', '_recorded_values']: object.__setattr__(self, name, value) else: setattr(self._wrapped, name, value) def _add_recorded_ret_value(self, name, params, val): d = self._recorded_values.get(name) if d is None: d = {} self._recorded_values[name] = d l = d.get(params) if l is None: l = [] d[params] = l l.append(val) def _add_recorded_value(self, name, val): if name not in self._recorded_values: self._recorded_values[name] = [] self._recorded_values[name].append(val) def get_mock(self): values = {} for k, v in self._recorded_values.items(): if isinstance(v, dict): d = {} values[k] = d for k1, v1 in v.items(): l = [] d[k1] = l for i1 in v1: if isinstance(i1, MockProxy): 
l.append(i1.get_mock()) else: l.append(i1) else: l = [] values[k] = l for i in v: if isinstance(i, MockProxy): l.append(i.get_mock()) elif isinstance(i, dict): d = {} for k1, v1 in v.items(): if isinstance(v1, MockProxy): d[k1] = v1.get_mock() else: d[k1] = v1 l.append(d) elif isinstance(i, list): l1 = [] for i1 in i: if isinstance(i1, MockProxy): l1.append(i1.get_mock()) else: l1.append(i1) l.append(l1) else: l.append(i) return Mock(values) def __str__(self): s = str(self._wrapped) self._add_recorded_value('__str__', s) return s def __len__(self): l = len(self._wrapped) self._add_recorded_value('__len__', l) return l def __iter__(self): it = [] for i in self._wrapped: it.append(self._get_proxy_object(i)) self._add_recorded_value('__iter__', it) return iter(it) def __getitem__(self, key): p = self._get_proxy_object(self._wrapped[key]) self._add_recorded_ret_value('__getitem__', str(key), p) return p def __call__(self, *args, **kwargs): c = self._wrapped(*args, **kwargs) p = self._get_proxy_object(c) params = serialize_args(args, kwargs) self._add_recorded_ret_value('__call__', params, p) return p manila-2013.2.dev175.gbf1a399/manila/tests/windows/basetestcase.py0000664000175000017500000000634112301410454024631 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ TestCase for MockProxy based tests and related classes. 
""" import gzip import manila.test import os import pickle from manila.tests.windows import mockproxy gen_test_mocks_key = 'MANILA_GENERATE_TEST_MOCKS' class BaseTestCase(manila.test.TestCase): """TestCase for MockProxy based tests.""" def run(self, result=None): self._currentResult = result super(BaseTestCase, self).run(result) def setUp(self): super(BaseTestCase, self).setUp() self._mps = {} def tearDown(self): super(BaseTestCase, self).tearDown() has_errors = len([test for (test, msgs) in self._currentResult.errors if test.id() == self.id()]) > 0 failed = len([test for (test, msgs) in self._currentResult.failures if test.id() == self.id()]) > 0 if not has_errors and not failed: self._save_mock_proxies() def _save_mock(self, name, mock): path = self._get_stub_file_path(self.id(), name) pickle.dump(mock, gzip.open(path, 'wb')) def _get_stub_file_path(self, test_name, mock_name): # test naming differs between platforms prefix = 'manila.tests.' if test_name.startswith(prefix): test_name = test_name[len(prefix):] file_name = '{0}_{1}.p.gz'.format(test_name, mock_name) return os.path.join(os.path.dirname(mockproxy.__file__), "stubs", file_name) def _load_mock(self, name): path = self._get_stub_file_path(self.id(), name) if os.path.exists(path): return pickle.load(gzip.open(path, 'rb')) else: return None def _load_mock_or_create_proxy(self, module_name): m = None if (not gen_test_mocks_key in os.environ or os.environ[gen_test_mocks_key].lower() not in ['true', 'yes', '1']): m = self._load_mock(module_name) else: module = __import__(module_name) m = mockproxy.MockProxy(module) self._mps[module_name] = m return m def _inject_mocks_in_modules(self, objects_to_mock, modules_to_test): for module_name in objects_to_mock: mp = self._load_mock_or_create_proxy(module_name) for mt in modules_to_test: module_local_name = module_name.split('.')[-1] setattr(mt, module_local_name, mp) def _save_mock_proxies(self): for name, mp in self._mps.items(): m = mp.get_mock() if 
m.has_values(): self._save_mock(name, m) manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/0000775000175000017500000000000012301410516022744 5ustar chuckchuck00000000000000././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_wmi.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_0000664000175000017500000000346012301410454033531 0ustar chuckchuck00000000000000‹ÄôÈPÿtest_windows.TestWindowsDriver.test_create_volume_from_snapshot_wmi.p½XKoI¾Ï¯°r!¬°éªê'Ò aµ>€V—H–3ŒÇãõL,â¿owõtO|/êõòsµ __ŒÂçóç“çÛºnŸ_],Ãõƒ§áéÓ'ßœ>yLbq¸Ú o{Å‚‚ôHAñjºX04ðØ l@ônv²žošOu.˜A±kÁ›A ï ufP&3X8ÚVó¶òP³3°`ƒ'ÑY H³r¾ZÍfÞß•wòóáÝqÏ6J(‡ o%†TÀCz‡ââ®Ã#EÂ#ñ,ªvÙV Iòî%ùD¸”ÁªTÅûïŠàµQZ•tj,=»—ò娼~í^)sô Íñÿ†.æMÓœ¥³ù ɸz¤2 勤(¼“”éyÏÉ~¨n%LQØš’»„)åñ©Ë$|6ãó#CÚ{7’£aׂFoAó&´ì,h•,hí÷µ¹ä2|#çáÁ!ï²xpªæh5¢x»Cí[Â×0<“Â×äð5*qkôÜWM¹]nÚe½îI&á” Ç8=–XÑØUæãX[‡f^ùßÍ­øí.AÆy˜–Kƒ…¦ÅÓR¢ØÊD±U»¬˜>k“—,8á·÷úë¦Þ– w-¸PÚ×6§: Ng &QåöðŽcï€Á x/ ’C@d€Pi¯ tÚ,SLQ”–<-ƒdºvÅ5íxÙ|æJkã»n—`—`.Ę@øš@@(?o«+_k:[À¥@m¶Õ½–²0§/`Î_@¼'Ⱦ,Ëê¯yû©±£§ƒË“£“éûå¶½œ¯²fø<–nòåÓy\úÎèÞÇLoâdù_5]¿yuý}”wHWÀA‰ 5°cÏdmÏ£æ»*(»”¢K)»”z—zEIn õs¹¾/7wK:*ó@,ÃeM™DdVEð)Ò@RN9¤M2m²{=Ó&{Ú$·ïŽ/*ÁŠW”iÎb~Oõ…ÜDDÞT*Ý rí¯ ‰W/{‰vNäÛIS1mÕ0m5ûXGëìcÝû8hKGšV™4=ÌYÍ9«»wrΚ>gM¨Á&~Ň«˜Õ^Xn¨ÑMN ûÌt‹fŸ™Þg^ ±;¬ –+¨»´¹‚Ú¾‚Ú,j`KÕÀš×áa›^—¤ \Ö6pÄ ×ªZs³.zÍKÐT\ÎEªçnèOÇþt¹äOÙŸ(r F?G >L‰½•ûéA΢þGÑ4®íá²ÿ™ôb5…k< šsîÎ!¶çûsètÈÑ€ kë`"v;œ% ˆ)s4`˜1ÒÖ½HܳuV ÄÎB¦±W ôŠ‘¶Nâ±¶NÜ[! š+¤Ð]!É'õWH¹Á ²´lª^Hƒ2ˆ2”A”1Ì#Y.ƒ(C÷r\­ª¶ …RÅÛúáJ†2zÎ Èý}5Jv¥Š®TÙ•ªw¥WD¿’¤¸¨¸OC5¨¨ Ãíå¡ûf–y^]î‰<Í®ÔÑ•:»R÷®Ô¹YÆ8Ë<ÆÖãD„f8Àó„qÂ<a?¡É=7šÜs£Îñ†yÃ] ÚÔµ Í] zYZ¤p³´G¸Ù¸”—¬›ÂɽüŸuÓòƒñhÁÞ8[`L6br“ë1…1§óKÍí¾ë¼_T½#–ÿ®'^8ÄĤÞ.&õ¦Z7í¼üüâ×]Ô…—Ìnè4‘˜ˆÒæ]äùÂ$ÕE'y5Û•ÆÈÖÏ'•Ë‹/wœ›Œ Ž”H„3%*BJ§J$LÓÃ|ñt„·âÁä# \*‚cþ½¬¶ßÂê ãÍ6ýàÄ´²ý6ú¸­/FGÓ7³ãy;ÿc¹ªFWŸªm5 îý>ê&¡_™žö©úÝôÄK<{ÚOAßùHeÈ$pÜ â–À2'ñT ó±^;ÃÞã^³Ç™“Ÿ˜(÷å»M ²€0d¡‰Û·™“> èZx¡åÝU. 
Š+»Ÿ†qK·ã–rÜR·d³fò!$×R’ƒZJ2ÔRêV”©–’̵”dî·hÁ¥(¸$íp%Nš¨¯”õ•z}%¯¯wœ‚øÉÿ“"ç6././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_0000664000175000017500000000073012301410454033525 0ustar chuckchuck00000000000000‹ÅôÈPÿtest_windows.TestWindowsDriver.test_delete_volume_os.pÍRËNä0¼÷W .ÉHL?òð^áÂ=ìHœ"E7fc+vxñït{f@€â°‘ât·íêêªhmÝC;™pÑvôašu°¸R­‡qc¦,|v‡‰½óÙ?«oÜdïà#p tÛ®ça†±mÁ®¯à8üNÀ'!ݸVIÛim¼oµÇ€+i£;·Ýv6k5Õv]¸Âœ!­c z züDXÆ— :Ë$ôˆàûx•xßÜHšªX«±–6Izœ6M“œþjÞ<Ãêtu~9Laî¶gƒ¿ñtìdq|rDÁã­ ¬Å$Y²"n<‡óœAç½_w¯Xr†,9§ö\ìYry`ÉI’k;ŒˆÄw{Õ·8F‚,WE³ri˜*—’±T¦ú»,6µâUgD®Tv{µ‰c|6¯ãPdx§åW[!˜ÈßÓE."ê"ºˆ]DýÎ3Qþ¼g¢¢ñDý‘gBÑ›S{ÉöÜ$?p“Hwç•”ÿW² adù#^Éj¯¾Ù3y#35manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/README.rst0000664000175000017500000000015312301410454024433 0ustar chuckchuck00000000000000Files with extension p.gz are compressed pickle files containing serialized mocks used during unit testing ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_0000664000175000017500000000067112301410454033532 0ustar chuckchuck00000000000000‹ÃôÈPÿtest_windows.TestWindowsDriver.test_create_snapshot_os.pÍQËjÃ0¼ïW„\ì@b,)~¨×ô’C{h 'ƒqÕq’ZÂ’ó ôß»RÒBK(¥ôPƒåÝÙÕììX¥Oe'kÀC¨ÖØ®Vu c…hÚ•ì"+5Ñu0ѳ[Ý©ã î0M@”å²ov¶iËÔr#…MáÞjz áJ'°ÊJiL)TßÚtê ¬ì«]/ b¹Ã86ëÊ®1'(c§ ƒ?ž–Pä%Ìõ’)Ôžˆ$`j¤x_í2刅Eâ(‚ÙMñåi³Åü±él_ín³5®m<Ž.xyuç‘O‚‘cæNa¨Œ1Ëê“JJP%¥n4p±m}@ 1¢7Éu^?"„AFÇ cø>ã‚!·öôöMÂb¢& A îà´á@8C“,Ýí’ÄapîïŠé¹¶OyfI›‡¡«ËÍ䓯¯Ö«ßòª9¦;Œ®ž¾ÔHM!^Z0ñR0˗ƪwKy¯ 
S©åÔ˜èéá¾=º=¸Îª¼lòâ0œü%0Ä:ÿÓ®7?~ºŸ‰¿­)—È%Ç\rå¸ä-q¦ãRÐÀ¥p¬Þ¥uí—»t´S0ì[O®a§ìwºsZæ…úæÿŸ%(4&(Ì8AI]˜¼¾X¦ä!L)|³lm“7vïûEú—±{@ñR!ªÔTãPc/¼:Ô˜Ô˜»Í×vg‹ˆ±WwŒÍ½˜/< CàóOSˆ}¿ÇŠÜÒ_¤?FÄ^*ˆ@õ"P.ÕVj"b…"V>GD¬z+ʦÌw*›¦¦†q˜»KûHtè.Ýw—Ž}ÙvöàK¦}´&+JÒž²1Ìø‚™P0ÓÌð¶ß)mãåh&r4(GÓÆä´×#P÷b›­Óù8ÝÚOߟ‹ºñžÀÛ—Äd„S‰3œvÀª?BG˜À8gy‰ <ç6ÝÛ‰üý!bèhTTÛ¨(­sÊ4{¼üÏ35ÌÂÙÑ8 @Ïm”2dá\)dá|)“¾èÈúZ¯Ìòý{!L¯4˜šx·ƒÖî ÷; ¼ã½?ÚêÙ»n÷|41çk7H²föÃì]UìgW«›ä:mÒŸòlegX‡ÙëYçCߨ»üóWçƒ9ÜúÙºåd2Žã<î2ð0‘÷#œU‡Ò9ÏZ+ݶÊ€î%»Gj ¬=°Ÿˆ ÂHÑÏD¬Ë¡h%"¦^"¢ÛÞKD ƒD$ý÷©²mc9mcéÛX¶m,û6–C»dGm;Z¢¿ÐP:²™ ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_wmi.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_0000664000175000017500000000274012301410454033530 0ustar chuckchuck00000000000000‹ÅôÈPÿtest_windows.TestWindowsDriver.test_delete_snapshot_wmi.p½W]OÜF}÷¯XñTÙeîOGêCa[u@U‰’¤•1“ÅeYoÖš¢ü÷Þ™ñŒY‡MH¥‚ÁöøøÌ™{Ï—e½þ<ߨEF‡²^5íæ®lëM¶fÙAYV«+»™´¶i›ÉÔÍä¶.oÖ›úïÏÙ)ý—­!+çóË»jÙV«ù<«/ÿ²e›­1;k×<ûs-²ƒ«µÌÎ÷çEYÚ¦™—õݪÝÏÖÊÝÐYCwî‹åmèšq×rüátF§ÀÜ9]8¸ ß :ºã›ÑÞ£û{[¯ª»qÿ¾¹ãÑÑähS×íÑÃmåοìºÑû_.ö$fË5ðlAØ Èž(b Ò¿Me O ˆÛÂã½›Ÿ¯Šus];ò;dÛ0ƒèžAÞÁ ˆ0èT8ÙØ¢µ„€Ê#è‚q¹ÎY‡À!"pt:ÎËb¹œÏ ƒsUìÔçûi¯b(¥V€Ï Ã¥ãÃÕ6®‰/Ï;>‚E><Ÿ…m«ÖÞzJÂÏ^pºÁÜ©p¨Bfï§ÇÇ8ü2–æ73,Wããã_Å¡™žLócù…žPYÑ4Íe<\[„„#$ÂÛ#!™ID‘¸S”ÙU¯ÉËX=+˜änjRl &%ñ“–ŽüLâG”ƒBŠV7ˆ£`A!!(? %:%#‚R4¯õo í/¸â<Ø;ð³ì î=¥ª|µj–mI;ÔV»òÕžžŽå«SùjµÕj§¶SÛ”›jÝVõª™³\jlähù8·úãX^™uaé¾~Vb­=o³-Ή¦ñÖ` £i0Ò4eÅ {¾Ò–; ¶"šˆä@ ´! 
b8Uá§*ºgÒTE?U-Ý ${%³ /G—’k›®ä~s"éRÈDº”4.‡—vå3(pnêÛÑÉìt>-Úâ·jiG×vcGnùF?ºÝÓÿ¼ãò¯Ø{sØïœý×ÎPIêV ëVøº¡nEª[Ñ×-¥jZqŠUoG±IeÚF Ö’ôµ$C-ÉTK²¯%©Ò>ÝQ”:™!s9Ta1Tt9TÉåP%—C%^ÐíʪA¡ÒþMh2(Ý”K«§Õ¾u˜ü é—¢Ž././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_wmi.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_initial0000664000175000017500000000367612301410454033571 0ustar chuckchuck00000000000000‹ÇôÈPÿtest_windows.TestWindowsDriver.test_initialize_connection_wmi.pÝXßoÛ6~×_aä%é0»â)’öÐ5–‡ÝR´/ WV]-¶åJJ²¬èÿ>ÞQ"æG³k[Òñîãw÷ݱ,›Ýõ¼­V™»”ͶëÛ‹²oÚl—geYo—U;ë«®ïfWîCsÕÍ6My¾k›?¯³c÷W¶Y9Ÿ¿½¨×}½Ï³æíUÙg;ÈNúf¿ïdv°Ü©ìt¾(˪ëæes±í÷³]A7tÖ¹;—‹õEÕ¹ï }gÝÃoŽÜG‘Óg!Ügîÿ™»<¡ë“½ô{Ólëóª¥?ŸMèúôéìiÛ4ýÓ«MMŸ?í=¡§Ïö?~:ÛB&!;Xïf+g{%²;*¤óT(^­ÈVìšp¾­Ø!aÈ£Wó£Ããªß,ÉŒ%3ß4™ w3 G3@(l«+÷:ø[šÛ;ਢ—{Ÿû †²7ÂÜ-„‚Œ !Ž ¡tvw2rLX|y!Ô´šìdÑuÝÛáÂqÖÝ9=Á1Ë$fI1KŽYŽ1˳¤˜Oª«7¯#²`#:1bȈeªäƒ%F# ˆ>ór±^ÏçΆBþVÞK‹Ãê².«—‹þ}dÆ‹ggÉ¿úôÅéÑëºí/kò®K¹U&Å´¶˜J¨pj+ýnª–Æ‚^T˜[;»|¿ôKû…»²­w}ÝlãÊ1MœÖUGÛãŸ?ä\VаTÅM,•vX*œ°,òË¡:n®û¹ñfîÍ‚Á-äø¦ oºu<ò…~ù¯àV€“¿Hȯ‰üšÉ¯Gòë@~-™,ëjË\ÑŒ‘.²#‘çóUÕ×}µñw5¿BùÓGf¸Inˆá†nF†›Àp£¸8RõbníÖ*”R*—‹˜L1£~m:ÊNÃñ™$>KñYŽÏŽñÙŸ•>£–ã³ ,qÀúçGˆ<@äbÜKG®{7“–8Ylª¸“õ‡í r‘OE>kÚÕ¬ÙUN3åù³½Ç"G®È¹LJr®¨&çƒï:Dabö~. 
A( !¼‚Œ8 €®è õAL!’º(„á(¸2 K£€P…“›•§± Íñ¢ Œ& ¯4!¢ ¤»Ï—ËPª…SÖJ‘ØBª=t8V ¡T µBàX,öܘ 2kÐá®)*h¼6UkÞ[é÷V†½•qoI>£F̿ģ"‡×*2¢âDiŒÄ)ÓC$Ùþç$qŠHp89¼…Bî^¤÷RQ8a£pš0’Ä)ÃH•âZ0®…ǵ¸ׂªÏ«EëŠèÑo'äTÁH8uxM1 kÅ4W³²ÙÌ6uÙ6]ó®æšÇéFwÛs£êVÁôëЙöì-®ï®¯/›¶_¬Ù=O¡TC‹ˆð*"‚Œˆ¨#BÇò Õ—û'¡} ë4…5§°ö)lB ›˜Â&¦°‰)lÒ6LV3¼Èj"Y ¥0ÅΪÏ^'$Gng —Þ­ëÁù¶o„°¼vÏPnG¬û)覼…ë­Ü±ìŽõNØàŽîôàÐ(§rέrî{å<4Ëyì–]ýð'Ÿ¾MùöîšÔ]Kîúyœ¨ î:]Ý¥‘Â7à„…»;kŦ:˜2ÑT¨9ù·Š¸¸$Åy¦‘Þ±¸„⊠@(.Ž,À3 ø¡ÂÔqlž[«uÕWÔœ^à1Ó øñÒù…/+ßgAª#À:^G èDŠÈïÜ#€ €‡'ðÓ„ñ âü¤7]0pSÑ Ê㮊`JGS&¢ìw¤à EÒŸÏ1à“ ÄQh–([è@Ù"Íú‚³^û¬×!ëuÌz§«‘§.â’1.'è·X§˜uʳNÖ©È:EÍʇ‹ª½æCàáþs»½S×i•ýä‡É»¶ÙL^Ïýâ—z]M®ÞWm5!BN~š §¡ÿñ */±÷ã“HÏ;ÏPq/‡éìŠ<»¢Ÿ]1Ì®gW¤N`Hj× pRÔã~ÀS¯Ð©aƹ½B=¶W¨C{…BßÊé¾ìè´ìh.;zx=”ËŽ?šü6ŸÙ'J‹†”ô–F¥E”Àß#ßî2ûHêM././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_initial0000664000175000017500000000067712301410454033567 0ustar chuckchuck00000000000000‹ÇôÈPÿtest_windows.TestWindowsDriver.test_initialize_connection_os.pÍQËjÃ0¼ïW„\ì@b,)~¨×ô’C{h 'ƒqÕq’ZÂ’ó ôß»RÒBK(¥ôPƒåÝÙÕììX¥Oe'kÀC¨ÖØ®Vu c…hÚ•ì"+5Ñu0ѳ[Ý©ã î0M@”å²ov¶iËÔr#…MáÞjz áJ'°ÊJiL)TßÚtê ¬ì«]/ b¹Ã86ëÊ®1'(c§ ƒ?ž–Pä%Ìõ’)Ôžˆ$`j¤x_í2刅Eâ(‚ÙMñåi³Åü±él_ín³5®m<Ž.xyuç‘O‚‘cæNa¨Œ1Ëê“JJP%¥n†”üT=IÏ©»«+÷kÙ½zùürôWÏ_ÎÏ~«wÝm¹"tíx›ű[+tòØ:ýö8¿6u餰vv÷î:L&n«]½íêf3Ìü5a†óúOw¶9ÿéñû¨>¹§2'.e±Ï¥ÔžKˆ³=—JD.•gõªl[¾\•{o*$Ý2¹JÅ7óô¦Ÿ'0¯ôg˜ÿümÊЕÝ_`.<Ì8¿°‡™Ë3W,–•Û°Vræ(/²3ÈJʹÅÒuuçÖáWͯÎݲ‹‘ RxÁ /¢Â‹¤ð"ç$9¥Ìfm:;“BX¥„–YÙtÓ…ê—¦e'àå£åiZžæåé¸<–§UH¨>„æåé‘4I@‡ñQ&IÀ@ÜHƒOn$Å¿(×nØÅúýf†Ä1ˆY³[Κ­ó¾YV7ÏÿõþIèÚGorÞÐ:¢7 ½}ZÀ–¨³LÔÙDUqÝ6ÿ¾ë¶,;2AK&hÙAD‘lÐû@¶ Šá…Ô bDâDÿºN̈JÍ‹ëëäÊ@ÇÇò%g?%ÈP¢)$WH¶}aÊĦ$`~¦Áß<þ:®N`Âü£\¤ä-Œ{ ˜6¨Ö<ÒãT«‰Á>D"b| Z†2rä É@²€ŒF29x?ŽTxSþ6 Þî®õW…“„wý®÷øW‘†Ø?ÀÛ=ûcäntH`ÞT ñ¦ÞÈ¢û•{wþF+§à±S ؃›K>Ô¨'p} ˆp}ÅDòÞ)‹¯`ñA|E_1ˆ¯ 7>u+×9*'PôG)Þýé!0€žîÁ/ÂÆù"p±§Zº,Ci€qm.ª¤òC}\ôw¶9Ðìs õx†W¬Î$«3ƒÕLÚôEâ3Ú4|‚5w“ΰæÑ!Ö$BŒý΄Ø`³vl³–mÖ”6Ù¬lÖ—Œ(Yª½dí8ë-e= 
Îz>žÇEÊz¤ÊÑë…ú²NQäáØ_°Nç¯Üº¹s/V«P=Z£Ã˜ '´3  p¨# »ƒð5h  bÜ…hž©=ˆéÔƒŸ)í?ï´ªz}‡ŸÜfDn¶GÝ¢â^)E©&K‘I”D™H”‰žÀùÁû[·{ Ù¥ ÷²:÷.Uu“&owÍzòòì|qZvåÏõÊMîß¹›ú'?NúÞçî—xŠé³£¡ïùä¹ehå¸_”Ü0JºM‘úÎä ¨’ƒ ê¤O ¤æ&$úÒ¶ØW9XõM l‡ŽV$Ï'„¡ Ḡ!W! US¡ !U¡^úÜ|Iú=äq{‚ÜŸ`hP0u(8´(èÓý)é·W³¿jùï ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_wmi.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_0000664000175000017500000000204112301410454033523 0ustar chuckchuck00000000000000‹ÃôÈPÿtest_windows.TestWindowsDriver.test_create_volume_wmi.p½V]oÛ6}ç¯0üg¨^~Šö²Ãü`˜‹õÅ€ (¬¢Å¶TIŽ—ýïã%M9Òš¢Ý° Éäá¹ç~œäyU?§-ˆ[òj×vÍ>惘ԔÌò¼ÜÝÛfÑÙ¶k÷PÚŶÊë¦úó™Ü¸¿H $OÓ»}¹éÊ]š’êî›w¤fä¶«9ù­dv_K²:K³<·m›æÕ~בZáš´î“§l³·­{—à;ã6¿»YºG ø à^ÌÖî{í–s\ßL¦ñ÷¶Ú•¶Á?/'¸^\,.šªê.ÛŸ?MÏq÷úìã§õÙ9B22ÛÔÀIá° …' Â1éoS¤ðÔÀq+Û »v¼¨‘ 5·`Ô\0ËçÆê÷syŸ¦3Ë©1‹§‡ûpu¸¸Í›²îÊjwºùk`N«ò/»ÜÝüôò<ŸÍ)—¨%WC-¹vZò œ9j)hÔR8Uï²¶õKøœ k× ,D<-ûÓî®Pb”A>ƒ2fPö”Œ!k’!kÿA¼ˆ#Ò”rHS*GSjÏ$‰4M¤©¨/´Ýù:S€Š‘%£Zc¹¶ºò½®äO©OǰUòÂV>;zÔ¤›Tû&Õ±Iuߤ™ûXµúB¬ K‡±°’¾°ˆ±&ì;Åšp䜈a¬‰t4“ÀNGšIOÓø¶+;»õi6~Úœ¶ý¤4£IipR?)Mœ”¦Ÿ”Ó{m7¶³Ñ÷P,§ÙtæU81Ÿæ2õ»'·±-ûÅOß_ª¶óyØ8 (Æ 4Ìn#Ú‡ìô© ¯æï¹Í¶ö”˜òÃnÁ(Ð9ÐEÕ‹ª¶Î)³üñò_§ „ëìh`#ˆÀ2¶@ßK:V)8cZÒezë[½2/·Oìš¾`lvàÝ‚ÝAïwp2†”üT=IÏ©»«+÷kÙ½zùürôWÏ_ÎÏ~«wÝm¹"tíx›ű[+tòØ:ýö8¿6u餰vv÷î:L&n«]½íêf3Ìü5a†óúOw¶9ÿéñû¨>¹§2'.e±Ï¥ÔžKˆ³=—JD.•gõªl[¾\•{o*$Ý2¹JÅ7óô¦Ÿ'0¯ôg˜ÿümÊЕÝ_`.<Ì8¿°‡™Ë3W,–•Û°Vræ(/²3ÈJʹÅÒuuçÖáWͯÎݲ‹‘ RxÁ /¢Â‹¤ð"ç$9¥ÌfmÚÏ%D‘k)|•íÀ7]8£~iZ¶^_1ZŸ¦õi^ŸŽëÓi}Z…ŒêCh^Ÿi@“t5`’ Ä4øäNRü‹rí†m¬ßof(@ƒ˜5»å¬Ù:oœeuóü_o°‘„Þ¨}ô&÷èM­#z“ÐÛ§l‰:ËÔÙHMÔY×móï»nËJ±#´ä‚–]Ð (Ú±H>è [É‚ðJ r1¢ñ¢]§@fDµæÅõu²e ’ãcùš³ Èd(HÑ’-$_ˆÆ0= y³€ >š‡ó$øë¸> FÉH[ È{ 70í*Pµy$ˆG¹6– 3ƒ}ˆÄ Ìø"´ …äÈ@’€d'­dòð޹ð¶ümL¼á3\3†k ® g ïû=\ïò®"±€7|vÈÈÝè˜À¼©>@âM ¼‘I÷+÷þüVNEÀc§*°7—|¬QNáú*áú,Š™äÝ?fR>_Áâ+‚øŠ$¾b_Av|êV®sTP èS¼ûÓC&`=݃_„óUàbOµtY†Úãâ\ ”Hõ†:Ùèïìs Ùè@ëñ* ¯"xI^g¯3˜´é«Äg´iø kï&bÍ£c¬I„û ±ÁgíØg-û¬ (mòY;ø¬¯Q²T8zÉÚqÖ[Êzœõ|<‘‹”õH¥£×) õe¢ÈÃÁ¿`Î^¹usç^¬V¡|´S(ÚÞkUõú?¹ÍˆÜn!Žú-DÅÝR ‹R 
M6–8"“(‰2‘(=óƒ÷·n÷@³Kîeÿtî]ªê&?LÞîšõäåÙùâ´ìʟ땛ܿs;7!õO~œôÝÏÿÜ1ñÓgGCçóÉsÊÐ0ÊqÇ(¹e”6ô›"užÉAP%AÕ;HŸ@HíMH ô¥m?°¯r>°êšØ=­HžOCÂqB®Bª¦*„CBªB½ô¹ù’ô{Èã¹CÁТ`êQphRЧûSÒo¯f˜EE././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_0000664000175000017500000000066712301410454033571 0ustar chuckchuck00000000000000‹ÇôÈPÿtest_windows.TestWindowsDriver.test_remove_export_os.pÍQËjÃ0¼ïW„\ì@b,)~¨×ô’C{h 'ƒqÕq’ZÂ’ó ôß»RÒBK(¥ôPƒåÝÙÕììX¥Oe'kÀC¨ÖØ®Vu c…hÚ•ì"+5Ñu0ѳ[Ý©ã î0M@”å²ov¶iËÔr#…MáÞjz áJ'°ÊJiL)TßÚtê ¬ì«]/ b¹Ã86ëÊ®1'(c§ ƒ?ž–Pä%Ìõ’)Ôžˆ$`j¤x_í2刅Eâ(‚ÙMñåi³Åü±él_ín³5®m<Ž.xyuç‘O‚‘cæNa¨Œ1Ëê“JJP%¥nÓî™ÁÅï/fx ÄÝàƒ£kü¿ÆË±»¾<¹ÏUµ.ïíÆ}}=rד“ÉɦªÚ“ÇUéî?»ÕׇOŸ¯Iš-k`Ùm/ [x À)¿›Ì ¶…Ú!z;¿ZçusW9ð`œJvÍP@3”ºw(ëÌPÍPç…óÍ[‹¨ôÔÀ‚vŒ[ÎHgA´À¨óã¼È—Ëùm0æŸò½þyÿvÚû¤ÖBrÐú«žaÂbrSˆ…ÝMˆ“ˆƒ´°mÙÚ•ÇÄýñ9È»åÎ*Ù»'xs:¥gggc8¥dÌOŒ’§c%"DÓ7êœ}Æ7d–7Ms/7ù îñ°{$ Ë‚WÝë•Ùmï”CõU‡ æŽ&ø®Ã„@|"ÀRŸNørðÄðçHص )Zþ’w¤ˆ¤ÄsÕ[ŸAÊ?pì<:8ò§ì<‡*=]É.w\ë.žÙÓ²¹Çe ü2º‹G¹4V>•èð(ñ(…Û_ÚG¤ZgD{#f׈&hDƒ{CÓΈfшæ1lZì ÛÔ>”…ý=oïúð¿¾ü•WçW³wå¦ÝæK‡¨.b„& ÇŒsjÙØXõa,nµ¡*·Œ3y¸» [‡›bSÖmY­ŸeÓ˜éM\•ÛÙúâìùû”•VÚ=( Úí ƒ‰…Á¤Â`h¶7aŒ Ÿñá31|&…Ϩèy£¿áùÿà€/h< Š'W=Ñ-¾ä’X?¤ DÄì"cúQCCÚ2A<¢·’»¨¯èSWÉÁ§1 Ìú ™åƒ4zÀå9@gZ¥Mt¿ ¦@Ð  x¥€ ´z±T‹.0@åÏE†}?2j8ªWá—9F=êà¢E¸Œy%XÚµW`!6(*3xæÌ]³(6ά t2k’Y”šè/åzþE“à»ÚžúÞ7 ¨K‹xtT§yΣE(¯ †dž "A$2ˆž ¢'ƒ/”¦ Ä Â“A2ÈDÙ“Å+¹Aòo1@ú,’…”E²Ï"”®xtÔ¯:z©¡‚—BZI ¡WC@9LGG%üÆÑ•w£nÔɺw#êc<ºæ/utíÛ@Ѓ>´òp;d±ó“Z/@9ŠÕõ(Vg3Ì"ã³È„,2)‹LŸE^‰¦vi[ëk‰ñ•œòýF‡9Ý×êüV5­_ÈÂÂAûF‰c$%¡w'‘‘”$FR’Ia?#Ý>—ùÊö1)?®'ˆŒLªÍbRÕ®¼¸ý¯£E!èpqD¥ÀÊHT ‰¨z¢R­ÝR¼õ³3WQ®èœa^úb`ê&¦02õ3Ó³¡ up˜:”Jÿ– tz+ée.0·vóÉíÎ <Ü ®aE;úeôaS­F糋ù4oó7åÒŽïìÆŽ\øF¿Žº¾ñî5ý¯ŽûžñÉCO²À[6ä-ó¼eÁ,ñ–õ¼uó[Œ8êª/M]ÂR1a)r‰{.ñÀ%ž¸Ä{.q•ÖAä:ŒT¸€Š §*X?!§9€ ñÙ.Â-†S´ðc´uCÆÊÔR¹Û1ï–ŠÉ?`™K././@LongLink0000000000000000000000000000016400000000000011216 Lustar 
00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_wmi.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_0000664000175000017500000000266612301410454033572 0ustar chuckchuck00000000000000‹ÇôÈPÿtest_windows.TestWindowsDriver.test_remove_export_wmi.pÅWQoÛ6~ׯ0ü’dhÝ‘É{è ËC‚¡.Ö—†¢°®Ûr-%YVô¿w©ZMÖnÃÚ P"[:Þ}÷ÝwwUÕl;·Ìü¥j6m·»­ºf—móì°ªê͵ÛÍ:×víìÞß4÷ílÝT7Û]óÇCvîÿ˶U‹ÅÕm½êêÍb‘5W¿»ªË¶˜]t[‘½ÚÊìðz«²ùÁ¢¬*×¶‹ª¹ÝtÙ¶ /tÖúoîÊÕ­kýg†>³þá7çgþrºð^úßK9¢ë³Éôý]7›úÆíèß纞œÌNvMÓܯkºÿ8=¢§/>|¼<8"“˜®¶ ²¥·½„lÉŽ‚ôž‚âÓŠlÉ®÷mÉ!^/Nëö†LX2ù¾ o‘žGÑ›@M !páîß¼î`ÁFôȈ!#–ÞyoD@4"`\TåjµXxBð§òIxNÝ]]¹_ËîÝ€ÐËç—£Ÿzþr~ö[½ënËy׎€Ü*“CqìÀÇ8¶N¿=Vׯ¢.ȭݽ»G‡ƒÛjWo»ºÙ '™ÁļþÓmÎúô}”æT(ÂRûX í±8Ûc)óˆ¥ô¨^•mË—«rïM‰Ä[WÊø¦JoúsòRÿ òÿ€Ï”†”v?@•{7p}aï¦ÑM%™,+·a®(ÆHÙd%ÕÜb麺sëð­æWˆç9Ý2Ëà bxÁ /"ËÄðBq‘œRe3· aQ€G½l´éÂõôKÓ²ptÅ(:MÑiŽNÇètŠNËPO½ ÍÑé41@‡ç#Lb€˜GƒOæ‘ì_”k7$±~¿™aù1ä³f·œ5[çe³¬nžÿëôAÞ¹ï½QÞ{œÖÑ{“¼·Oó×t–¡³:› ³2ÆmÕ÷Û2OìH-i e „<Š äI½ dË@XÈ=Y!Á9áyÿºN†Ì`ˆ:Í‹ëë$Ê@ ÇÛògß"ˆÐŽ¢&$QHªeaÊÀÆ`x¦AÝ<þ:îM`Âñ£R¤„rFcJSN:Í't*mdˆ1Áþõ„ ˜øæãðè»Ò|çã^#8Í dðRÅ(|ˆQxíñ RDŒq•Œ« ¸Ê„«p•¤3§nå:G: 2̾Ì9¿„ËàôtÏ}ßèT/Þ>óƒWnÝܹ«Uà[ËÏÏÔHjA$,¶ ¢Ú‚Jr ^Ðc¢Ô×x£‚7^ãG'Y:©“–—øþ$¯óñ$/ösës~$…ï«sùežc®Ât^pÕíóuF ÌÇcwÎswΩDˆ©DH©Dßzüѷ懲èEá»Ïh)( ÐÁK“¢°) ZZ!Ñw“³ý¹ŒÇ²ºgUõúw14Úµ%oJ!§Éލ7M6.D¦ŠT‰*b Š ¼¼¿u»:=´{21{)¯ºÉ“·»f=yyv¾8-»òçzå&÷ïÜÎM({“'ýæó?oK|ÄôÙѰõ<: Ë¢ÓV0mE ­L´•meR ”½õu‰´Ú„ºD9f’d&ÉþÁÄ$90Iå©1’‡Š[5ªQ«FE­UÈ…Š­U1Ò©°xÿøR}÷.—äíÃz‚i?ÁaAAß–žêcíÕì/±¾/š ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_0000664000175000017500000000076412301410454033535 0ustar chuckchuck00000000000000‹ÄôÈPÿtest_windows.TestWindowsDriver.test_create_volume_from_snapshot_os.pÍ“MOÜ0†ïó+—d%6Š?òá^é…C{èJœ"EY¯ 
mlÅBüwfÌ.R«mY!DJbÇï¼óÄÑÚºÇv2=àCÛчiÖÁNàrHµÆ™²`|ðÙ=Nì½Ï~[}ë&ûð?pŽnÛõ ±|·³÷›‡M“Bc5ÆÒ&IOÓ¦i’óoÍ_×°:_]\S˜»í÷ÁßzJ;[œžÐà陞 ŒœÄI² eEÞx9ƒÎ{¿îþpɺäœÊs±sÉåÞ%'$7vQ‰¿®Uò ²\uÎÊ¥aª\JnÄR™êjYljūΈ\©ìîzÛø_S¼ŽMÑ' J[ ÅD~ˆ‹ ."r{.â‹(0í5V~œßd}QQÿ›Ç±¥HLâ!súKµ,ùއ{RîyÈâ œYY}ÊùõŽÞÙ Z.$ð‹././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_0000664000175000017500000000066712301410454033537 0ustar chuckchuck00000000000000‹ÂôÈPÿtest_windows.TestWindowsDriver.test_create_export_os.pÍQËjÃ0¼ïW„\ì@b,)~¨×ô’C{h 'ƒqÕq’ZÂ’ó ôß»RÒBK(¥ôPƒåÝÙÕììX¥Oe'kÀC¨ÖØ®Vu c…hÚ•ì"+5Ñu0ѳ[Ý©ã î0M@”å²ov¶iËÔr#…MáÞjz áJ'°ÊJiL)TßÚtê ¬ì«]/ b¹Ã86ëÊ®1'(c§ ƒ?ž–Pä%Ìõ’)Ôžˆ$`j¤x_í2刅Eâ(‚ÙMñåi³Åü±él_ín³5®m<Ž.xyuç‘O‚‘cæNa¨Œ1Ëê“JJP%¥nÝabw>í¬Þ¸Á~~ÁöÀ1ÐJ­¶Mš^)°«w£8Á xv’µËáe¬*­÷JÛmÆàfT˜ƒÇÊGÕnDZ‚ÆJœüö°À”e”3†É¿%† Åۛўþ훨{wCq:M§ƒµaºëÊ£ Í^Ž÷‡åxBHI똀Ù5ƒ:e²<ªÍ ŽÖz«£!V£Wõd‡Pµ) ³kgáœVpq‚py†ðxJWm«2ø±>§½’¸±‹ÑÑO˼ˆj嵚ÈPM0‚~Râ¬&dT«Mh‚颠ˆÛ3,d”Ή*Š_Ô’®,»d'ªägª¸ø¾ñÁôH’2‡EÆ ò~…í°ýÿÖtÓ}ð?ïM΢ØüÚ®,ÈnŸXv²›³³Ýý¤ß遲é././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000manila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_os.p.gzmanila-2013.2.dev175.gbf1a399/manila/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_0000664000175000017500000000066712301410454033575 0ustar chuckchuck00000000000000‹ÆôÈPÿtest_windows.TestWindowsDriver.test_ensure_export_os.pÍQËjÃ0¼ïW„\ì@b,)~¨×ô’C{h 'ƒqÕq’ZÂ’ó ôß»RÒBK(¥ôPƒåÝÙÕììX¥Oe'kÀC¨ÖØ®Vu c…hÚ•ì"+5Ñu0ѳ[Ý©ã î0M@”å²ov¶iËÔr#…MáÞjz áJ'°ÊJiL)TßÚtê ¬ì«]/ b¹Ã86ëÊ®1'(c§ ƒ?ž–Pä%Ìõ’)Ôžˆ$`j¤x_í2刅Eâ(‚ÙMñåi³Åü±él_ín³5®m<Ž.xyuç‘O‚‘cæNa¨Œ1Ëê“JJP%¥n= 3: name = tokens[1].split('@', 1)[0].strip() if exclude_loopback and name == LOOPBACK_DEVNAME: continue retval.append(IPDevice(name, self.namespace)) return retval def 
add_tuntap(self, name, mode='tap'): self._as_root('', 'tuntap', ('add', name, 'mode', mode)) return IPDevice(name, self.namespace) def add_veth(self, name1, name2, namespace2=None): args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] if namespace2 is None: namespace2 = self.namespace else: self.ensure_namespace(namespace2) args += ['netns', namespace2] self._as_root('', 'link', tuple(args)) return (IPDevice(name1, self.namespace), IPDevice(name2, namespace2)) def ensure_namespace(self, name): if not self.netns.exists(name): ip = self.netns.add(name) lo = ip.device(LOOPBACK_DEVNAME) lo.link.set_up() else: ip = IPWrapper(name) return ip def namespace_is_empty(self): return not self.get_devices(exclude_loopback=True) def garbage_collect_namespace(self): """Conditionally destroy the namespace if it is empty.""" if self.namespace and self.netns.exists(self.namespace): if self.namespace_is_empty(): self.netns.delete(self.namespace) return True return False def add_device_to_namespace(self, device): if self.namespace: device.link.set_netns(self.namespace) @classmethod def get_namespaces(cls): output = cls._execute('', 'netns', ('list',)) return [l.strip() for l in output.split('\n')] class IPDevice(SubProcessBase): def __init__(self, name, namespace=None): super(IPDevice, self).__init__(namespace=namespace) self.name = name self.link = IpLinkCommand(self) self.addr = IpAddrCommand(self) self.route = IpRouteCommand(self) def __eq__(self, other): return (other is not None and self.name == other.name and self.namespace == other.namespace) def __str__(self): return self.name class IpCommandBase(object): COMMAND = '' def __init__(self, parent): self._parent = parent def _run(self, *args, **kwargs): return self._parent._run(kwargs.get('options', []), self.COMMAND, args) def _as_root(self, *args, **kwargs): return self._parent._as_root(kwargs.get('options', []), self.COMMAND, args, kwargs.get('use_root_namespace', False)) class IpDeviceCommandBase(IpCommandBase): 
@property def name(self): return self._parent.name class IpLinkCommand(IpDeviceCommandBase): COMMAND = 'link' def set_address(self, mac_address): self._as_root('set', self.name, 'address', mac_address) def set_mtu(self, mtu_size): self._as_root('set', self.name, 'mtu', mtu_size) def set_up(self): self._as_root('set', self.name, 'up') def set_down(self): self._as_root('set', self.name, 'down') def set_netns(self, namespace): self._as_root('set', self.name, 'netns', namespace) self._parent.namespace = namespace def set_name(self, name): self._as_root('set', self.name, 'name', name) self._parent.name = name def set_alias(self, alias_name): self._as_root('set', self.name, 'alias', alias_name) def delete(self): self._as_root('delete', self.name) @property def address(self): return self.attributes.get('link/ether') @property def state(self): return self.attributes.get('state') @property def mtu(self): return self.attributes.get('mtu') @property def qdisc(self): return self.attributes.get('qdisc') @property def qlen(self): return self.attributes.get('qlen') @property def alias(self): return self.attributes.get('alias') @property def attributes(self): return self._parse_line(self._run('show', self.name, options='o')) def _parse_line(self, value): if not value: return {} device_name, settings = value.replace("\\", '').split('>', 1) tokens = settings.split() keys = tokens[::2] values = [int(v) if v.isdigit() else v for v in tokens[1::2]] retval = dict(zip(keys, values)) return retval class IpAddrCommand(IpDeviceCommandBase): COMMAND = 'addr' def add(self, ip_version, cidr, broadcast, scope='global'): self._as_root('add', cidr, 'brd', broadcast, 'scope', scope, 'dev', self.name, options=[ip_version]) def delete(self, ip_version, cidr): self._as_root('del', cidr, 'dev', self.name, options=[ip_version]) def flush(self): self._as_root('flush', self.name) def list(self, scope=None, to=None, filters=None): if filters is None: filters = [] retval = [] if scope: filters += ['scope', 
scope] if to: filters += ['to', to] for line in self._run('show', self.name, *filters).split('\n'): line = line.strip() if not line.startswith('inet'): continue parts = line.split() if parts[0] == 'inet6': version = 6 scope = parts[3] broadcast = '::' else: version = 4 if parts[2] == 'brd': broadcast = parts[3] scope = parts[5] else: # sometimes output of 'ip a' might look like: # inet 192.168.100.100/24 scope global eth0 # and broadcast needs to be calculated from CIDR broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) scope = parts[3] retval.append(dict(cidr=parts[1], broadcast=broadcast, scope=scope, ip_version=version, dynamic=('dynamic' == parts[-1]))) return retval class IpRouteCommand(IpDeviceCommandBase): COMMAND = 'route' def add_gateway(self, gateway, metric=None): args = ['replace', 'default', 'via', gateway] if metric: args += ['metric', metric] args += ['dev', self.name] self._as_root(*args) def delete_gateway(self, gateway): self._as_root('del', 'default', 'via', gateway, 'dev', self.name) def get_gateway(self, scope=None, filters=None): if filters is None: filters = [] retval = None if scope: filters += ['scope', scope] route_list_lines = self._run('list', 'dev', self.name, *filters).split('\n') default_route_line = next((x.strip() for x in route_list_lines if x.strip().startswith('default')), None) if default_route_line: gateway_index = 2 parts = default_route_line.split() retval = dict(gateway=parts[gateway_index]) metric_index = 4 parts_has_metric = (len(parts) > metric_index) if parts_has_metric: retval.update(metric=int(parts[metric_index])) return retval def pullup_route(self, interface_name): """Ensures that the route entry for the interface is before all others on the same subnet. 
""" device_list = [] device_route_list_lines = self._run('list', 'proto', 'kernel', 'dev', interface_name).split('\n') for device_route_line in device_route_list_lines: try: subnet = device_route_line.split()[0] except Exception: continue subnet_route_list_lines = self._run('list', 'proto', 'kernel', 'match', subnet).split('\n') for subnet_route_line in subnet_route_list_lines: i = iter(subnet_route_line.split()) while(i.next() != 'dev'): pass device = i.next() try: while(i.next() != 'src'): pass src = i.next() except Exception: src = '' if device != interface_name: device_list.append((device, src)) else: break for (device, src) in device_list: self._as_root('del', subnet, 'dev', device) if (src != ''): self._as_root('append', subnet, 'proto', 'kernel', 'src', src, 'dev', device) else: self._as_root('append', subnet, 'proto', 'kernel', 'dev', device) class IpNetnsCommand(IpCommandBase): COMMAND = 'netns' def add(self, name): self._as_root('add', name, use_root_namespace=True) return IPWrapper(name) def delete(self, name): self._as_root('delete', name, use_root_namespace=True) def execute(self, cmds, addl_env={}, check_exit_code=True): if not self._parent.namespace: raise Exception(_('No namespace defined for parent')) else: env_params = [] if addl_env: env_params = (['env'] + ['%s=%s' % pair for pair in addl_env.items()]) total_cmd = ['ip', 'netns', 'exec', self._parent.namespace] + \ env_params + list(cmds) return utils.execute(*total_cmd, run_as_root=True, check_exit_code=check_exit_code) def exists(self, name): output = self._as_root('list', options='o', use_root_namespace=True) for line in output.split('\n'): if name == line.strip(): return True return False def device_exists(device_name, namespace=None): try: address = IPDevice(device_name, namespace).link.address except Exception as e: if 'does not exist' in str(e): return False raise return bool(address) def iproute_arg_supported(command, arg): command += ['help'] stdout, stderr = utils.execute(command, 
check_exit_code=False, return_stderr=True) return any(arg in line for line in stderr.split('\n')) manila-2013.2.dev175.gbf1a399/manila/image/0000775000175000017500000000000012301410516020032 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/image/__init__.py0000664000175000017500000000124712301410454022150 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/image/glance.py0000664000175000017500000003773212301410454021652 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Implementation of an image service that uses Glance as the backend""" from __future__ import absolute_import import copy import itertools import random import sys import time import urlparse import glanceclient import glanceclient.exc from manila import exception from manila.openstack.common import jsonutils from manila.openstack.common import log as logging from manila.openstack.common import timeutils from oslo.config import cfg LOG = logging.getLogger(__name__) CONF = cfg.CONF def _parse_image_ref(image_href): """Parse an image href into composite parts. :param image_href: href of an image :returns: a tuple of the form (image_id, host, port) :raises ValueError """ url = urlparse.urlparse(image_href) port = url.port or 80 host = url.netloc.split(':', 1)[0] image_id = url.path.split('/')[-1] use_ssl = (url.scheme == 'https') return (image_id, host, port, use_ssl) def _create_glance_client(context, host, port, use_ssl, version=CONF.glance_api_version): """Instantiate a new glanceclient.Client object""" if version is None: version = CONF.glance_api_version if use_ssl: scheme = 'https' else: scheme = 'http' params = {} params['insecure'] = CONF.glance_api_insecure if CONF.auth_strategy == 'keystone': params['token'] = context.auth_token endpoint = '%s://%s:%s' % (scheme, host, port) return glanceclient.Client(str(version), endpoint, **params) def get_api_servers(): """ Shuffle a list of CONF.glance_api_servers and return an iterator that will cycle through the list, looping around to the beginning if necessary. 
""" api_servers = [] for api_server in CONF.glance_api_servers: if '//' not in api_server: api_server = 'http://' + api_server url = urlparse.urlparse(api_server) port = url.port or 80 host = url.netloc.split(':', 1)[0] use_ssl = (url.scheme == 'https') api_servers.append((host, port, use_ssl)) random.shuffle(api_servers) return itertools.cycle(api_servers) class GlanceClientWrapper(object): """Glance client wrapper class that implements retries.""" def __init__(self, context=None, host=None, port=None, use_ssl=False, version=None): if host is not None: self.client = self._create_static_client(context, host, port, use_ssl, version) else: self.client = None self.api_servers = None self.version = version def _create_static_client(self, context, host, port, use_ssl, version): """Create a client that we'll use for every call.""" self.host = host self.port = port self.use_ssl = use_ssl self.version = version return _create_glance_client(context, self.host, self.port, self.use_ssl, self.version) def _create_onetime_client(self, context, version): """Create a client that will be used for one call.""" if self.api_servers is None: self.api_servers = get_api_servers() self.host, self.port, self.use_ssl = self.api_servers.next() return _create_glance_client(context, self.host, self.port, self.use_ssl, version) def call(self, context, method, *args, **kwargs): """ Call a glance client method. If we get a connection error, retry the request according to CONF.glance_num_retries. 
""" version = self.version if version in kwargs: version = kwargs['version'] retry_excs = (glanceclient.exc.ServiceUnavailable, glanceclient.exc.InvalidEndpoint, glanceclient.exc.CommunicationError) num_attempts = 1 + CONF.glance_num_retries for attempt in xrange(1, num_attempts + 1): client = self.client or self._create_onetime_client(context, version) try: return getattr(client.images, method)(*args, **kwargs) except retry_excs as e: host = self.host port = self.port extra = "retrying" error_msg = _("Error contacting glance server " "'%(host)s:%(port)s' for '%(method)s', " "%(extra)s.") if attempt == num_attempts: extra = 'done trying' LOG.exception(error_msg, locals()) raise exception.GlanceConnectionFailed(host=host, port=port, reason=str(e)) LOG.exception(error_msg, locals()) time.sleep(1) class GlanceImageService(object): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self, client=None): self._client = client or GlanceClientWrapper() def detail(self, context, **kwargs): """Calls out to Glance for a list of detailed image information.""" params = self._extract_query_params(kwargs) try: images = self._client.call(context, 'list', **params) except Exception: _reraise_translated_exception() _images = [] for image in images: if self._is_image_available(context, image): _images.append(self._translate_from_glance(image)) return _images def _extract_query_params(self, params): _params = {} accepted_params = ('filters', 'marker', 'limit', 'sort_key', 'sort_dir') for param in accepted_params: if param in params: _params[param] = params.get(param) # ensure filters is a dict _params.setdefault('filters', {}) # NOTE(vish): don't filter out private images _params['filters'].setdefault('is_public', 'none') return _params def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" try: image = self._client.call(context, 'get', image_id) except Exception: 
_reraise_translated_image_exception(image_id) if not self._is_image_available(context, image): raise exception.ImageNotFound(image_id=image_id) base_image_meta = self._translate_from_glance(image) return base_image_meta def get_location(self, context, image_id): """Returns the direct url representing the backend storage location, or None if this attribute is not shown by Glance.""" try: client = GlanceClientWrapper() image_meta = client.call(context, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) if not self._is_image_available(context, image_meta): raise exception.ImageNotFound(image_id=image_id) return getattr(image_meta, 'direct_url', None) def download(self, context, image_id, data): """Calls out to Glance for metadata and data and writes data.""" try: image_chunks = self._client.call(context, 'data', image_id) except Exception: _reraise_translated_image_exception(image_id) for chunk in image_chunks: data.write(chunk) def create(self, context, image_meta, data=None): """Store the image data and return the new image object.""" sent_service_image_meta = self._translate_to_glance(image_meta) if data: sent_service_image_meta['data'] = data recv_service_image_meta = self._client.call(context, 'create', **sent_service_image_meta) return self._translate_from_glance(recv_service_image_meta) def update(self, context, image_id, image_meta, data=None, purge_props=True): """Modify the given image with the new data.""" image_meta = self._translate_to_glance(image_meta) image_meta['purge_props'] = purge_props #NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. 
image_meta.pop('id', None) if data: image_meta['data'] = data try: image_meta = self._client.call(context, 'update', image_id, **image_meta) except Exception: _reraise_translated_image_exception(image_id) else: return self._translate_from_glance(image_meta) def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. :raises: NotAuthorized if the user is not an owner. """ try: self._client.call(context, 'delete', image_id) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) return True @staticmethod def _translate_to_glance(image_meta): image_meta = _convert_to_string(image_meta) image_meta = _remove_read_only(image_meta) return image_meta @staticmethod def _translate_from_glance(image): image_meta = _extract_attributes(image) image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) return image_meta @staticmethod def _is_image_available(context, image): """Check image availability. This check is needed in case Nova and Glance are deployed without authentication turned on. """ # The presence of an auth token implies this is an authenticated # request and we need not handle the noauth use-case. 
if hasattr(context, 'auth_token') and context.auth_token: return True if image.is_public or context.is_admin: return True properties = image.properties if context.project_id and ('owner_id' in properties): return str(properties['owner_id']) == str(context.project_id) if context.project_id and ('project_id' in properties): return str(properties['project_id']) == str(context.project_id) try: user_id = properties['user_id'] except KeyError: return False return str(user_id) == str(context.user_id) def _convert_timestamps_to_datetimes(image_meta): """Returns image with timestamp fields converted to datetime objects.""" for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) return image_meta # NOTE(bcwaldon): used to store non-string data in glance metadata def _json_loads(properties, attr): prop = properties[attr] if isinstance(prop, basestring): properties[attr] = jsonutils.loads(prop) def _json_dumps(properties, attr): prop = properties[attr] if not isinstance(prop, basestring): properties[attr] = jsonutils.dumps(prop) _CONVERT_PROPS = ('block_device_mapping', 'mappings') def _convert(method, metadata): metadata = copy.deepcopy(metadata) properties = metadata.get('properties') if properties: for attr in _CONVERT_PROPS: if attr in properties: method(properties, attr) return metadata def _convert_from_string(metadata): return _convert(_json_loads, metadata) def _convert_to_string(metadata): return _convert(_json_dumps, metadata) def _extract_attributes(image): IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'checksum', 'id', 'name', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'status', 'min_disk', 'min_ram', 'is_public'] output = {} for attr in IMAGE_ATTRIBUTES: output[attr] = getattr(image, attr, None) output['properties'] = getattr(image, 'properties', {}) return output def _remove_read_only(image_meta): IMAGE_ATTRIBUTES = ['status', 'updated_at', 
'created_at', 'deleted_at'] output = copy.deepcopy(image_meta) for attr in IMAGE_ATTRIBUTES: if attr in output: del output[attr] return output def _reraise_translated_image_exception(image_id): """Transform the exception for the image but keep its traceback intact.""" exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_image_exception(image_id, exc_value) raise new_exc, None, exc_trace def _reraise_translated_exception(): """Transform the exception but keep its traceback intact.""" exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_plain_exception(exc_value) raise new_exc, None, exc_trace def _translate_image_exception(image_id, exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.ImageNotAuthorized(image_id=image_id) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.ImageNotFound(image_id=image_id) if isinstance(exc_value, glanceclient.exc.BadRequest): return exception.Invalid(exc_value) return exc_value def _translate_plain_exception(exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.NotAuthorized(exc_value) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.NotFound(exc_value) if isinstance(exc_value, glanceclient.exc.BadRequest): return exception.Invalid(exc_value) return exc_value def get_remote_image_service(context, image_href): """Create an image_service and parse the id from the given image_href. The image_href param can be an href of the form 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the image_href is a standalone id, then the default image service is returned. 
:param image_href: href that describes the location of an image :returns: a tuple of the form (image_service, image_id) """ #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() return image_service, image_href try: (image_id, glance_host, glance_port, use_ssl) = \ _parse_image_ref(image_href) glance_client = GlanceClientWrapper(context=context, host=glance_host, port=glance_port, use_ssl=use_ssl) except ValueError: raise exception.InvalidImageRef(image_href=image_href) image_service = GlanceImageService(client=glance_client) return image_service, image_id def get_default_image_service(): return GlanceImageService() manila-2013.2.dev175.gbf1a399/manila/image/image_utils.py0000664000175000017500000002343612301410454022717 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods to deal with images. 
This is essentially a copy from nova.virt.images.py Some slight modifications, but at some point we should look at maybe pushign this up to OSLO """ import os import re import tempfile from oslo.config import cfg from manila import exception from manila.openstack.common import log as logging from manila import utils LOG = logging.getLogger(__name__) image_helper_opt = [cfg.StrOpt('image_conversion_dir', default='/tmp', help='parent dir for tempdir used for image conversion'), ] CONF = cfg.CONF CONF.register_opts(image_helper_opt) class QemuImgInfo(object): BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:" r"\s+(.*?)\)\s*$"), re.I) TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$") SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I) def __init__(self, cmd_output): details = self._parse(cmd_output) self.image = details.get('image') self.backing_file = details.get('backing_file') self.file_format = details.get('file_format') self.virtual_size = details.get('virtual_size') self.cluster_size = details.get('cluster_size') self.disk_size = details.get('disk_size') self.snapshots = details.get('snapshot_list', []) self.encryption = details.get('encryption') def __str__(self): lines = [ 'image: %s' % self.image, 'file_format: %s' % self.file_format, 'virtual_size: %s' % self.virtual_size, 'disk_size: %s' % self.disk_size, 'cluster_size: %s' % self.cluster_size, 'backing_file: %s' % self.backing_file, ] if self.snapshots: lines.append("snapshots: %s" % self.snapshots) return "\n".join(lines) def _canonicalize(self, field): # Standardize on underscores/lc/no dash and no spaces # since qemu seems to have mixed outputs here... and # this format allows for better integration with python # - ie for usage in kwargs and such... 
field = field.lower().strip() for c in (" ", "-"): field = field.replace(c, '_') return field def _extract_bytes(self, details): # Replace it with the byte amount real_size = self.SIZE_RE.search(details) if real_size: details = real_size.group(1) try: details = utils.to_bytes(details) except (TypeError, ValueError): pass return details def _extract_details(self, root_cmd, root_details, lines_after): consumed_lines = 0 real_details = root_details if root_cmd == 'backing_file': # Replace it with the real backing file backing_match = self.BACKING_FILE_RE.match(root_details) if backing_match: real_details = backing_match.group(2).strip() elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']: # Replace it with the byte amount (if we can convert it) real_details = self._extract_bytes(root_details) elif root_cmd == 'file_format': real_details = real_details.strip().lower() elif root_cmd == 'snapshot_list': # Next line should be a header, starting with 'ID' if not lines_after or not lines_after[0].startswith("ID"): msg = _("Snapshot list encountered but no header found!") raise ValueError(msg) consumed_lines += 1 possible_contents = lines_after[1:] real_details = [] # This is the sprintf pattern we will try to match # "%-10s%-20s%7s%20s%15s" # ID TAG VM SIZE DATE VM CLOCK (current header) for line in possible_contents: line_pieces = line.split(None) if len(line_pieces) != 6: break else: # Check against this pattern occuring in the final position # "%02d:%02d:%02d.%03d" date_pieces = line_pieces[5].split(":") if len(date_pieces) != 3: break real_details.append({ 'id': line_pieces[0], 'tag': line_pieces[1], 'vm_size': line_pieces[2], 'date': line_pieces[3], 'vm_clock': line_pieces[4] + " " + line_pieces[5], }) consumed_lines += 1 return (real_details, consumed_lines) def _parse(self, cmd_output): # Analysis done of qemu-img.c to figure out what is going on here # Find all points start with some chars and then a ':' then a newline # and then handle the results of 
those 'top level' items in a separate # function. # # TODO(harlowja): newer versions might have a json output format # we should switch to that whenever possible. # see: http://bit.ly/XLJXDX if not cmd_output: cmd_output = '' contents = {} lines = cmd_output.splitlines() i = 0 line_am = len(lines) while i < line_am: line = lines[i] if not line.strip(): i += 1 continue consumed_lines = 0 top_level = self.TOP_LEVEL_RE.match(line) if top_level: root = self._canonicalize(top_level.group(1)) if not root: i += 1 continue root_details = top_level.group(2).strip() details, consumed_lines = self._extract_details(root, root_details, lines[i + 1:]) contents[root] = details i += consumed_lines + 1 return contents def qemu_img_info(path): """Return a object containing the parsed output from qemu-img info.""" out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, run_as_root=True) return QemuImgInfo(out) def convert_image(source, dest, out_format): """Convert image to other format""" cmd = ('qemu-img', 'convert', '-O', out_format, source, dest) utils.execute(*cmd, run_as_root=True) def fetch(context, image_service, image_id, path, _user_id, _project_id): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. with utils.remove_path_on_error(path): with open(path, "wb") as image_file: image_service.download(context, image_id, image_file) def fetch_to_raw(context, image_service, image_id, dest, user_id=None, project_id=None): if (CONF.image_conversion_dir and not os.path.exists(CONF.image_conversion_dir)): os.makedirs(CONF.image_conversion_dir) # NOTE(avishay): I'm not crazy about creating temp files which may be # large and cause disk full errors which would confuse users. # Unfortunately it seems that you can't pipe to 'qemu-img convert' because # it seeks. 
Maybe we can think of something for a future version. fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir) os.close(fd) with utils.remove_path_on_error(tmp): fetch(context, image_service, image_id, tmp, user_id, project_id) data = qemu_img_info(tmp) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:" "%(backing_file)s") % locals()) # NOTE(jdg): I'm using qemu-img convert to write # to the volume regardless if it *needs* conversion or not # TODO(avishay): We can speed this up by checking if the image is raw # and if so, writing directly to the device. However, we need to keep # check via 'qemu-img info' that what we copied was in fact a raw # image and not a different format with a backing file, which may be # malicious. LOG.debug("%s was %s, converting to raw" % (image_id, fmt)) convert_image(tmp, dest, 'raw') data = qemu_img_info(dest) if data.file_format != "raw": raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to raw, but format is now %s") % data.file_format) os.unlink(tmp) manila-2013.2.dev175.gbf1a399/manila/version.py0000664000175000017500000000165112301410454021013 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from pbr import version as pbr_version MANILA_VENDOR = "OpenStack Foundation" MANILA_PRODUCT = "OpenStack Manila" MANILA_PACKAGE = None # OS distro package version suffix loaded = False version_info = pbr_version.VersionInfo('manila') version_string = version_info.version_string manila-2013.2.dev175.gbf1a399/manila/policy.py0000664000175000017500000000763212301410454020632 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Policy Engine For Manila""" import functools from oslo.config import cfg from manila import exception from manila.openstack.common import policy from manila import utils policy_opts = [ cfg.StrOpt('policy_file', default='policy.json', help=_('JSON file representing policy')), cfg.StrOpt('policy_default_rule', default='default', help=_('Rule checked when requested rule is not found')), ] CONF = cfg.CONF CONF.register_opts(policy_opts) _POLICY_PATH = None _POLICY_CACHE = {} def reset(): global _POLICY_PATH global _POLICY_CACHE _POLICY_PATH = None _POLICY_CACHE = {} policy.reset() def init(): global _POLICY_PATH global _POLICY_CACHE if not _POLICY_PATH: _POLICY_PATH = utils.find_config(CONF.policy_file) utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, reload_func=_set_brain) def _set_brain(data): default_rule = CONF.policy_default_rule policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) def enforce(context, action, target): """Verifies that the action is valid on the target in this context. :param context: manila context :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param object: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :raises manila.exception.PolicyNotAuthorized: if verification fails. """ init() match_list = ('rule:%s' % action,) credentials = context.to_dict() policy.enforce(match_list, target, credentials, exception.PolicyNotAuthorized, action=action) def check_is_admin(roles): """Whether or not roles contains 'admin' role according to policy setting. 
""" init() action = 'context_is_admin' match_list = ('rule:%s' % action,) # include project_id on target to avoid KeyError if context_is_admin # policy definition is missing, and default admin_or_owner rule # attempts to apply. Since our credentials dict does not include a # project_id, this target can never match as a generic rule. target = {'project_id': ''} credentials = {'roles': roles} return policy.enforce(match_list, target, credentials) def wrap_check_policy(resource): """Check policy corresponding to the wrapped methods prior to execution. """ def check_policy_wraper(func): @functools.wraps(func) def wrapped(self, context, target_obj, *args, **kwargs): check_policy(context, resource, func.__name__, target_obj) return func(self, context, target_obj, *args, **kwargs) return wrapped return check_policy_wraper def check_policy(context, resource, action, target_obj=None): target = { 'project_id': context.project_id, 'user_id': context.user_id, } target.update(target_obj or {}) _action = '%s:%s' % (resource, action) enforce(context, _action, target) manila-2013.2.dev175.gbf1a399/manila/service.py0000664000175000017500000005150412301410454020770 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Generic Node base class for all workers that run on hosts.""" import errno import inspect import os import random import signal import sys import time import eventlet import greenlet from oslo.config import cfg from manila import context from manila import db from manila import exception from manila.openstack.common import importutils from manila.openstack.common import log as logging from manila.openstack.common import rpc from manila import utils from manila import version from manila import wsgi LOG = logging.getLogger(__name__) service_opts = [ cfg.IntOpt('report_interval', default=10, help='seconds between nodes reporting state to datastore'), cfg.IntOpt('periodic_interval', default=60, help='seconds between running periodic tasks'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='range of seconds to randomly delay when starting the' ' periodic task scheduler to reduce stampeding.' ' (Disable by setting to 0)'), cfg.StrOpt('osapi_share_listen', default="0.0.0.0", help='IP address for OpenStack Share API to listen'), cfg.IntOpt('osapi_share_listen_port', default=8786, help='port for os share api to listen'), ] CONF = cfg.CONF CONF.register_opts(service_opts) class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher. :returns: None """ self._services = [] @staticmethod def run_server(server): """Start and wait for a server to finish. :param service: Server to run and wait for. :returns: None """ server.start() server.wait() def launch_server(self, server): """Load and start the given server. :param server: The server you would like to start. :returns: None """ gt = eventlet.spawn(self.run_server, server) self._services.append(gt) def stop(self): """Stop all services which are currently running. 
:returns: None """ for service in self._services: service.kill() def wait(self): """Waits until all services have been stopped, and then returns. :returns: None """ def sigterm(sig, frame): LOG.audit(_("SIGTERM received")) # NOTE(jk0): Raise a ^C which is caught by the caller and cleanly # shuts down the service. This does not yet handle eventlet # threads. raise KeyboardInterrupt signal.signal(signal.SIGTERM, sigterm) for service in self._services: try: service.wait() except greenlet.GreenletExit: pass class ServerWrapper(object): def __init__(self, server, workers): self.server = server self.workers = workers self.children = set() self.forktimes = [] self.failed = False class ProcessLauncher(object): def __init__(self): self.children = {} self.sigcaught = None self.totalwrap = 0 self.failedwrap = 0 self.running = True rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') signal.signal(signal.SIGTERM, self._handle_signal) signal.signal(signal.SIGINT, self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGINT, signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read() LOG.info(_('Parent process has died unexpectedly, exiting')) sys.exit(1) def _child_process(self, server): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise SignalExit(signal.SIGTERM) signal.signal(signal.SIGTERM, _sigterm) # Block SIGINT and let the parent send us a SIGTERM # signal.signal(signal.SIGINT, signal.SIG_IGN) # This differs from the behavior in nova in that we dont ignore this # It allows the non-wsgi services to be terminated properly signal.signal(signal.SIGINT, _sigterm) # Reopen the eventlet hub to make sure we don't share 
an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() # Close write to ensure only parent has it open os.close(self.writepipe) # Create greenthread to watch for parent to close pipe eventlet.spawn(self._pipe_watcher) # Reseed random number generator random.seed() launcher = Launcher() launcher.run_server(server) def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: # Limit ourselves to one process a second (over the period of # number of workers * 1 second). This will allow workers to # start up quickly but ensure we don't fork off children that # die instantly too quickly. if time.time() - wrap.forktimes[0] < wrap.workers: LOG.info(_('Forking too fast, sleeping')) time.sleep(1) wrap.forktimes.pop(0) wrap.forktimes.append(time.time()) pid = os.fork() if pid == 0: # NOTE(johannes): All exceptions are caught to ensure this # doesn't fallback into the loop spawning children. It would # be bad for a child to spawn more children. status = 0 try: self._child_process(wrap.server) except SignalExit as exc: signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[exc.signo] LOG.info(_('Caught %s, exiting'), signame) status = exc.code except SystemExit as exc: status = exc.code except BaseException: LOG.exception(_('Unhandled exception')) status = 2 finally: wrap.server.stop() os._exit(status) LOG.info(_('Started child %d'), pid) wrap.children.add(pid) self.children[pid] = wrap return pid def launch_server(self, server, workers=1): wrap = ServerWrapper(server, workers) self.totalwrap = self.totalwrap + 1 LOG.info(_('Starting %d workers'), wrap.workers) while (self.running and len(wrap.children) < wrap.workers and not wrap.failed): self._start_child(wrap) def _wait_child(self): try: # Don't block if no child processes have exited pid, status = os.waitpid(0, os.WNOHANG) if not pid: return None except OSError as exc: if exc.errno not in (errno.EINTR, errno.ECHILD): raise return None code = 0 if os.WIFSIGNALED(status): sig = 
os.WTERMSIG(status) LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals()) else: code = os.WEXITSTATUS(status) LOG.info(_('Child %(pid)d exited with status %(code)d'), locals()) if pid not in self.children: LOG.warning(_('pid %d not in child list'), pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) if 2 == code: wrap.failed = True self.failedwrap = self.failedwrap + 1 LOG.info(_('_wait_child %d'), self.failedwrap) if self.failedwrap == self.totalwrap: self.running = False return wrap def wait(self): """Loop waiting on children to die and respawning as necessary.""" while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) eventlet.greenthread.sleep(.01) continue LOG.info(_('wait wrap.failed %s'), wrap.failed) while (self.running and len(wrap.children) < wrap.workers and not wrap.failed): self._start_child(wrap) if self.sigcaught: signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[self.sigcaught] LOG.info(_('Caught %s, stopping children'), signame) for pid in self.children: try: os.kill(pid, signal.SIGTERM) except OSError as exc: if exc.errno != errno.ESRCH: raise # Wait for children to die if self.children: LOG.info(_('Waiting on %d children to exit'), len(self.children)) while self.children: self._wait_child() class Service(object): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. 
It also periodically runs tasks on the manager and reports it state to the database services table.""" def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, *args, **kwargs): self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=self.host, service_name=service_name, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay super(Service, self).__init__(*args, **kwargs) self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] def start(self): version_string = version.version_string() LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'), {'topic': self.topic, 'version_string': version_string}) self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() try: service_ref = db.service_get_by_args(ctxt, self.host, self.binary) self.service_id = service_ref['id'] except exception.NotFound: self._create_service_ref(ctxt) self.conn = rpc.create_connection(new=True) LOG.debug(_("Creating Consumer connection for Service %s") % self.topic) rpc_dispatcher = self.manager.create_rpc_dispatcher() # Share this same connection for these Consumers self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False) self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True) # Consume from all consumers in a thread self.conn.consume_in_thread() if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, initial_delay=self.report_interval) self.timers.append(pulse) if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = 
random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None periodic = utils.LoopingCall(self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) def _create_service_ref(self, context): zone = CONF.storage_availability_zone service_ref = db.service_create(context, {'host': self.host, 'binary': self.binary, 'topic': self.topic, 'report_count': 0, 'availability_zone': zone}) self.service_id = service_ref['id'] def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None): """Instantiates class and passes back application object. :param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'manila-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('manila-')[2] manager = CONF.get('%s_manager' % subtopic, None) if report_interval is None: report_interval = CONF.report_interval if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name) return service_obj def kill(self): """Destroy the service object in the datastore.""" self.stop() try: db.service_destroy(context.get_admin_context(), 
self.service_id) except exception.NotFound: LOG.warn(_('Service killed that has no database entry')) def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.conn.close() except Exception: pass for x in self.timers: try: x.stop() except Exception: pass self.timers = [] def wait(self): for x in self.timers: try: x.wait() except Exception: pass def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self): """Update the state of this service in the datastore.""" ctxt = context.get_admin_context() zone = CONF.storage_availability_zone state_catalog = {} try: try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: LOG.debug(_('The service database object disappeared, ' 'Recreating it.')) self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) state_catalog['report_count'] = service_ref['report_count'] + 1 if zone != service_ref['availability_zone']: state_catalog['availability_zone'] = zone db.service_update(ctxt, self.service_id, state_catalog) # TODO(termie): make this pattern be more elegant. if getattr(self, 'model_disconnected', False): self.model_disconnected = False LOG.error(_('Recovered model server connection!')) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception(_('model server went away')) class WSGIService(object): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. 
:returns: None """ self.name = name self.manager = self._get_manager() self.loader = loader or wsgi.Loader() self.app = self.loader.load_app(name) self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.server = wsgi.Server(name, self.app, host=self.host, port=self.port) def _get_manager(self): """Initialize a Manager object appropriate for this service. Use the service name to look up a Manager subclass from the configuration and initialize an instance. If no class name is configured, just return None. :returns: a Manager instance, or None. """ fl = '%s_manager' % self.name if fl not in CONF: return None manager_class_name = CONF.get(fl, None) if not manager_class_name: return None manager_class = importutils.import_class(manager_class_name) return manager_class() def start(self): """Start serving this service using loaded configuration. Also, retrieve updated port number in case '0' was passed in, which indicates a random port should be used. :returns: None """ if self.manager: self.manager.init_host() self.server.start() self.port = self.server.port def stop(self): """Stop serving this API. :returns: None """ self.server.stop() def wait(self): """Wait for the service to stop serving this API. 
:returns: None """ self.server.wait() # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(*servers): global _launcher if not _launcher: _launcher = Launcher() for server in servers: _launcher.launch_server(server) def wait(): LOG.debug(_('Full set of CONF:')) for flag in CONF: flag_get = CONF.get(flag, None) # hide flag contents from log if contains a password # should use secret flag when switch over to openstack-common if ("_password" in flag or "_key" in flag or (flag == "sql_connection" and "mysql:" in flag_get)): LOG.debug(_('%(flag)s : FLAG SET ') % locals()) else: LOG.debug('%(flag)s : %(flag_get)s' % locals()) try: _launcher.wait() except KeyboardInterrupt: _launcher.stop() rpc.cleanup() manila-2013.2.dev175.gbf1a399/manila/common/0000775000175000017500000000000012301410516020240 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/common/sqlalchemyutils.py0000775000175000017500000001140312301410454024040 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack LLC. # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Implementation of paginate query.""" import sqlalchemy from manila import exception from manila.openstack.common import log as logging LOG = logging.getLogger(__name__) # copied from glance/db/sqlalchemy/api.py def paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-values sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we returns the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added. 
""" if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) assert(not (sort_dir and sort_dirs)) # Default the sort direction to ascending if sort_dirs is None and sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir for _sort_key in sort_keys] assert(len(sort_dirs) == len(sort_keys)) # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exception.InvalidInput(reason='Invalid sort key') query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in xrange(0, len(sort_keys)): crit_attrs = [] for j in xrange(0, i): model_attr = getattr(model, sort_keys[j]) crit_attrs.append((model_attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) if sort_dirs[i] == 'desc': crit_attrs.append((model_attr < marker_values[i])) elif sort_dirs[i] == 'asc': crit_attrs.append((model_attr > marker_values[i])) else: raise ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) criteria = sqlalchemy.sql.and_(*crit_attrs) criteria_list.append(criteria) f = sqlalchemy.sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) return query manila-2013.2.dev175.gbf1a399/manila/common/constants.py0000664000175000017500000000157512301410454022637 0ustar chuckchuck00000000000000# Copyright 2013 Openstack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_NEW = 'NEW' STATUS_CREATING = 'CREATING' STATUS_DELETING = 'DELETING' STATUS_DELETED = 'DELETED' STATUS_ERROR = 'ERROR' STATUS_ACTIVE = 'ACTIVE' STATUS_INACTIVE = 'INACTIVE' SECURITY_SERVICES_ALLOWED_TYPES = ['active_directory', 'ldap', 'kerberos'] manila-2013.2.dev175.gbf1a399/manila/common/__init__.py0000664000175000017500000000121612301410454022352 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. manila-2013.2.dev175.gbf1a399/manila/common/config.py0000664000175000017500000002170712301410454022067 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. """ import os import socket from oslo.config import cfg CONF = cfg.CONF def _get_my_ip(): """ Returns the actual ip of the local machine. This code figures out what source address would be used if some traffic were to be sent out to some well known address on the Internet. In this case, a Google DNS server is used, but the specific address does not matter much. No traffic is actually sent. """ try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) (addr, port) = csock.getsockname() csock.close() return addr except socket.error: return "127.0.0.1" core_opts = [ cfg.StrOpt('connection_type', default=None, help='Virtualization api connection type : libvirt, xenapi, ' 'or fake'), cfg.StrOpt('sql_connection', default='sqlite:///$state_path/$sqlite_db', help='The SQLAlchemy connection string used to connect to the ' 'database', secret=True), cfg.IntOpt('sql_connection_debug', default=0, help='Verbosity of SQL debugging information. 
0=None, ' '100=Everything'), cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for manila-api'), cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')), help='Directory where the manila python module is installed'), cfg.StrOpt('bindir', default='$pybasedir/bin', help='Directory where manila binaries are installed'), cfg.StrOpt('state_path', default='$pybasedir', help="Top-level directory for maintaining manila's state"), ] debug_opts = [ ] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt('my_ip', default=_get_my_ip(), help='ip address of this host'), cfg.StrOpt('glance_host', default='$my_ip', help='default glance hostname or ip'), cfg.IntOpt('glance_port', default=9292, help='default glance port'), cfg.ListOpt('glance_api_servers', default=['$glance_host:$glance_port'], help='A list of the glance api servers available to manila ' '([hostname|ip]:port)'), cfg.IntOpt('glance_api_version', default=1, help='Version of the glance api to use'), cfg.IntOpt('glance_num_retries', default=0, help='Number retries when downloading an image from glance'), cfg.BoolOpt('glance_api_insecure', default=False, help='Allow to perform insecure SSL (https) requests to ' 'glance'), cfg.StrOpt('scheduler_topic', default='manila-scheduler', help='the topic scheduler nodes listen on'), cfg.StrOpt('share_topic', default='manila-share', help='the topic share nodes listen on'), cfg.BoolOpt('enable_v1_api', default=True, help=_("Deploy v1 of the Manila API. ")), cfg.BoolOpt('enable_v2_api', default=True, help=_("Deploy v2 of the Manila API. ")), cfg.BoolOpt('api_rate_limit', default=True, help='whether to rate limit the api'), cfg.ListOpt('osapi_share_ext_list', default=[], help='Specify list of extensions to load when using osapi_' 'share_extension option with manila.api.contrib.' 
'select_extensions'), cfg.MultiStrOpt('osapi_share_extension', default=['manila.api.contrib.standard_extensions'], help='osapi share extension to load'), cfg.StrOpt('osapi_share_base_URL', default=None, help='Base URL that will be presented to users in links ' 'to the OpenStack Share API', deprecated_name='osapi_compute_link_prefix'), cfg.IntOpt('osapi_max_limit', default=1000, help='the maximum number of items returned in a single ' 'response from a collection resource'), cfg.StrOpt('sqlite_db', default='manila.sqlite', help='the filename to use with sqlite'), cfg.BoolOpt('sqlite_synchronous', default=True, help='If passed, use synchronous mode for sqlite'), cfg.IntOpt('sql_idle_timeout', default=3600, help='timeout before idle sql connections are reaped'), cfg.IntOpt('sql_max_retries', default=10, help='maximum db connection retries during startup. ' '(setting -1 implies an infinite retry count)'), cfg.IntOpt('sql_retry_interval', default=10, help='interval between retries of opening a sql connection'), cfg.StrOpt('scheduler_manager', default='manila.scheduler.manager.SchedulerManager', help='full class name for the Manager for scheduler'), cfg.StrOpt('share_manager', default='manila.share.manager.ShareManager', help='full class name for the Manager for share'), cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node. This can be an opaque identifier. ' 'It is not necessarily a hostname, FQDN, or IP address.'), # NOTE(vish): default to nova for compatibility with nova installs cfg.StrOpt('storage_availability_zone', default='nova', help='availability zone of this node'), cfg.ListOpt('memcached_servers', default=None, help='Memcached servers or None for in process cache.'), cfg.StrOpt('share_usage_audit_period', default='month', help='time period to generate share usages for. 
' 'Time period must be hour, day, month or year'), cfg.StrOpt('root_helper', default='sudo', help='Deprecated: command to use for running commands as root'), cfg.StrOpt('rootwrap_config', default=None, help='Path to the rootwrap configuration file to use for ' 'running commands as root'), cfg.BoolOpt('monkey_patch', default=False, help='Whether to log monkey patching'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules/decorators to monkey patch'), cfg.IntOpt('service_down_time', default=60, help='maximum time since last check-in for up service'), cfg.StrOpt('share_api_class', default='manila.share.api.API', help='The full class name of the share API class to use'), cfg.StrOpt('auth_strategy', default='noauth', help='The strategy to use for auth. Supports noauth, keystone, ' 'and deprecated.'), cfg.ListOpt('enabled_backends', default=None, help='A list of backend names to use. These backend names ' 'should be backed by a unique [CONFIG] group ' 'with its options'), cfg.ListOpt('enabled_share_backends', default=None, help='A list of share backend names to use. These backend ' 'names should be backed by a unique [CONFIG] group ' 'with its options'), cfg.BoolOpt('no_snapshot_gb_quota', default=False, help='Whether snapshots count against GigaByte quota'), ] CONF.register_opts(global_opts) manila-2013.2.dev175.gbf1a399/manila/context.py0000664000175000017500000001323712301410454021015 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of manila.""" import copy import uuid from manila.openstack.common import local from manila.openstack.common import log as logging from manila.openstack.common import timeutils from manila import policy LOG = logging.getLogger(__name__) def generate_request_id(): return 'req-' + str(uuid.uuid4()) class RequestContext(object): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, quota_class=None, service_catalog=None, **kwargs): """ :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. 
""" if kwargs: LOG.warn(_('Arguments dropped when creating context: %s') % str(kwargs)) self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.is_admin = is_admin if self.is_admin is None: self.is_admin = policy.check_is_admin(self.roles) elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, basestring): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp if service_catalog: self.service_catalog = [s for s in service_catalog if s.get('type') in ('compute', 'volume')] else: self.service_catalog = [] if not request_id: request_id = generate_request_id() self.request_id = request_id self.auth_token = auth_token self.quota_class = quota_class if overwrite or not hasattr(local.store, 'context'): self.update_store() def _get_read_deleted(self): return self._read_deleted def _set_read_deleted(self, read_deleted): if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self): del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def update_store(self): local.store.context = self def to_dict(self): return {'user_id': self.user_id, 'project_id': self.project_id, 'is_admin': self.is_admin, 'read_deleted': self.read_deleted, 'roles': self.roles, 'remote_address': self.remote_address, 'timestamp': timeutils.strtime(self.timestamp), 'request_id': self.request_id, 'auth_token': self.auth_token, 'quota_class': self.quota_class, 'tenant': self.tenant, 'service_catalog': self.service_catalog, 'user': self.user} @classmethod def from_dict(cls, values): return cls(**values) def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin 
flag set.""" context = copy.copy(self) context.is_admin = True if 'admin' not in context.roles: context.roles.append('admin') if read_deleted is not None: context.read_deleted = read_deleted return context # NOTE(sirp): the openstack/common version of RequestContext uses # tenant/user whereas the Manila version uses project_id/user_id. We need # this shim in order to use context-aware code from openstack/common, like # logging, until we make the switch to using openstack/common's version of # RequestContext. @property def tenant(self): return self.project_id @property def user(self): return self.user_id def get_admin_context(read_deleted="no"): return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) manila-2013.2.dev175.gbf1a399/manila/manager.py0000664000175000017500000001772112301410454020745 0ustar chuckchuck00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. 
For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. """ from oslo.config import cfg from manila.db import base from manila.openstack.common import log as logging from manila.openstack.common.rpc import dispatcher as rpc_dispatcher from manila.scheduler import rpcapi as scheduler_rpcapi from manila import version CONF = cfg.CONF LOG = logging.getLogger(__name__) def periodic_task(*args, **kwargs): """Decorator to indicate that a method is a periodic task. This decorator can be used in two ways: 1. Without arguments '@periodic_task', this will be run on every tick of the periodic scheduler. 2. 
With arguments, @periodic_task(ticks_between_runs=N), this will be run on every N ticks of the periodic scheduler. """ def decorator(f): f._periodic_task = True f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0) return f # NOTE(sirp): The `if` is necessary to allow the decorator to be used with # and without parens. # # In the 'with-parens' case (with kwargs present), this function needs to # return a decorator function since the interpreter will invoke it like: # # periodic_task(*args, **kwargs)(f) # # In the 'without-parens' case, the original function will be passed # in as the first argument, like: # # periodic_task(f) if kwargs: return decorator else: return decorator(args[0]) class ManagerMeta(type): def __init__(cls, names, bases, dict_): """Metaclass that allows us to collect decorated periodic tasks.""" super(ManagerMeta, cls).__init__(names, bases, dict_) # NOTE(sirp): if the attribute is not present then we must be the base # class, so, go ahead an initialize it. If the attribute is present, # then we're a subclass so make a copy of it so we don't step on our # parent's toes. try: cls._periodic_tasks = cls._periodic_tasks[:] except AttributeError: cls._periodic_tasks = [] try: cls._ticks_to_skip = cls._ticks_to_skip.copy() except AttributeError: cls._ticks_to_skip = {} for value in cls.__dict__.values(): if getattr(value, '_periodic_task', False): task = value name = task.__name__ cls._periodic_tasks.append((name, task)) cls._ticks_to_skip[name] = task._ticks_between_runs class Manager(base.Base): __metaclass__ = ManagerMeta # Set RPC API version to 1.0 by default. RPC_API_VERSION = '1.0' def __init__(self, host=None, db_driver=None): if not host: host = CONF.host self.host = host super(Manager, self).__init__(db_driver) def create_rpc_dispatcher(self): '''Get the rpc dispatcher for this manager. If a manager would like to set an rpc API version, or support more than one class as the target of rpc messages, override this method. 
''' return rpc_dispatcher.RpcDispatcher([self]) def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" for task_name, task in self._periodic_tasks: full_task_name = '.'.join([self.__class__.__name__, task_name]) ticks_to_skip = self._ticks_to_skip[task_name] if ticks_to_skip > 0: LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s" " ticks left until next run"), locals()) self._ticks_to_skip[task_name] -= 1 continue self._ticks_to_skip[task_name] = task._ticks_between_runs LOG.debug(_("Running periodic task %(full_task_name)s"), locals()) try: task(self, context) except Exception as e: if raise_on_error: raise LOG.exception(_("Error during %(full_task_name)s: %(e)s"), locals()) def init_host(self): """Handle initialization if this is a standalone service. Child classes should override this method. """ pass def service_version(self, context): return version.version_string() def service_config(self, context): config = {} for key in CONF: config[key] = CONF.get(key, None) return config class SchedulerDependentManager(Manager): """Periodically send capability updates to the Scheduler services. Services that need to update the Scheduler of their capabilities should derive from this class. Otherwise they can derive from manager.Manager directly. Updates are only sent after update_service_capabilities is called with non-None values. 
""" def __init__(self, host=None, db_driver=None, service_name='undefined'): self.last_capabilities = None self.service_name = service_name self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() super(SchedulerDependentManager, self).__init__(host, db_driver) def update_service_capabilities(self, capabilities): """Remember these capabilities to send on next periodic update.""" self.last_capabilities = capabilities @periodic_task def _publish_service_capabilities(self, context): """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: LOG.debug(_('Notifying Schedulers of capabilities ...')) self.scheduler_rpcapi.update_service_capabilities( context, self.service_name, self.host, self.last_capabilities) manila-2013.2.dev175.gbf1a399/manila/testing/0000775000175000017500000000000012301410516020425 5ustar chuckchuck00000000000000manila-2013.2.dev175.gbf1a399/manila/testing/README.rst0000664000175000017500000000355112301410454022121 0ustar chuckchuck00000000000000===================================== OpenStack Manila Testing Infrastructure ===================================== A note of clarification is in order, to help those who are new to testing in OpenStack manila: - actual unit tests are created in the "tests" directory; - the "testing" directory is used to house the infrastructure needed to support testing in OpenStack Manila. This README file attempts to provide current and prospective contributors with everything they need to know in order to start creating unit tests and utilizing the convenience code provided in manila.testing. Note: the content for the rest of this file will be added as the work items in the following blueprint are completed: https://blueprints.launchpad.net/manila/+spec/consolidate-testing-infrastructure Test Types: Unit vs. Functional vs. 
Integration ----------------------------------------------- TBD Writing Unit Tests ------------------ TBD Using Fakes ~~~~~~~~~~~ TBD test.TestCase ------------- The TestCase class from manila.test (generally imported as test) will automatically manage self.stubs using the stubout module and self.mox using the mox module during the setUp step. They will automatically verify and clean up during the tearDown step. If using test.TestCase, calling the super class setUp is required and calling the super class tearDown is required to be last if tearDown is overridden. Writing Functional Tests ------------------------ TBD Writing Integration Tests ------------------------- TBD Tests and assertRaises ---------------------- When asserting that a test should raise an exception, test against the most specific exception possible. An overly broad exception type (like Exception) can mask errors in the unit test itself. Example:: self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, elevated, instance_uuid)